// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_DESCRIPTION("torture test facility for locking");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, acq_writer_lim, 0, "Write_acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
		   "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8

static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
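
/*
 * Example invocation (the values below are illustrative only; see the
 * torture_param() declarations above for the full set of knobs):
 *
 *	modprobe locktorture torture_type=rwsem_lock \
 *		nwriters_stress=4 nreaders_stress=8 stat_interval=15
 */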

static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.

// Parse a cpumask kernel parameter.  If there are more users later on,
// this might need to go to a more central location.
static int param_set_cpumask(const char *val, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;
	int ret;
	char *s;

	if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
		s = "Out of memory";
		ret = -ENOMEM;
		goto out_err;
	}
	ret = cpulist_parse(val, *cm_bind);
	if (!ret)
		return ret;
	s = "Bad CPU range";
out_err:
	pr_warn("%s: %s, all CPUs set\n", kp->name, s);
	cpumask_setall(*cm_bind);
	return ret;
}

// Output a cpumask kernel parameter.
static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;

	return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
}

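// Is the specified cpumask kernel parameter allocated and non-empty?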
static bool cpumask_nonempty(cpumask_var_t mask)
{
	return cpumask_available(mask) && !cpumask_empty(mask);
}

static const struct kernel_param_ops lt_bind_ops = {
	.set = param_set_cpumask,
	.get = param_get_cpumask,
};

module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);

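// Defined in kernel/torture.c.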
long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

struct call_rcu_chain {
	struct rcu_head crc_rh;
	bool crc_stop;
};
struct call_rcu_chain *call_rcu_chain_list;

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	  /* BUGGY, do not use in real life!!! */
}

static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another 10 * rt_boost_factor
		 * operations, then restored back to its original prio, and so
		 * forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

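/* task_boost callback for all non-rt_mutex lock types: boost only if rt_boost=2. */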
static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	unsigned long j;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
		j = jiffies;
		mdelay(long_hold);
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
	}
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock	= torture_raw_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock	= torture_raw_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init		= torture_mutex_init,
	.nested_lock	= torture_mutex_nested_lock,
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_mutex_unlock,
	.nested_unlock	= torture_mutex_nested_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

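/*
 * Acquire torture_ww_mutex_0, _1, and _2 under a single ww_acquire_ctx,
 * using the classic wait/wound backoff pattern: on -EDEADLK, drop every
 * lock already held, take the contended lock with ww_mutex_lock_slow(),
 * move it to the front of the list, and retry the remainder.
 */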
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

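/* task_boost callback for rt_mutex: boost whenever rt_boost is enabled (1 or 2). */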
static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init		= torture_rtmutex_init,
	.nested_lock	= torture_rtmutex_nested_lock,
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.nested_unlock	= torture_rtmutex_nested_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold * 2);
	else
		mdelay(long_hold / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock       = torture_percpu_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	unsigned long j;
	unsigned long j1;
	u32 lockset_mask;
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);
	bool skip_main_lock;
	int tid = lwsp - cxt.lwsa;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	if (!rt_task(current))
		set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock. By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees)
		 */
		skip_main_lock = (nested_locks &&
				 !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			if (acq_writer_lim > 0)
				j = jiffies;
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */
			if (acq_writer_lim > 0) {
				j1 = jiffies;
				WARN_ONCE(time_after(j1, j + acq_writer_lim),
					  "%s: Lock acquisition took %lu jiffies.\n",
					  __func__, j1 - j);
			}
			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
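
/*
 * A resulting console line looks like the following (numbers are
 * illustrative only):
 *
 *	Writes:  Total: 93746064  Max/Min: 255/10   Fail: 0
 *
 * "???" after Max/Min flags a suspicious imbalance, and "!!!" after
 * Fail flags acquisition failures.
 */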

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}


static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	static cpumask_t cpumask_all;
	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

	cpumask_setall(&cpumask_all);
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
		 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
		 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
		 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
		 verbose, writer_fifo);
}

// If requested, maintain call_rcu() chains to keep a grace period always
// in flight.  These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

	if (!smp_load_acquire(&crcp->crc_stop)) {
		(void)start_poll_synchronize_rcu(); // Start one grace period...
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
	}
}

// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{
	int i;

	if (call_rcu_chains <= 0)
		return 0;
	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
	if (!call_rcu_chain_list)
		return -ENOMEM;
	for (i = 0; i < call_rcu_chains; i++) {
		call_rcu_chain_list[i].crc_stop = false;
		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
	}
	return 0;
}

// Stop all of the call_rcu() chains.
static void call_rcu_chain_cleanup(void)
{
	int i;

	if (!call_rcu_chain_list)
		return;
	for (i = 0; i < call_rcu_chains; i++)
		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
	rcu_barrier();
	kfree(call_rcu_chain_list);
	call_rcu_chain_list = NULL;
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt->cur_ops.init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops.exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

	call_rcu_chain_cleanup();

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the number of readers and
			 * writers evenly. We still run the same number of
			 * threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	firsterr = call_rcu_chain_init();
	if (torture_init_error(firsterr))
		goto unwind;

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* cap nested_locks to MAX_NESTED_LOCKS */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
						     writer_tasks[i],
						     writer_fifo ? sched_set_fifo : NULL);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_writers))
			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_readers))
			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);