// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

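/*
 * CONFIG_DEBUG_WW_MUTEX_SLOWPATH makes ww_acquire_init() randomly inject
 * -EDEADLK to exercise the backoff slow path.  Setting
 * deadlock_inject_countdown to ~0U effectively disables that injection for
 * tests which need deterministic lock/trylock behaviour.
 */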
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

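/*
 * Worker for __test_mutex(): signal readiness, wait for the go signal,
 * then acquire the mutex (trylock-spinning or blocking, depending on
 * TEST_MTX_TRY) and complete &done once it is held.
 */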
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

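/*
 * Basic mutual exclusion: hold the mutex while the worker tries to take it,
 * and fail if the worker manages to complete &done before we drop the lock.
 * The flags select trylock vs. blocking lock in the worker, busy-wait vs.
 * timed wait in the checker, and whether an acquire context is used here.
 */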
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	if (flags & TEST_MTX_CTX)
		ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	if (flags & TEST_MTX_CTX)
		ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

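/* Run __test_mutex() over every combination of SPIN, TRY and CTX flags. */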
static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

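/*
 * AA deadlock: after taking the mutex once within an acquire context,
 * a second trylock (with or without the context) must fail and a second
 * ww_mutex_lock() with the same context must return -EALREADY.
 */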
static int test_aa(bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL))  {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx))  {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

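/*
 * ABBA deadlock: the work item locks b_mutex then a_mutex while the main
 * thread (test_abba()) locks a_mutex then b_mutex.  Without 'resolve' one
 * side must report -EDEADLK; with 'resolve' the loser backs off, reacquires
 * via ww_mutex_lock_slow() and both sides must eventually succeed.
 */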
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

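/*
 * Drive the ABBA scenario from the other side and check the outcome of
 * both threads against the 'resolve' expectation.
 */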
static int test_abba(bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.trylock = trylock;
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

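/*
 * Cyclic deadlock: each of N workers holds its own a_mutex and then tries
 * to take the next worker's a_mutex (the last wraps around to the first),
 * closing a ring.  Every worker that sees -EDEADLK must be able to resolve
 * it by dropping its own lock and reacquiring via ww_mutex_lock_slow().
 */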
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

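/*
 * Build the ring: cycles[n] owns its a_mutex and points b_mutex at the
 * next entry's a_mutex; the completions make each worker wait until the
 * next one in the ring holds its own lock before attempting the cross
 * lock.  Fail if any worker could not resolve the resulting deadlock.
 */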
static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);

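/*
 * Return a pseudo-random number below ceil; the spinlock serializes access
 * to the shared rnd_state, which is not safe to use concurrently.
 */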
static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);
	return ret;
}

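/* Return a randomly shuffled array of the indices 0 .. count-1, or NULL. */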
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

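/*
 * Stress pattern 1: repeatedly take all locks in one fixed, randomly chosen
 * order under a fresh acquire context.  On -EDEADLK, drop everything,
 * acquire the contended lock with ww_mutex_lock_slow() and retry the pass,
 * skipping the lock that is already held.
 */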
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

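/*
 * Stress pattern 2: walk a list of locks in its current order; when a lock
 * returns -EDEADLK, release the ones already held, take the contended lock
 * with ww_mutex_lock_slow() and move it to the head of the list, which
 * restarts the iteration with a new (deadlock-avoiding) order.
 */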
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

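/*
 * Stress pattern 3: repeatedly lock and unlock one randomly chosen mutex
 * without an acquire context.
 */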
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

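/*
 * Spawn nthreads workers over nlocks shared locks, cycling through the
 * stress patterns selected by flags, each worker running for about two
 * seconds.
 */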
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

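/*
 * Run all selftests in sequence on module load; the first failure aborts
 * the load and is reported via the module init error code.
 */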
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret, i;

	printk(KERN_INFO "Beginning ww mutex selftests\n");

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa(false);
	if (ret)
		return ret;

	ret = test_aa(true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	printk(KERN_INFO "All ww mutex selftests passed\n");
	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("API test facility for ww_mutexes");