// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(wd_class);
static DEFINE_WW_CLASS(ww_class);
struct workqueue_struct *wq;

#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

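/*
 * Worker half of the basic mutual-exclusion test: signal that we are ready,
 * wait for the main thread's 'go', take the mutex (spinning on trylock or
 * blocking, depending on the flags), report 'done' and release it.
 */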
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

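/*
 * Main half of the mutual-exclusion test: hold the mutex (optionally under an
 * acquire context) while the worker above tries to take it, and fail if the
 * worker manages to signal 'done' before we have dropped the lock.
 */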
static int __test_mutex(struct ww_class *class, unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, class);
	if (flags & TEST_MTX_CTX)
		ww_acquire_init(&ctx, class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	queue_work(wq, &mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	if (flags & TEST_MTX_CTX)
		ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(struct ww_class *class)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(class, i);
		if (ret)
			return ret;
	}

	return 0;
}

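/*
 * AA (self-recursion) checks: with the mutex already held under a context,
 * trylocks with or without a context must fail, and relocking with the same
 * context must return -EALREADY rather than succeed or deadlock.
 */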
static int test_aa(struct ww_class *class, bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, class);
	ww_acquire_init(&ctx, class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL))  {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx))  {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

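/*
 * ABBA test: the worker takes b_mutex then a_mutex while the main thread
 * takes a_mutex then b_mutex, both under noinject acquire contexts.  Without
 * backoff, at least one side is expected to see -EDEADLK; with 'resolve' set,
 * the -EDEADLK loser drops its lock, reacquires via ww_mutex_lock_slow() and
 * both sides must complete.  This is essentially the documented ww_mutex
 * backoff pattern:
 *
 *	err = ww_mutex_lock(&B, &ctx);
 *	if (err == -EDEADLK) {
 *		ww_mutex_unlock(&A);
 *		ww_mutex_lock_slow(&B, &ctx);
 *		err = ww_mutex_lock(&A, &ctx);
 *	}
 */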
struct test_abba {
	struct work_struct work;
	struct ww_class *class;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, abba->class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(struct ww_class *class, bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, class);
	ww_mutex_init(&abba.b_mutex, class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.class = class;
	abba.trylock = trylock;
	abba.resolve = resolve;

	queue_work(wq, &abba.work);

	ww_acquire_init_noinject(&ctx, class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

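/*
 * Cyclic deadlock test: nthreads workers form a ring, each locking its own
 * a_mutex and then its neighbour's.  Any worker that gets -EDEADLK backs off
 * and reacquires with ww_mutex_lock_slow(); every worker must end up with
 * result == 0 or the cycle was not resolved.
 */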
struct test_cycle {
	struct work_struct work;
	struct ww_class *class;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, cycle->class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(struct ww_class *class, unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		cycle->class = class;
		ww_mutex_init(&cycle->a_mutex, class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

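/* Run the ring test for every ring size from 2 up to ncpus + 1. */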
static int test_cycle(struct ww_class *class, unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(class, n);
		if (ret)
			return ret;
	}

	return 0;
}

struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	struct ww_class *class;
	unsigned long timeout;
	int nlocks;
};

struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);

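/* Serialise access to the module-local PRNG; returns a value below 'ceil'. */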
static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);
	return ret;
}

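/*
 * Build a randomly shuffled array of the indices 0..count-1; the stress
 * workers use it to pick the order in which they take the shared locks.
 */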
static int *get_random_order(int count)
{
	int *order;
	int n, r;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n)
			swap(order[n], order[r]);
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

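/*
 * Stress worker that always locks in its (randomised once) fixed order under
 * one acquire context per pass; on -EDEADLK it drops everything, takes the
 * contended lock with ww_mutex_lock_slow() and retries until the timeout.
 */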
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, stress->class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

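/*
 * Stress worker that keeps its locks on a list and, after backing off from a
 * contended lock, moves that lock to the head of the list, so its acquisition
 * order keeps changing from pass to pass.
 */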
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, stress->class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

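/* Stress worker that repeatedly takes one randomly chosen lock, contextless. */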
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

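/*
 * Spread 'nthreads' workers of the kinds selected by 'flags' over a shared
 * array of 'nlocks' ww_mutexes and let them hammer it for about two seconds
 * (timeout = jiffies + 2*HZ) before tearing everything down.
 */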
static int stress(struct ww_class *class, int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->class = class;
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

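/*
 * Run the whole battery (mutual exclusion, AA, ABBA, cyclic and stress tests)
 * against one ww_class, scaling thread counts by the number of online CPUs.
 */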
static int run_tests(struct ww_class *class)
{
	int ncpus = num_online_cpus();
	int ret, i;

	ret = test_mutex(class);
	if (ret)
		return ret;

	ret = test_aa(class, false);
	if (ret)
		return ret;

	ret = test_aa(class, true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(class, i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(class, ncpus);
	if (ret)
		return ret;

	ret = stress(class, 16, 2 * ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(class, 16, 2 * ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(class, 2046, hweight32(STRESS_ALL) * ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static int run_test_classes(void)
{
	int ret;

	pr_info("Beginning ww (wound) mutex selftests\n");

	ret = run_tests(&ww_class);
	if (ret)
		return ret;

	pr_info("Beginning ww (die) mutex selftests\n");
	ret = run_tests(&wd_class);
	if (ret)
		return ret;

	pr_info("All ww mutex selftests passed\n");
	return 0;
}

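/* Only one test run at a time, whether started from module init or via sysfs. */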
static DEFINE_MUTEX(run_lock);

static ssize_t run_tests_store(struct kobject *kobj, struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	if (!mutex_trylock(&run_lock)) {
		pr_err("Test already running\n");
		return count;
	}

	run_test_classes();
	mutex_unlock(&run_lock);

	return count;
}

static struct kobj_attribute run_tests_attribute =
	__ATTR(run_tests, 0664, NULL, run_tests_store);

static struct attribute *attrs[] = {
	&run_tests_attribute.attr,
	NULL,   /* need to NULL terminate the list of attributes */
};

static struct attribute_group attr_group = {
	.attrs = attrs,
};

static struct kobject *test_ww_mutex_kobj;

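/*
 * Module init: seed the PRNG, create the unbound workqueue, expose the
 * /sys/kernel/test_ww_mutex/run_tests trigger and run the whole suite once.
 */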
static int __init test_ww_mutex_init(void)
{
	int ret;

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	test_ww_mutex_kobj = kobject_create_and_add("test_ww_mutex", kernel_kobj);
	if (!test_ww_mutex_kobj) {
		destroy_workqueue(wq);
		return -ENOMEM;
	}

	/* Create the files associated with this kobject */
	ret = sysfs_create_group(test_ww_mutex_kobj, &attr_group);
	if (ret) {
		kobject_put(test_ww_mutex_kobj);
		destroy_workqueue(wq);
		return ret;
	}

	mutex_lock(&run_lock);
	ret = run_test_classes();
	mutex_unlock(&run_lock);

	return ret;
}

static void __exit test_ww_mutex_exit(void)
{
	kobject_put(test_ww_mutex_kobj);
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("API test facility for ww_mutexes");