// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(wd_class);
static DEFINE_WW_CLASS(ww_class);
struct workqueue_struct *wq;

#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

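/*
 * Worker side of the basic mutual exclusion test: wait for the parent's go
 * signal, then acquire the mutex (spinning on trylock if TEST_MTX_TRY is
 * set), report completion and release it again.
 */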
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

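/*
 * Basic mutual exclusion: take the mutex, kick the worker and verify that it
 * cannot complete &mtx.done until we drop the lock again.  test_mutex() runs
 * this for every combination of TEST_MTX_* flags (spin vs. sleep, trylock
 * vs. lock, with or without an acquire context).
 */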
static int __test_mutex(struct ww_class *class, unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, class);
	if (flags & TEST_MTX_CTX)
		ww_acquire_init(&ctx, class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	queue_work(wq, &mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	if (flags & TEST_MTX_CTX)
		ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(struct ww_class *class)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(class, i);
		if (ret)
			return ret;
	}

	return 0;
}

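/*
 * AA deadlock: with the mutex already held in an acquire context, trylock
 * (with or without a context) must fail and a recursive ww_mutex_lock() in
 * the same context must return -EALREADY.
 */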
static int test_aa(struct ww_class *class, bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, class);
	ww_acquire_init(&ctx, class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL))  {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx))  {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

struct test_abba {
	struct work_struct work;
	struct ww_class *class;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

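/*
 * Worker side of the ABBA test: lock B, then contend on A, which is held by
 * the parent thread.
 */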
static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, abba->class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

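/*
 * ABBA deadlock: the parent locks A then B while the worker locks B then A.
 * Without 'resolve' at least one side must observe -EDEADLK; with 'resolve'
 * the loser backs off via ww_mutex_lock_slow() and both sides must succeed.
 */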
static int test_abba(struct ww_class *class, bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, class);
	ww_mutex_init(&abba.b_mutex, class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.class = class;
	abba.trylock = trylock;
	abba.resolve = resolve;

	queue_work(wq, &abba.work);

	ww_acquire_init_noinject(&ctx, class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

struct test_cycle {
	struct work_struct work;
	struct ww_class *class;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, cycle->class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

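/*
 * Cyclic deadlock: each of nthreads workers holds its own mutex and then
 * tries to take its neighbour's, closing a cycle.  The -EDEADLK back-off in
 * test_cycle_work() must resolve the cycle so that every worker succeeds.
 */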
static int __test_cycle(struct ww_class *class, unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_objs(*cycles, nthreads, GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		cycle->class = class;
		ww_mutex_init(&cycle->a_mutex, class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(struct ww_class *class, unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(class, n);
		if (ret)
			return ret;
	}

	return 0;
}

struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	struct ww_class *class;
	unsigned long timeout;
	int nlocks;
};

struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);

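/* Pseudo-random number below ceil, drawn from the module-local PRNG state. */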
static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);
	return ret;
}

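/* Allocate and return a randomly shuffled array of the indices 0..count-1. */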
static int *get_random_order(int count)
{
	int *order;
	int n, r;

	order = kmalloc_objs(*order, count, GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n)
			swap(order[n], order[r]);
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

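/*
 * Stress: repeatedly acquire every lock in a fixed (randomly chosen) order
 * under one acquire context, backing off with ww_mutex_lock_slow() on
 * -EDEADLK and retrying until the timeout expires.
 */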
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, stress->class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

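/*
 * Stress: acquire the locks by walking a list whose order changes whenever a
 * contended lock is moved to the front after ww_mutex_lock_slow(), so the
 * acquisition order keeps shifting between passes.
 */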
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc_obj(*ll, GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, stress->class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

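/* Stress: hammer a single, randomly chosen lock without an acquire context. */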
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

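/*
 * Spawn nthreads stress workers over nlocks mutexes, cycling through the
 * work functions selected by the STRESS_* flags, and wait for them all to
 * finish.
 */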
static int stress(struct ww_class *class, int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_objs(*locks, nlocks, GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_objs(*stress_array, nthreads, GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->class = class;
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

static int run_tests(struct ww_class *class)
{
	int ncpus = num_online_cpus();
	int ret, i;

	ret = test_mutex(class);
	if (ret)
		return ret;

	ret = test_aa(class, false);
	if (ret)
		return ret;

	ret = test_aa(class, true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(class, i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(class, ncpus);
	if (ret)
		return ret;

	ret = stress(class, 16, 2 * ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(class, 16, 2 * ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(class, 2046, hweight32(STRESS_ALL) * ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static int run_test_classes(void)
{
	int ret;

	pr_info("Beginning ww (wound) mutex selftests\n");

	ret = run_tests(&ww_class);
	if (ret)
		return ret;

	pr_info("Beginning ww (die) mutex selftests\n");
	ret = run_tests(&wd_class);
	if (ret)
		return ret;

	pr_info("All ww mutex selftests passed\n");
	return 0;
}

static DEFINE_MUTEX(run_lock);

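/*
 * Writing to the run_tests sysfs attribute (/sys/kernel/test_ww_mutex/run_tests)
 * re-runs the whole test suite; concurrent runs are rejected via run_lock.
 */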
static ssize_t run_tests_store(struct kobject *kobj, struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	if (!mutex_trylock(&run_lock)) {
		pr_err("Test already running\n");
		return count;
	}

	run_test_classes();
	mutex_unlock(&run_lock);

	return count;
}

static struct kobj_attribute run_tests_attribute =
	__ATTR(run_tests, 0664, NULL, run_tests_store);

static struct attribute *attrs[] = {
	&run_tests_attribute.attr,
	NULL,   /* need to NULL terminate the list of attributes */
};

static struct attribute_group attr_group = {
	.attrs = attrs,
};

static struct kobject *test_ww_mutex_kobj;

static int __init test_ww_mutex_init(void)
{
	int ret;

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	test_ww_mutex_kobj = kobject_create_and_add("test_ww_mutex", kernel_kobj);
	if (!test_ww_mutex_kobj) {
		destroy_workqueue(wq);
		return -ENOMEM;
	}

	/* Create the files associated with this kobject */
	ret = sysfs_create_group(test_ww_mutex_kobj, &attr_group);
	if (ret) {
		kobject_put(test_ww_mutex_kobj);
		destroy_workqueue(wq);
		return ret;
	}

	mutex_lock(&run_lock);
	ret = run_test_classes();
	mutex_unlock(&run_lock);

	return ret;
}

static void __exit test_ww_mutex_exit(void)
{
	kobject_put(test_ww_mutex_kobj);
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("API test facility for ww_mutexes");