Lines Matching +full:cycle +full:-0 (matches from the ww_mutex API self-test, kernel/locking/test-ww_mutex.c; each entry reads: file line number, matched code, "in function()")

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Module-based API test facility for ww_mutexes
22 (a)->deadlock_inject_countdown = ~0U; \
23 } while (0)
35 #define TEST_MTX_SPIN BIT(0)
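
The two macro lines above (the deadlock_inject_countdown = ~0U assignment inside a do { } while (0) wrapper) disable the artificial -EDEADLK injection that CONFIG_DEBUG_WW_MUTEX_SLOWPATH adds, so the tests only see real contention. A minimal sketch of what such a wrapper looks like; the name ww_acquire_init_noinject is an assumption, not something shown in the match list:

#include <linux/ww_mutex.h>

/*
 * Sketch: initialise an acquire context and, when slow-path debugging is
 * built in, push the injection countdown to its maximum so that no fake
 * -EDEADLK is ever generated for this context.
 */
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do {             \
                ww_acquire_init((a), (b));              \
                (a)->deadlock_inject_countdown = ~0U;   \
        } while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif
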
44 complete(&mtx->ready); in test_mutex_work()
45 wait_for_completion(&mtx->go); in test_mutex_work()
47 if (mtx->flags & TEST_MTX_TRY) { in test_mutex_work()
48 while (!ww_mutex_trylock(&mtx->mutex, NULL)) in test_mutex_work()
51 ww_mutex_lock(&mtx->mutex, NULL); in test_mutex_work()
53 complete(&mtx->done); in test_mutex_work()
54 ww_mutex_unlock(&mtx->mutex); in test_mutex_work()
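
The test_mutex_work() fragments show the simplest use of the API: the worker signals it is ready, waits for the go signal, then takes the single mutex either by spinning on ww_mutex_trylock() or by a plain blocking ww_mutex_lock(), both with a NULL acquire context, and finally completes and unlocks. A self-contained sketch of that worker follows; the struct layout and the TEST_MTX_TRY value are inferred from the fragments, so treat them as assumptions.

#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/ww_mutex.h>

#define TEST_MTX_SPIN   BIT(0)
#define TEST_MTX_TRY    BIT(1)          /* assumed value; only BIT(0) appears above */

struct test_mutex {
        struct work_struct work;
        struct ww_mutex mutex;
        struct completion ready, go, done;
        unsigned int flags;
};

static void test_mutex_work(struct work_struct *work)
{
        struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

        complete(&mtx->ready);
        wait_for_completion(&mtx->go);

        if (mtx->flags & TEST_MTX_TRY) {
                /* A single lock needs no acquire context, so ctx may be NULL. */
                while (!ww_mutex_trylock(&mtx->mutex, NULL))
                        cond_resched();
        } else {
                ww_mutex_lock(&mtx->mutex, NULL);
        }
        complete(&mtx->done);
        ww_mutex_unlock(&mtx->mutex);
}
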
82 ret = 0; in __test_mutex()
85 ret = -EINVAL; in __test_mutex()
100 ret = -EINVAL; in __test_mutex()
114 for (i = 0; i < __TEST_MTX_LAST; i++) { in test_mutex()
120 return 0; in test_mutex()
150 ret = -EINVAL; in test_aa()
157 ret = -EINVAL; in test_aa()
162 if (ret != -EALREADY) { in test_aa()
167 ret = -EINVAL; in test_aa()
172 ret = 0; in test_aa()
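
The test_aa() fragments verify the recursion check: taking the same ww_mutex a second time under the same acquire context must fail with -EALREADY, and the test reports -EINVAL otherwise. A stripped-down sketch of that check, with the file's error reporting reduced to pr_err():

#include <linux/printk.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(ww_class);

static int test_aa_sketch(void)
{
        struct ww_mutex mutex;
        struct ww_acquire_ctx ctx;
        int err, ret = -EINVAL;

        ww_mutex_init(&mutex, &ww_class);
        ww_acquire_init(&ctx, &ww_class);

        err = ww_mutex_lock(&mutex, &ctx);
        if (err) {
                pr_err("initial lock failed, err=%d\n", err);
                goto out_fini;
        }

        /* A second acquire of the same lock in the same context must fail. */
        err = ww_mutex_lock(&mutex, &ctx);
        if (err == -EALREADY) {
                ret = 0;
        } else {
                pr_err("recursive lock returned %d, expected -EALREADY\n", err);
                if (!err)
                        ww_mutex_unlock(&mutex); /* undo the unexpected acquire */
        }

        ww_mutex_unlock(&mutex);
out_fini:
        ww_acquire_fini(&ctx);
        ww_mutex_destroy(&mutex);
        return ret;
}
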
195 if (!abba->trylock) in test_abba_work()
196 ww_mutex_lock(&abba->b_mutex, &ctx); in test_abba_work()
198 WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx)); in test_abba_work()
200 WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx); in test_abba_work()
202 complete(&abba->b_ready); in test_abba_work()
203 wait_for_completion(&abba->a_ready); in test_abba_work()
205 err = ww_mutex_lock(&abba->a_mutex, &ctx); in test_abba_work()
206 if (abba->resolve && err == -EDEADLK) { in test_abba_work()
207 ww_mutex_unlock(&abba->b_mutex); in test_abba_work()
208 ww_mutex_lock_slow(&abba->a_mutex, &ctx); in test_abba_work()
209 err = ww_mutex_lock(&abba->b_mutex, &ctx); in test_abba_work()
213 ww_mutex_unlock(&abba->a_mutex); in test_abba_work()
214 ww_mutex_unlock(&abba->b_mutex); in test_abba_work()
217 abba->result = err; in test_abba_work()
248 if (resolve && err == -EDEADLK) { in test_abba()
262 ret = 0; in test_abba()
267 ret = -EINVAL; in test_abba()
270 if (err != -EDEADLK && abba.result != -EDEADLK) { in test_abba()
273 ret = -EINVAL; in test_abba()
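
Read together, the test_abba_work() and test_abba() fragments exercise the classic two-lock, two-task ABBA case and the prescribed recovery: whichever ww_mutex_lock() call returns -EDEADLK drops the lock it already holds, reacquires the contended lock with ww_mutex_lock_slow(), and then retries the lock it gave up. The sketch below condenses that backoff into a helper for one task; the names lock_two, first and second are invented for illustration, and the context is assumed to hold no other locks on entry.

#include <linux/ww_mutex.h>

/*
 * Sketch: acquire two ww_mutexes in a fixed order under @ctx, backing
 * off on -EDEADLK the way the ABBA test does.  Because @ctx is assumed
 * to hold nothing yet, the first acquire cannot itself return -EDEADLK.
 */
static int lock_two(struct ww_mutex *first, struct ww_mutex *second,
                    struct ww_acquire_ctx *ctx)
{
        int err;

        ww_mutex_lock(first, ctx);

        err = ww_mutex_lock(second, ctx);
        if (err == -EDEADLK) {
                /* Wounded: release what we hold, then sleep on the winner. */
                ww_mutex_unlock(first);
                ww_mutex_lock_slow(second, ctx);
                err = ww_mutex_lock(first, ctx);
        }

        /* A further -EDEADLK is handed back, much as the test stores it in result. */
        return err;
}

On success both locks are held; the caller is expected to unlock them and call ww_acquire_fini() afterwards, as the test side does.
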
290 struct test_cycle *cycle = container_of(work, typeof(*cycle), work); in test_cycle_work() local
292 int err, erra = 0; in test_cycle_work()
295 ww_mutex_lock(&cycle->a_mutex, &ctx); in test_cycle_work()
297 complete(cycle->a_signal); in test_cycle_work()
298 wait_for_completion(&cycle->b_signal); in test_cycle_work()
300 err = ww_mutex_lock(cycle->b_mutex, &ctx); in test_cycle_work()
301 if (err == -EDEADLK) { in test_cycle_work()
302 err = 0; in test_cycle_work()
303 ww_mutex_unlock(&cycle->a_mutex); in test_cycle_work()
304 ww_mutex_lock_slow(cycle->b_mutex, &ctx); in test_cycle_work()
305 erra = ww_mutex_lock(&cycle->a_mutex, &ctx); in test_cycle_work()
309 ww_mutex_unlock(cycle->b_mutex); in test_cycle_work()
311 ww_mutex_unlock(&cycle->a_mutex); in test_cycle_work()
314 cycle->result = err ?: erra; in test_cycle_work()
320 unsigned int n, last = nthreads - 1; in __test_cycle()
325 return -ENOMEM; in __test_cycle()
327 for (n = 0; n < nthreads; n++) { in __test_cycle()
328 struct test_cycle *cycle = &cycles[n]; in __test_cycle() local
330 ww_mutex_init(&cycle->a_mutex, &ww_class); in __test_cycle()
332 cycle->b_mutex = &cycles[0].a_mutex; in __test_cycle()
334 cycle->b_mutex = &cycles[n + 1].a_mutex; in __test_cycle()
336 if (n == 0) in __test_cycle()
337 cycle->a_signal = &cycles[last].b_signal; in __test_cycle()
339 cycle->a_signal = &cycles[n - 1].b_signal; in __test_cycle()
340 init_completion(&cycle->b_signal); in __test_cycle()
342 INIT_WORK(&cycle->work, test_cycle_work); in __test_cycle()
343 cycle->result = 0; in __test_cycle()
346 for (n = 0; n < nthreads; n++) in __test_cycle()
351 ret = 0; in __test_cycle()
352 for (n = 0; n < nthreads; n++) { in __test_cycle()
353 struct test_cycle *cycle = &cycles[n]; in __test_cycle() local
355 if (!cycle->result) in __test_cycle()
359 n, nthreads, cycle->result); in __test_cycle()
360 ret = -EINVAL; in __test_cycle()
364 for (n = 0; n < nthreads; n++) in __test_cycle()
381 return 0; in test_cycle()
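
The __test_cycle() fragments build a ring: each worker owns its own a_mutex, points b_mutex at the next worker's a_mutex (wrapping the last slot back to the first), signals its predecessor through a_signal, and waits on its own b_signal until its successor has locked up, so all workers then contend around the ring and exactly one must back off via -EDEADLK. The sketch below restates that wiring with modular indexing instead of the first/last special cases; the struct test_cycle layout is inferred from the fragments.

#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(ww_class);

/* Field names taken from the fragments above; treat the layout as approximate. */
struct test_cycle {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex *b_mutex;
        struct completion *a_signal;
        struct completion b_signal;
        int result;
};

/* Worker n locks a_mutex[n], contends for a_mutex[(n + 1) % nthreads],
 * and signals worker n - 1 through a_signal. */
static void wire_ring(struct test_cycle *cycles, unsigned int nthreads,
                      work_func_t fn)
{
        unsigned int n;

        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                ww_mutex_init(&cycle->a_mutex, &ww_class);
                cycle->b_mutex = &cycles[(n + 1) % nthreads].a_mutex;
                cycle->a_signal = &cycles[(n + nthreads - 1) % nthreads].b_signal;
                init_completion(&cycle->b_signal);

                INIT_WORK(&cycle->work, fn);
                cycle->result = 0;
        }
}
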
413 for (n = 0; n < count; n++) in get_random_order()
416 for (n = count - 1; n > 1; n--) { in get_random_order()
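
Only the two loop headers of get_random_order() appear in the matches, but they outline a Fisher-Yates style shuffle of an index array, driven by get_random_u32_below() (which also shows up in stress_one_work()). A sketch of how such a shuffle is typically written; everything between the two loop headers is reconstruction, not quoted code.

#include <linux/random.h>
#include <linux/slab.h>

static int *get_random_order_sketch(int count)
{
        int *order;
        int n, r, tmp;

        order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
        if (!order)
                return NULL;

        for (n = 0; n < count; n++)
                order[n] = n;

        /* Walk down the array, swapping each slot with a random earlier one. */
        for (n = count - 1; n > 1; n--) {
                r = get_random_u32_below(n + 1);
                if (r != n) {
                        tmp = order[n];
                        order[n] = order[r];
                        order[r] = tmp;
                }
        }

        return order;
}
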
436 const int nlocks = stress->nlocks; in stress_inorder_work()
437 struct ww_mutex *locks = stress->locks; in stress_inorder_work()
446 int contended = -1; in stress_inorder_work()
451 err = 0; in stress_inorder_work()
452 for (n = 0; n < nlocks; n++) { in stress_inorder_work()
457 if (err < 0) in stress_inorder_work()
466 while (n--) in stress_inorder_work()
469 if (err == -EDEADLK) { in stress_inorder_work()
470 if (!time_after(jiffies, stress->timeout)) { in stress_inorder_work()
482 } while (!time_after(jiffies, stress->timeout)); in stress_inorder_work()
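
The stress_inorder_work() fragments use the bulk-acquisition idiom: take the locks in array order and, on failure, release everything taken in that pass; if the failure was -EDEADLK, acquire the contended lock with ww_mutex_lock_slow() and retry the whole pass, which the surrounding retry-until-timeout loop keeps doing. The sketch below isolates a single acquisition pass with that rollback; the timeout and workload bookkeeping of the original are left out.

#include <linux/ww_mutex.h>

/*
 * Sketch: one attempt at taking locks[0..nlocks-1] in order under a
 * single acquire context.  Returns 0 with every lock held, or a
 * negative error with none held.
 */
static int lock_all_inorder(struct ww_mutex *locks, int nlocks,
                            struct ww_acquire_ctx *ctx)
{
        int contended = -1;
        int n, err;

retry:
        err = 0;
        for (n = 0; n < nlocks; n++) {
                if (n == contended)     /* already taken via the slow path */
                        continue;

                err = ww_mutex_lock(&locks[n], ctx);
                if (err < 0)
                        break;
        }
        if (!err)
                return 0;

        /* Roll back everything acquired in this pass. */
        if (contended > n)
                ww_mutex_unlock(&locks[contended]);
        contended = n;
        while (n--)
                ww_mutex_unlock(&locks[n]);

        if (err == -EDEADLK) {
                ww_mutex_lock_slow(&locks[contended], ctx);
                goto retry;
        }

        return err;
}
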
501 order = get_random_order(stress->nlocks); in stress_reorder_work()
505 for (n = 0; n < stress->nlocks; n++) { in stress_reorder_work()
510 ll->lock = &stress->locks[order[n]]; in stress_reorder_work()
511 list_add(&ll->link, &locks); in stress_reorder_work()
520 err = ww_mutex_lock(ll->lock, &ctx); in stress_reorder_work()
526 ww_mutex_unlock(ln->lock); in stress_reorder_work()
528 if (err != -EDEADLK) { in stress_reorder_work()
534 ww_mutex_lock_slow(ll->lock, &ctx); in stress_reorder_work()
535 list_move(&ll->link, &locks); /* restarts iteration */ in stress_reorder_work()
540 ww_mutex_unlock(ll->lock); in stress_reorder_work()
543 } while (!time_after(jiffies, stress->timeout)); in stress_reorder_work()
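
stress_reorder_work() walks a shuffled list of locks instead of an array: on -EDEADLK it unlocks every earlier entry, takes the contended lock with ww_mutex_lock_slow(), and moves that entry to the head of the list so the walk effectively restarts behind it. A trimmed sketch of that inner walk; struct reorder_lock is an assumed name for the ll->lock/ll->link node used above.

#include <linux/list.h>
#include <linux/ww_mutex.h>

struct reorder_lock {
        struct list_head link;
        struct ww_mutex *lock;
};

/* Acquire every lock on @locks in list order, backing off on -EDEADLK. */
static int lock_list(struct list_head *locks, struct ww_acquire_ctx *ctx)
{
        struct reorder_lock *ll, *ln;
        int err;

        list_for_each_entry(ll, locks, link) {
                err = ww_mutex_lock(ll->lock, ctx);
                if (!err)
                        continue;

                /* Drop every lock acquired earlier in this walk. */
                ln = ll;
                list_for_each_entry_continue_reverse(ln, locks, link)
                        ww_mutex_unlock(ln->lock);

                if (err != -EDEADLK)    /* unexpected for a ww_mutex */
                        return err;     /* nothing is held at this point */

                ww_mutex_lock_slow(ll->lock, ctx);
                list_move(&ll->link, locks);    /* restarts the walk behind ll */
        }

        return 0;
}
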
554 const int nlocks = stress->nlocks; in stress_one_work()
555 struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks); in stress_one_work()
568 } while (!time_after(jiffies, stress->timeout)); in stress_one_work()
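
stress_one_work() is the degenerate case: pick one ww_mutex at random from the shared array and lock/unlock it, context-free, until the jiffies deadline passes, the same time_after() loop the other stress workers use. A small sketch of that loop; the dummy workload between lock and unlock is omitted.

#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/ww_mutex.h>

static void hammer_one(struct ww_mutex *locks, int nlocks,
                       unsigned long timeout)
{
        struct ww_mutex *lock = locks + get_random_u32_below(nlocks);

        do {
                if (ww_mutex_lock(lock, NULL))
                        break;          /* not expected without a context */
                ww_mutex_unlock(lock);
        } while (!time_after(jiffies, timeout));
}
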
571 #define STRESS_INORDER BIT(0)
584 return -ENOMEM; in stress()
590 return -ENOMEM; in stress()
593 for (n = 0; n < nlocks; n++) in stress()
596 count = 0; in stress()
597 for (n = 0; nthreads; n++) { in stress()
603 case 0: in stress()
622 INIT_WORK(&stress->work, fn); in stress()
623 stress->locks = locks; in stress()
624 stress->nlocks = nlocks; in stress()
625 stress->timeout = jiffies + 2*HZ; in stress()
627 queue_work(wq, &stress->work); in stress()
628 nthreads--; in stress()
633 for (n = 0; n < nlocks; n++) in stress()
638 return 0; in stress()
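
The stress() fragments show the driver side: allocate and initialise the shared array of ww_mutexes, hand each queued worker a struct stress carrying the array, its size and a two-second jiffies deadline, and tear the locks down after the run. The sketch below condenses that flow; the struct stress layout is inferred from the stress-> assignments above, and the original's round-robin selection of the worker function (the "case 0:" fragment) plus the freeing of the per-worker allocations are omitted.

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/ww_mutex.h>

/* Assumed shape of the per-worker state, based on the stress-> fragments. */
struct stress {
        struct work_struct work;
        struct ww_mutex *locks;
        int nlocks;
        unsigned long timeout;
};

static DEFINE_WW_CLASS(ww_class);

static int stress_sketch(struct workqueue_struct *wq, work_func_t fn,
                         int nlocks, int nthreads)
{
        struct ww_mutex *locks;
        int n;

        locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
        if (!locks)
                return -ENOMEM;

        for (n = 0; n < nlocks; n++)
                ww_mutex_init(&locks[n], &ww_class);

        for (n = 0; n < nthreads; n++) {
                struct stress *stress = kmalloc(sizeof(*stress), GFP_KERNEL);

                if (!stress)
                        break;

                INIT_WORK(&stress->work, fn);
                stress->locks = locks;
                stress->nlocks = nlocks;
                stress->timeout = jiffies + 2*HZ;
                queue_work(wq, &stress->work);
        }

        flush_workqueue(wq);

        for (n = 0; n < nlocks; n++)
                ww_mutex_destroy(&locks[n]);
        kfree(locks);

        return 0;
}
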
650 wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0); in test_ww_mutex_init()
652 return -ENOMEM; in test_ww_mutex_init()
666 for (i = 0; i < 4; i++) { in test_ww_mutex_init()
689 return 0; in test_ww_mutex_init()