Lines Matching +full:init +full:- +full:delay

1 // SPDX-License-Identifier: GPL-2.0+
3 * Module-based torture test facility for locking
28 #include <linux/delay.h>
38 torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
41 torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
42 torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
46 "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
47 torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
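The matches above come from kernel/locking/locktorture.c, the module-based locking torture test; each hit carries its source line number and the enclosing function or structure. The torture_param() lines declare ordinary module parameters. As a rough sketch of what one such declaration produces (assuming the torture_param() wrapper from <linux/torture.h>, which builds on module_param(); the exact permission bits are an assumption):

/* Roughly what torture_param(int, nwriters_stress, -1, "...") expands to. */
static int nwriters_stress = -1;
module_param(nwriters_stress, int, 0444);
MODULE_PARM_DESC(nwriters_stress, "Number of write-locking stress-test threads");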
69 cpumask_var_t *cm_bind = kp->arg; in param_set_cpumask()
75 ret = -ENOMEM; in param_set_cpumask()
83 pr_warn("%s: %s, all CPUs set\n", kp->name, s); in param_set_cpumask()
91 cpumask_var_t *cm_bind = kp->arg; in param_get_cpumask()
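param_set_cpumask()/param_get_cpumask() back the bind_readers and bind_writers parameters: the setter parses a CPU list and falls back to all CPUs on a bad string, the getter prints the mask back as a CPU list. A minimal sketch in the same shape, using standard cpumask/moduleparam helpers; the example_ names are illustrative, not locktorture's:

#include <linux/cpumask.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int example_set_cpumask(const char *val, const struct kernel_param *kp)
{
        cpumask_var_t *cm_bind = kp->arg;
        int ret = 0;
        char *s;

        if (!alloc_cpumask_var(cm_bind, GFP_KERNEL))
                return -ENOMEM;

        s = kstrdup(val, GFP_KERNEL);
        if (!s) {
                ret = -ENOMEM;
                goto out;
        }
        if (cpulist_parse(s, *cm_bind)) {
                /* Unparseable CPU list: warn and fall back to all CPUs. */
                cpumask_setall(*cm_bind);
                pr_warn("%s: %s, all CPUs set\n", kp->name, s);
        }
        kfree(s);
out:
        return ret;
}

static int example_get_cpumask(char *buffer, const struct kernel_param *kp)
{
        cpumask_var_t *cm_bind = kp->arg;

        return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
}

static const struct kernel_param_ops example_cpumask_ops = {
        .set = example_set_cpumask,
        .get = example_get_cpumask,
};

Such an ops structure would be wired to a parameter with module_param_cb(), passing the address of the cpumask_var_t as the argument, which is how kp->arg ends up pointing at the mask.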
137 void (*init)(void); member
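Source line 137 is the init member of struct lock_torture_ops, the per-lock-type operations table that the rest of these matches dispatch through as cxt.cur_ops. An abridged reconstruction from the cur_ops-> uses below (member order and exact signatures may differ from the current source; struct torture_random_state comes from <linux/torture.h>):

struct lock_torture_ops {
        void (*init)(void);
        void (*exit)(void);
        void (*nested_lock)(int tid, u32 lockset);
        int (*writelock)(int tid);
        void (*write_delay)(struct torture_random_state *trsp);
        void (*task_boost)(struct torture_random_state *trsp);
        void (*writeunlock)(int tid);
        void (*nested_unlock)(int tid, u32 lockset);
        int (*readlock)(int tid);
        void (*read_delay)(struct torture_random_state *trsp);
        void (*readunlock)(int tid);

        unsigned long flags;    /* saved IRQ flags for the _irq lock variants */
        const char *name;
};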
177 /* We want a long delay occasionally to force massive contention. */ in torture_lock_busted_write_delay()
197 * for the new priority, and do any corresponding pi-dance. in __torture_rt_boost()
210 * When @trsp is nil, we want to force-reset the task for in __torture_rt_boost()
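__torture_rt_boost() periodically raises a writer to a real-time priority so the rt_mutex paths exercise priority inheritance (the "pi-dance" at line 197), and a NULL @trsp means "force-reset the task" before the kthread stops (line 210). A hedged sketch of that behaviour only; the example_ name, the boost/deboost ratios, and the use of sched_set_fifo_low()/sched_set_normal() are assumptions standing in for whatever the real function does:

#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/torture.h>

static void example_rt_boost(struct torture_random_state *trsp)
{
        if (!trsp) {
                /* Force-reset to a normal priority before the writer exits. */
                sched_set_normal(current, 0);
                return;
        }
        if (!rt_task(current)) {
                /* Boost roughly once per rt_boost_factor operations. */
                if (!(torture_random(trsp) % rt_boost_factor))
                        sched_set_fifo_low(current);
        } else {
                /* Stay boosted for a while, then drop back to normal. */
                if (!(torture_random(trsp) % (rt_boost_factor * 2)))
                        sched_set_normal(current, 0);
        }
}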
254 /* We want a short delay mostly to emulate likely code, and in torture_spin_lock_write_delay()
255 * we want a long delay occasionally to force massive contention. in torture_spin_lock_write_delay()
260 pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j); in torture_spin_lock_write_delay()
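The write-side delay functions all follow the pattern spelled out at lines 254-255: usually a short delay to emulate realistic lock hold times, occasionally a long one to force massive contention, plus a complaint (line 260) if the delay ran much longer than intended. A compact sketch with illustrative constants; the real torture_spin_lock_write_delay() derives its ratios from the module parameters and writer count:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/torture.h>

static void example_write_delay(struct torture_random_state *trsp)
{
        const unsigned long longdelay_ms = 100; /* assumed values */
        const unsigned long shortdelay_us = 2;
        unsigned long j = jiffies;

        if (!(torture_random(trsp) % (200 * longdelay_ms)))
                mdelay(longdelay_ms);           /* rare: force massive contention */
        else if (!(torture_random(trsp) % 20))
                udelay(shortdelay_us);          /* common: emulate likely code */

        if (jiffies - j > HZ / 4)
                pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
}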
291 cxt.cur_ops->flags = flags; in torture_spin_lock_write_lock_irq()
298 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); in torture_lock_spin_write_unlock_irq()
344 cxt.cur_ops->flags = flags; in torture_raw_spin_lock_write_lock_irq()
351 raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags); in torture_raw_spin_lock_write_unlock_irq()
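The _irq variants here (and the rwlock ones below) save the flags from the irqsave lock into cxt.cur_ops->flags because the lock and unlock happen in separate callbacks, so the flags cannot live on one function's stack. Minimal sketch of the pairing, with a static variable standing in for the ops-table field and example_ names that are not locktorture's:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_irq_flags; /* stands in for cxt.cur_ops->flags */

static int example_write_lock_irq(int tid)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_irq_flags = flags;      /* kept for the separate unlock callback */
        return 0;
}

static void example_write_unlock_irq(int tid)
{
        spin_unlock_irqrestore(&example_lock, example_irq_flags);
}

Storing the flags in shared state is safe here only because they are written and read while the lock itself is held.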
378 /* We want a short delay mostly to emulate likely code, and in torture_rwlock_write_delay()
379 * we want a long delay occasionally to force massive contention. in torture_rwlock_write_delay()
404 /* We want a short delay mostly to emulate likely code, and in torture_rwlock_read_delay()
405 * we want a long delay occasionally to force massive contention. in torture_rwlock_read_delay()
436 cxt.cur_ops->flags = flags; in torture_rwlock_write_lock_irq()
443 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); in torture_rwlock_write_unlock_irq()
452 cxt.cur_ops->flags = flags; in torture_rwlock_read_lock_irq()
459 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); in torture_rwlock_read_unlock_irq()
506 /* We want a long delay occasionally to force massive contention. */ in torture_mutex_delay()
524 for (i = nested_locks - 1; i >= 0; i--) in torture_mutex_nested_unlock()
530 .init = torture_mutex_init,
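Lines 524 and 709 release the optional nested locks in reverse order of acquisition; the nested_lock/nested_unlock callbacks take a per-iteration bitmask choosing which of the extra locks to use. A sketch of the pair, assuming a small static array of mutexes (the example_ names and array size are illustrative; the real array is set up by the type's init() hook and sized by the nested_locks module parameter):

#include <linux/mutex.h>

#define EXAMPLE_MAX_NESTED 8
static struct mutex example_nested[EXAMPLE_MAX_NESTED]; /* mutex_init() each in init() */
static int nested_locks;                                /* module parameter in the real code */

static void example_nested_lock(int tid, u32 lockset)
{
        int i;

        for (i = 0; i < nested_locks; i++)
                if (lockset & (1 << i))
                        mutex_lock(&example_nested[i]);
}

static void example_nested_unlock(int tid, u32 lockset)
{
        int i;

        /* Release in reverse order to mirror the acquisition order. */
        for (i = nested_locks - 1; i >= 0; i--)
                if (lockset & (1 << i))
                        mutex_unlock(&example_nested[i]);
}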
597 err = ww_mutex_lock(ll->lock, ctx); in torture_ww_mutex_lock()
603 ww_mutex_unlock(ln->lock); in torture_ww_mutex_lock()
605 if (err != -EDEADLK) in torture_ww_mutex_lock()
608 ww_mutex_lock_slow(ll->lock, ctx); in torture_ww_mutex_lock()
609 list_move(&ll->link, &list); in torture_ww_mutex_lock()
629 .init = torture_ww_mutex_init,
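The ww_mutex matches at lines 597-609 are the classic wait/wound backoff loop: try each lock in list order; on -EDEADLK, drop everything already held, sleep on the contended lock with ww_mutex_lock_slow(), move it to the head of the list, and let the forward walk re-acquire the rest. A sketch of that loop alone (the struct layout mirrors the ll->lock/ll->link uses above; the full torture_ww_mutex_lock() also builds the list and the ww_acquire_ctx):

#include <linux/list.h>
#include <linux/ww_mutex.h>

struct example_ww_entry {
        struct list_head link;
        struct ww_mutex *lock;
};

static int example_ww_lock_all(struct list_head *list, struct ww_acquire_ctx *ctx)
{
        struct example_ww_entry *ll, *ln;

        list_for_each_entry(ll, list, link) {
                int err = ww_mutex_lock(ll->lock, ctx);

                if (!err)
                        continue;

                /* Back out every lock acquired before the failed one. */
                ln = ll;
                list_for_each_entry_continue_reverse(ln, list, link)
                        ww_mutex_unlock(ln->lock);

                if (err != -EDEADLK)
                        return err;

                /*
                 * Deadlock: wait for the contended lock, then move it to
                 * the head so the continuing walk re-acquires the locks
                 * that were just released after it.
                 */
                ww_mutex_lock_slow(ll->lock, ctx);
                list_move(&ll->link, list);
        }
        return 0;
}

On success the caller would finish with ww_acquire_done() and, once everything has been unlocked again, ww_acquire_fini().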
678 * We want a short delay mostly to emulate likely code, and in torture_rtmutex_delay()
679 * we want a long delay occasionally to force massive contention. in torture_rtmutex_delay()
709 for (i = nested_locks - 1; i >= 0; i--) in torture_rtmutex_nested_unlock()
715 .init = torture_rtmutex_init,
739 /* We want a long delay occasionally to force massive contention. */ in torture_rwsem_write_delay()
761 /* We want a long delay occasionally to force massive contention. */ in torture_rwsem_read_delay()
787 #include <linux/percpu-rwsem.h>
827 .init = torture_percpu_rwsem_init,
851 int tid = lwsp - cxt.lwsa; in lock_torture_writer()
873 cxt.cur_ops->task_boost(&rand); in lock_torture_writer()
874 if (cxt.cur_ops->nested_lock) in lock_torture_writer()
875 cxt.cur_ops->nested_lock(tid, lockset_mask); in lock_torture_writer()
880 cxt.cur_ops->writelock(tid); in lock_torture_writer()
882 lwsp->n_lock_fail++; in lock_torture_writer()
885 lwsp->n_lock_fail++; /* rare, but... */ in lock_torture_writer()
890 __func__, j1 - j); in lock_torture_writer()
892 lwsp->n_lock_acquired++; in lock_torture_writer()
894 cxt.cur_ops->write_delay(&rand); in lock_torture_writer()
898 cxt.cur_ops->writeunlock(tid); in lock_torture_writer()
900 if (cxt.cur_ops->nested_unlock) in lock_torture_writer()
901 cxt.cur_ops->nested_unlock(tid, lockset_mask); in lock_torture_writer()
906 cxt.cur_ops->task_boost(NULL); /* reset prio */ in lock_torture_writer()
918 int tid = lrsp - cxt.lrsa; in lock_torture_reader()
928 cxt.cur_ops->readlock(tid); in lock_torture_reader()
931 lrsp->n_lock_fail++; /* rare, but... */ in lock_torture_reader()
933 lrsp->n_lock_acquired++; in lock_torture_reader()
934 cxt.cur_ops->read_delay(&rand); in lock_torture_reader()
936 cxt.cur_ops->readunlock(tid); in lock_torture_reader()
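lock_torture_writer() and lock_torture_reader() are the stress kthreads; each derives its tid from its slot in the per-thread stats array and then loops through the current type's callbacks until told to stop. A condensed paraphrase of the writer loop visible above, omitting stutter handling, the long_hold logic, and the WARN-based exclusion checks that feed n_lock_fail; the reader loop mirrors it with readlock/read_delay/readunlock and no boosting or nesting:

static int example_writer(void *arg)
{
        struct lock_stress_stats *lwsp = arg;
        int tid = lwsp - cxt.lwsa;      /* slot index doubles as the lock "tid" */
        DEFINE_TORTURE_RANDOM(rand);

        do {
                /* The real code masks this down to nested_locks bits. */
                u32 lockset_mask = torture_random(&rand);

                cxt.cur_ops->task_boost(&rand);         /* maybe rt-boost */
                if (cxt.cur_ops->nested_lock)
                        cxt.cur_ops->nested_lock(tid, lockset_mask);

                cxt.cur_ops->writelock(tid);
                /* Here the real loop WARNs and bumps lwsp->n_lock_fail if the
                 * lock already appears to be write- or read-held. */
                lwsp->n_lock_acquired++;
                cxt.cur_ops->write_delay(&rand);        /* hold the lock a while */
                cxt.cur_ops->writeunlock(tid);

                if (cxt.cur_ops->nested_unlock)
                        cxt.cur_ops->nested_unlock(tid, lockset_mask);
        } while (!torture_must_stop());

        cxt.cur_ops->task_boost(NULL);  /* reset priority before exiting */
        torture_kthread_stopping("example_writer");
        return 0;
}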
945 * Create a lock-torture-statistics message in the specified buffer.
982 * (or the init/cleanup functions when lock_torture_stats thread is not
990 if (cxt.cur_ops->readlock) in lock_torture_stats_print()
1004 if (cxt.cur_ops->readlock) { in lock_torture_stats_print()
1048 …"--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d… in lock_torture_print_module_parms()
1065 if (!smp_load_acquire(&crcp->crc_stop)) { in call_rcu_chain_cb()
1067 call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another. in call_rcu_chain_cb()
1080 return -ENOMEM; in call_rcu_chain_init()
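call_rcu_chain_cb() keeps a self-propagating chain of RCU callbacks running for the call_rcu_chains option: each invocation requeues itself until a stop flag is published, and call_rcu_chain_init() allocates one chain head per requested chain (hence the -ENOMEM at line 1080). Sketch with the struct reconstructed from the crc_rh/crc_stop field names above (the struct name itself is assumed):

#include <linux/rcupdate.h>

struct call_rcu_chain {
        struct rcu_head crc_rh;
        bool crc_stop;
};

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
        struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

        if (!smp_load_acquire(&crcp->crc_stop))
                call_rcu(&crcp->crc_rh, call_rcu_chain_cb);     /* ...and later start another */
}

Tearing the chains down then amounts to publishing crc_stop with smp_store_release() and waiting for the callbacks to drain, for example with rcu_barrier().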
1112 * However cxt->cur_ops.init() may have been invoked, so besides in lock_torture_cleanup()
1113 * performing the underlying torture-specific cleanups, cur_ops.exit() in lock_torture_cleanup()
1135 lock_torture_stats_print(); /* -After- the stats thread is stopped! */ in lock_torture_cleanup()
1156 if (cxt.cur_ops->exit) in lock_torture_cleanup()
1157 cxt.cur_ops->exit(); in lock_torture_cleanup()
1182 return -EBUSY; in lock_torture_init()
1187 if (strcmp(torture_type, cxt.cur_ops->name) == 0) in lock_torture_init()
1191 pr_alert("lock-torture: invalid torture type: \"%s\"\n", in lock_torture_init()
1193 pr_alert("lock-torture types:"); in lock_torture_init()
1195 pr_alert(" %s", torture_ops[i]->name); in lock_torture_init()
1197 firsterr = -EINVAL; in lock_torture_init()
1202 (!cxt.cur_ops->readlock || nreaders_stress == 0)) { in lock_torture_init()
1203 pr_alert("lock-torture: must run at least one locking thread\n"); in lock_torture_init()
1204 firsterr = -EINVAL; in lock_torture_init()
1213 if (cxt.cur_ops->init) { in lock_torture_init()
1214 cxt.cur_ops->init(); in lock_torture_init()
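lock_torture_init() first resolves the torture_type module parameter against the static torture_ops[] table, prints the valid names and fails with -EINVAL on a mismatch, refuses to run with zero locking threads, and only then invokes the chosen type's optional init() hook (for example torture_mutex_init or torture_ww_mutex_init above). A sketch of the lookup step, pulled out into an illustrative example_find_ops() helper:

static struct lock_torture_ops *example_find_ops(const char *torture_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                if (strcmp(torture_type, torture_ops[i]->name) == 0)
                        return torture_ops[i];

        pr_alert("lock-torture: invalid torture type: \"%s\"\n", torture_type);
        pr_alert("lock-torture types:");
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                pr_alert(" %s", torture_ops[i]->name);
        pr_alert("\n");
        return NULL;    /* caller sets firsterr = -EINVAL and unwinds */
}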
1240 firsterr = -ENOMEM; in lock_torture_init()
1250 if (cxt.cur_ops->readlock) { in lock_torture_init()
1257 * of threads as the writer-only locks default. in lock_torture_init()
1270 firsterr = -ENOMEM; in lock_torture_init()
1319 firsterr = -ENOMEM; in lock_torture_init()
1328 if (cxt.cur_ops->readlock) { in lock_torture_init()
1336 firsterr = -ENOMEM; in lock_torture_init()
1361 torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers); in lock_torture_init()
1364 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) in lock_torture_init()
1372 torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers); in lock_torture_init()
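The tail of lock_torture_init() creates the writer and reader kthreads in an interleaved loop and, when bind_writers/bind_readers were supplied, pins each new task to the requested cpumask via torture_sched_setaffinity() (lines 1361 and 1372). A loose sketch of that loop; the error handling (torture_init_error()/goto unwind) is dropped, and the cpumask_empty() checks are an assumption about how an unset mask is detected:

        for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
                           j < cxt.nrealreaders_stress; i++, j++) {
                if (i < cxt.nrealwriters_stress) {
                        firsterr = torture_create_kthread(lock_torture_writer,
                                                          &cxt.lwsa[i], writer_tasks[i]);
                        if (!cpumask_empty(bind_writers))
                                torture_sched_setaffinity(writer_tasks[i]->pid,
                                                          bind_writers);
                }
                if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
                        continue;
                firsterr = torture_create_kthread(lock_torture_reader,
                                                  &cxt.lrsa[j], reader_tasks[j]);
                if (!cpumask_empty(bind_readers))
                        torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
        }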