/linux/drivers/infiniband/hw/hfi1/

iowait.h:
     11  #include <linux/wait.h>
     57   * @list: used to add/insert into QP/PQ wait lists
     63   * @lock: lock protected head of wait queue
     65   * @wait_dma: wait for sdma_busy == 0
     66   * @wait_pio: wait for pio_busy == 0
     71   * @flags: wait flags (one per QP)
     72   * @wait: SE array for multiple legs
    102          struct iowait_work *wait,
    107          void (*wakeup)(struct iowait *wait, int reason);
    108          void (*sdma_drained)(struct iowait *wait);
    [all …]

iowait.c:
     12  void iowait_set_flag(struct iowait *wait, u32 flag)
     14          trace_hfi1_iowait_set(wait, flag);
     15          set_bit(flag, &wait->flags);
     18  bool iowait_flag_set(struct iowait *wait, u32 flag)
     20          return test_bit(flag, &wait->flags);
     23  inline void iowait_clear_flag(struct iowait *wait, u32 flag)
     25          trace_hfi1_iowait_clear(wait, flag);
     26          clear_bit(flag, &wait->flags);
     30   * iowait_init() - initialize wait structure
     31   * @wait: wait struct to initialize
    [all …]

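These flag helpers are thin wrappers over the kernel's atomic bitops plus a
tracepoint. A minimal sketch of the same pattern, with a hypothetical my_wait
structure (set_bit(), test_bit() and clear_bit() are the real API; the rest
is illustrative):

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define MY_PENDING 0                    /* hypothetical flag bit */

    struct my_wait {
            unsigned long flags;            /* atomic bitmask, one bit per condition */
    };

    static void my_set_flag(struct my_wait *w, u32 flag)
    {
            set_bit(flag, &w->flags);       /* atomically set one bit */
    }

    static bool my_flag_set(struct my_wait *w, u32 flag)
    {
            return test_bit(flag, &w->flags);
    }

    static void my_clear_flag(struct my_wait *w, u32 flag)
    {
            clear_bit(flag, &w->flags);     /* atomically clear it again */
    }

Because the bitops are atomic, producers and the wakeup path can set and test
these flags without taking the iowait lock.
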
trace_iowait.h:
     17          TP_PROTO(struct iowait *wait, u32 flag),
     18          TP_ARGS(wait, flag),
     26                  __entry->addr = (unsigned long)wait;
     27                  __entry->flags = wait->flags;
     29                  __entry->qpn = iowait_to_qp(wait)->ibqp.qp_num;
     41          TP_PROTO(struct iowait *wait, u32 flag),
     42          TP_ARGS(wait, flag));
     45          TP_PROTO(struct iowait *wait, u32 flag),
     46          TP_ARGS(wait, flag));

vnic_sdma.c:
    155          ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
    163          iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
    182   * sdma descriptors available to send the packet. It adds Tx queue's wait
    187                                 struct iowait_work *wait,
    193                  container_of(wait->iow, struct hfi1_vnic_sdma, wait);
    202          if (list_empty(&vnic_sdma->wait.list)) {
    203                  iowait_get_priority(wait->iow);
    204                  iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
    214   * queue's wait structure was previously added to sdma engine's dmawait list.
    217  static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
    [all …]

/linux/drivers/gpu/drm/i915/display/

intel_display_rps.c:
     15          struct wait_queue_entry wait;
     24          struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
     25          struct i915_request *rq = wait->request;
     36          drm_crtc_vblank_put(wait->crtc);
     38          list_del(&wait->wait.entry);
     39          kfree(wait);
     46          struct wait_rps_boost *wait;
     57          wait = kmalloc(sizeof(*wait), GFP_KERNEL);
     58          if (!wait) {
     63          wait->request = to_request(dma_fence_get(fence));
    [all …]

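Here i915 embeds a wait_queue_entry with a custom wake function, so the
"wakeup" itself performs the boost and frees the entry instead of waking a
sleeping task. A sketch of that pattern under assumed names (boost_waiter,
boost_wake and queue_boost are illustrative; wait_queue_entry,
init_waitqueue_func_entry() and add_wait_queue() are the real API):

    #include <linux/wait.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct boost_waiter {
            struct wait_queue_entry wait;
            void *payload;                  /* e.g. a request to boost */
    };

    /* Runs in the waker's context when the queue head is woken. */
    static int boost_wake(struct wait_queue_entry *entry, unsigned int mode,
                          int flags, void *key)
    {
            struct boost_waiter *w = container_of(entry, struct boost_waiter, wait);

            /* ... act on w->payload here ... */
            list_del(&w->wait.entry);       /* detach before freeing */
            kfree(w);
            return 1;
    }

    static int queue_boost(struct wait_queue_head *wq, void *payload)
    {
            struct boost_waiter *w = kmalloc(sizeof(*w), GFP_KERNEL);

            if (!w)
                    return -ENOMEM;
            w->payload = payload;
            init_waitqueue_func_entry(&w->wait, boost_wake);
            add_wait_queue(wq, &w->wait);
            return 0;
    }

The benefit is that no kernel thread has to block: the callback runs directly
from whoever signals the queue (here, the vblank/fence completion path).
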
/linux/drivers/gpu/drm/nouveau/nvif/

timer.c:
     26  nvif_timer_wait_test(struct nvif_timer_wait *wait)
     28          u64 time = nvif_device_time(wait->device);
     30          if (wait->reads == 0) {
     31                  wait->time0 = time;
     32                  wait->time1 = time;
     35          if (wait->time1 == time) {
     36                  if (WARN_ON(wait->reads++ == 16))
     39                  wait->time1 = time;
     40                  wait->reads = 1;
     43          if (wait->time1 - wait->time0 > wait->limit)
    [all …]

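The test combines a timeout with stall detection: if sixteen consecutive
reads return the same timestamp, the timebase itself is assumed dead and the
wait fails loudly rather than spinning forever. A generic sketch of that
logic, with an assumed read_clock() callback standing in for
nvif_device_time():

    #include <linux/bug.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct poll_wait {
            u64 (*read_clock)(void *dev);   /* assumed clock accessor */
            void *dev;
            u64 time0, time1;               /* first and latest reading */
            u64 limit;                      /* allowed elapsed time, clock units */
            int reads;                      /* consecutive identical readings */
    };

    /* Returns elapsed time so far, or -ETIMEDOUT when the budget is spent
     * (or the clock has visibly stopped ticking).
     */
    static s64 poll_wait_test(struct poll_wait *w)
    {
            u64 time = w->read_clock(w->dev);

            if (w->reads == 0) {
                    w->time0 = time;        /* first sample opens the window */
                    w->time1 = time;
            }

            if (w->time1 == time) {
                    if (WARN_ON(w->reads++ == 16))
                            return -ETIMEDOUT;      /* clock is stuck */
            } else {
                    w->time1 = time;
                    w->reads = 1;
            }

            if (w->time1 - w->time0 > w->limit)
                    return -ETIMEDOUT;

            return w->time1 - w->time0;
    }
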
/linux/drivers/net/ethernet/cisco/enic/

vnic_dev.c:
    213                             int wait)
    242          for (delay = 0; delay < wait; delay++) {
    281                             int wait)
    332          for (delay = 0; delay < wait; delay++) {
    440                          u64 *a0, u64 *a1, int wait)
    452          err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
    473          enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
    480          err = vdev->devcmd_rtn(vdev, cmd, wait);
    501                  u64 *a0, u64 *a1, int wait)
    508                          a0, a1, wait);
    [all …]

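Throughout the vNIC devcmd path, wait is a poll budget in loop iterations
rather than a jiffies timeout: each pass sleeps a fixed interval and rechecks
a firmware status register. A sketch of that convention, with an assumed
done_reg_read() accessor for the completion register:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int devcmd_poll(void *dev, u32 (*done_reg_read)(void *dev), int wait)
    {
            int delay;

            for (delay = 0; delay < wait; delay++) {
                    udelay(100);            /* fixed poll interval */
                    if (done_reg_read(dev))
                            return 0;       /* firmware completed the command */
            }
            return -ETIMEDOUT;              /* budget of `wait` polls exhausted */
    }

With a 100 microsecond interval, the wait = 1000 seen in the fnic/snic
callers below bounds a command at roughly 100 ms.
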
/linux/arch/mips/kernel/

idle.c:
      3   * MIPS idle loop and WAIT instruction support.
     23   * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
     24   * the implementation of the "wait" feature differs between CPU families. This
     25   * points to the function that implements CPU specific wait.
     26   * The wait instruction stops the pipeline and reduces the power consumption of
     58  "        wait                    \n"
     64   * have any pending stores when the WAIT instruction is executed.
     76  "        wait                    \n"
     82   * Au1 'wait' is only useful when the 32kHz counter is used as timer,
     97  "        wait                    \n"
    [all …]

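The per-family variants are dispatched through a function pointer chosen once
at boot. A condensed, illustrative sketch of that shape (the probe helper
has_wait_insn() and mips_idle() are placeholders, not the file's actual
probing code; the inline asm mirrors the excerpt above):

    #include <linux/sched.h>            /* need_resched() */
    #include <linux/types.h>

    void (*cpu_wait)(void);             /* set at boot per CPU family */

    static void r4k_wait_irqoff(void)
    {
            if (!need_resched())
                    __asm__ __volatile__(
                    "       .set    push            \n"
                    "       .set    mips3           \n"
                    "       wait                    \n"
                    "       .set    pop             \n");
    }

    static bool has_wait_insn(void)     /* stand-in for real CPU probing */
    {
            return true;
    }

    static void select_cpu_wait(void)   /* boot-time selection, illustrative */
    {
            if (has_wait_insn())
                    cpu_wait = r4k_wait_irqoff;
    }

    static void mips_idle(void)         /* simplified idle-loop hook */
    {
            if (cpu_wait)
                    cpu_wait();         /* stall the pipeline until an interrupt */
    }
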
/linux/fs/

fs_pin.c:
     16          spin_lock_irq(&pin->wait.lock);
     18          wake_up_locked(&pin->wait);
     19          spin_unlock_irq(&pin->wait.lock);
     32          wait_queue_entry_t wait;
     38          init_wait(&wait);
     39          spin_lock_irq(&p->wait.lock);
     42                  spin_unlock_irq(&p->wait.lock);
     48          spin_unlock_irq(&p->wait.lock);
     52          __add_wait_queue(&p->wait, &wait);
     55          spin_unlock_irq(&p->wait.lock);
    [all …]

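fs_pin reuses the waitqueue head's own spinlock as the object's state lock,
pairing the locked __add_wait_queue() with wake_up_locked() on the waker
side. A self-contained sketch of that pairing, with a plain `done` flag
standing in for the pin state:

    #include <linux/wait.h>
    #include <linux/sched.h>

    static DECLARE_WAIT_QUEUE_HEAD(wq);
    static int done;                    /* protected by wq.lock */

    static void waker_side(void)
    {
            spin_lock_irq(&wq.lock);
            done = 1;                   /* state change under the wq lock */
            wake_up_locked(&wq);        /* lock already held: no re-lock */
            spin_unlock_irq(&wq.lock);
    }

    static void waiter_side(void)
    {
            DEFINE_WAIT_FUNC(wait, default_wake_function);

            for (;;) {
                    spin_lock_irq(&wq.lock);
                    if (done) {
                            spin_unlock_irq(&wq.lock);
                            break;
                    }
                    __add_wait_queue(&wq, &wait);
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    spin_unlock_irq(&wq.lock);
                    schedule();
                    spin_lock_irq(&wq.lock);
                    __remove_wait_queue(&wq, &wait);
                    spin_unlock_irq(&wq.lock);
            }
    }

Because the condition is only written under wq.lock and the waiter enqueues
itself under the same lock, no wakeup can be lost between the check and the
sleep.
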
/linux/kernel/sched/

swait.c:
      3   * <linux/swait.h> (simple wait queues) implementation:
     84  void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
     86          wait->task = current;
     87          if (list_empty(&wait->task_list))
     88                  list_add_tail(&wait->task_list, &q->task_list);
     91  void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
     96          __prepare_to_swait(q, wait);
    102  long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
    113          list_del_init(&wait->task_list);
    116          __prepare_to_swait(q, wait);
    [all …]

completion.c:
      4   * Generic wait-for-completion handler;
     20          raw_spin_lock_irqsave(&x->wait.lock, flags);
     24          swake_up_locked(&x->wait, wake_flags);
     25          raw_spin_unlock_irqrestore(&x->wait.lock, flags);
     73          raw_spin_lock_irqsave(&x->wait.lock, flags);
     75          swake_up_all_locked(&x->wait);
     76          raw_spin_unlock_irqrestore(&x->wait.lock, flags);
     85          DECLARE_SWAITQUEUE(wait);
     92                  __prepare_to_swait(&x->wait, &wait);
     94                  raw_spin_unlock_irq(&x->wait.lock);
    [all …]

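Completions layer a done counter over an swait queue, which gives the most
common wait idiom in the kernel. A minimal usage sketch of the API this file
implements (the worker thread and names are illustrative):

    #include <linux/completion.h>
    #include <linux/kthread.h>
    #include <linux/err.h>

    static DECLARE_COMPLETION(setup_done);

    static int worker(void *arg)
    {
            /* ... perform the setup the other side is waiting for ... */
            complete(&setup_done);      /* bump ->done, wake one waiter */
            return 0;
    }

    static int start_and_wait(void)
    {
            struct task_struct *t = kthread_run(worker, NULL, "worker");

            if (IS_ERR(t))
                    return PTR_ERR(t);
            wait_for_completion(&setup_done);   /* sleeps on x->wait */
            return 0;
    }

Because ->done is a counter, a complete() issued before anyone waits is not
lost: the next wait_for_completion() consumes it and returns immediately.
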
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/timer/

base.c:
     27  nvkm_timer_wait_test(struct nvkm_timer_wait *wait)
     29          struct nvkm_subdev *subdev = &wait->tmr->subdev;
     30          u64 time = nvkm_timer_read(wait->tmr);
     32          if (wait->reads == 0) {
     33                  wait->time0 = time;
     34                  wait->time1 = time;
     37          if (wait->time1 == time) {
     38                  if (wait->reads++ == 16) {
     43                  wait->time1 = time;
     44                  wait->reads = 1;
    [all …]

/linux/fs/bcachefs/

clock.c:
     63          struct io_clock_wait *wait = container_of(timer,
     66          wait->expired = 1;
     67          wake_up_process(wait->task);
     72          struct io_clock_wait *wait = container_of(timer,
     75          wait->expired = 1;
     76          wake_up_process(wait->task);
     81          struct io_clock_wait wait = {
     88          bch2_io_timer_add(clock, &wait.io_timer);
     90          bch2_io_timer_del(clock, &wait.io_timer);
     97          struct io_clock_wait wait = {
    [all …]

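Rather than a waitqueue, bcachefs parks the current task and lets the timer
callback wake it by task pointer. A generic analogue of that shape using a
plain kernel timer (io_clock_wait itself hangs off bcachefs' IO clock, so
struct one_shot_wait and these helpers are illustrative):

    #include <linux/timer.h>
    #include <linux/sched.h>
    #include <linux/jiffies.h>

    struct one_shot_wait {
            struct timer_list timer;
            struct task_struct *task;
            int expired;
    };

    static void one_shot_fn(struct timer_list *t)
    {
            struct one_shot_wait *w = from_timer(w, t, timer);

            w->expired = 1;
            wake_up_process(w->task);   /* wake the parked task directly */
    }

    static void sleep_for(unsigned long timeout_jiffies)
    {
            struct one_shot_wait w = { .task = current };

            timer_setup_on_stack(&w.timer, one_shot_fn, 0);
            mod_timer(&w.timer, jiffies + timeout_jiffies);

            while (!READ_ONCE(w.expired)) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (READ_ONCE(w.expired))
                            break;
                    schedule();
            }
            __set_current_state(TASK_RUNNING);
            del_timer_sync(&w.timer);
            destroy_timer_on_stack(&w.timer);
    }
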
/linux/net/core/

stream.c:
     21  #include <linux/wait.h>
     41          wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
     50   * sk_stream_wait_connect - Wait for a socket to get into the connected state
     51   * @sk: sock to wait on
     52   * @timeo_p: for how long to wait
     58          DEFINE_WAIT_FUNC(wait, woken_wake_function);
     73          add_wait_queue(sk_sleep(sk), &wait);
     78                          ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)), &wait);
     79          remove_wait_queue(sk_sleep(sk), &wait);
     99          DEFINE_WAIT_FUNC(wait, woken_wake_function);
    [all …]

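The socket code keeps the entry on the queue for the whole wait and relies on
woken_wake_function() plus wait_woken() to close the race between checking
the condition and going to sleep. A sketch of that idiom outside the socket
layer (the ready() callback is an assumed stand-in for the socket-state
test):

    #include <linux/wait.h>
    #include <linux/sched/signal.h>
    #include <linux/errno.h>

    static long wait_until_ready(struct wait_queue_head *wq,
                                 bool (*ready)(void *priv), void *priv,
                                 long timeo)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            long err = 0;

            add_wait_queue(wq, &wait);
            while (!ready(priv)) {
                    if (signal_pending(current)) {
                            err = -ERESTARTSYS;
                            break;
                    }
                    /* Sleeps unless a wakeup already marked us woken. */
                    timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                    if (!timeo) {
                            err = -EAGAIN;  /* timed out */
                            break;
                    }
            }
            remove_wait_queue(wq, &wait);
            return err;
    }
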
/linux/drivers/gpu/drm/omapdrm/

omap_irq.c:
     22          struct omap_irq_wait *wait;
     27          list_for_each_entry(wait, &priv->wait_list, node)
     28                  irqmask |= wait->irqmask;
     35  static void omap_irq_wait_handler(struct omap_irq_wait *wait)
     37          wait->count--;
     38          wake_up(&wait->wq);
     45          struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
     48          init_waitqueue_head(&wait->wq);
     49          wait->irqmask = irqmask;
     50          wait->count = count;
    [all …]

/linux/drivers/scsi/fnic/

vnic_dev.c:
     62                  int wait);
    249  static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
    283          for (delay = 0; delay < wait; delay++) {
    312                  int wait)
    379          for (delay = 0; delay < wait; delay++) {
    501          enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
    508          err = (*vdev->devcmd_rtn)(vdev, cmd, wait);
    518                  u64 *a0, u64 *a1, int wait)
    525          return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
    534          int wait = 1000;
    [all …]

/linux/include/linux/

swait.h:
      8  #include <linux/wait.h>
     12   * Simple waitqueues are semantically very different to regular wait queues
     13   * (wait.h). The most important difference is that the simple waitqueue allows
     36   * For all the above, note that simple wait queues should _only_ be used under
     38   * wait queues in most cases.
     92   * returns true if the wait list is not empty
    105   *      @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state);
    111   *      finish_swait(&wq_head, &wait);
    115   * observe an empty wait list while the waiter might not observe @cond.
    138   * modifications to the wait queue (task_list).
    [all …]

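The header's own usage pattern, filled out into a compilable sketch (the
producer/consumer split and the `cond` flag are illustrative; the swait calls
are the real API, and prepare_to_swait_exclusive()/finish_swait() are the
functions excerpted from swait.c above):

    #include <linux/swait.h>
    #include <linux/sched.h>

    static DECLARE_SWAIT_QUEUE_HEAD(my_swq);
    static bool cond;

    static void producer(void)
    {
            cond = true;                /* publish the condition first */
            swake_up_one(&my_swq);      /* then wake exactly one waiter */
    }

    static void consumer(void)
    {
            DECLARE_SWAITQUEUE(wait);

            for (;;) {
                    prepare_to_swait_exclusive(&my_swq, &wait,
                                               TASK_UNINTERRUPTIBLE);
                    if (cond)
                            break;
                    schedule();
            }
            finish_swait(&my_swq, &wait);
    }

The enqueue-then-check order is what makes the wakeup race-free: by the time
the consumer tests cond, it is already on the list any wakeup will scan.
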
smp.h:
     51                  int wait);
     54                  void *info, bool wait, const struct cpumask *mask);
     69  static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
     71          on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
     80   * @wait: If true, wait (atomically) until function has completed
     83   * If @wait is true, then returns once @func has returned.
     91                  smp_call_func_t func, void *info, bool wait)
     93          on_each_cpu_cond_mask(NULL, func, info, wait, mask);
    103                  smp_call_func_t func, void *info, bool wait)
    105          on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
    [all …]

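A minimal caller of this API: run a short function on every online CPU and
block until all of them have finished (the per-CPU counter is illustrative;
on_each_cpu() and the smp_call_func_t shape are the real interface):

    #include <linux/smp.h>
    #include <linux/percpu.h>
    #include <linux/printk.h>

    static DEFINE_PER_CPU(unsigned long, poke_count);

    /* Runs on each CPU in IPI context with interrupts disabled:
     * keep it short, and never sleep here. */
    static void poke(void *info)
    {
            this_cpu_inc(poke_count);
    }

    static void poke_all_cpus(void)
    {
            on_each_cpu(poke, NULL, 1); /* wait=1: all CPUs done on return */
            pr_info("cpu0 poked %lu times\n", per_cpu(poke_count, 0));
    }
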
umh.h:
     14  #define UMH_NO_WAIT     0x00    /* don't wait at all */
     15  #define UMH_WAIT_EXEC   0x01    /* wait for the exec, but not the process */
     16  #define UMH_WAIT_PROC   0x02    /* wait for the process to complete */
     17  #define UMH_KILLABLE    0x04    /* wait for EXEC/PROC killable */
     18  #define UMH_FREEZABLE   0x08    /* wait for EXEC/PROC freezable */
     26          int wait;
     34  call_usermodehelper(const char *path, char **argv, char **envp, int wait);
     43  call_usermodehelper_exec(struct subprocess_info *info, int wait);

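Typical use of these flags: spawn a helper and block until it exits, allowing
the wait to be killed. The helper path and arguments below are illustrative;
call_usermodehelper() and the UMH_* flags are the real API:

    #include <linux/umh.h>

    static int run_helper(void)
    {
            char *argv[] = { "/sbin/my-helper", "--check", NULL };
            char *envp[] = { "HOME=/",
                             "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

            /* Returns the helper's exit status, or a negative errno if it
             * could not be spawned. */
            return call_usermodehelper(argv[0], argv, envp,
                                       UMH_WAIT_PROC | UMH_KILLABLE);
    }
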
/linux/net/atm/

svc.c:
     13  #include <linux/wait.h>
     53          DEFINE_WAIT(wait);
     61          prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
     66          finish_wait(sk_sleep(sk), &wait);
    103          DEFINE_WAIT(wait);
    137          prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
    142          finish_wait(sk_sleep(sk), &wait);
    159          DEFINE_WAIT(wait);
    219          prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
    223          prepare_to_wait(sk_sleep(sk), &wait,
    [all …]

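This is the classic prepare_to_wait()/finish_wait() loop: queue first, then
re-check the condition, and only sleep if it still does not hold. A generic
sketch with a `flag` stand-in for the ATM protocol state:

    #include <linux/wait.h>
    #include <linux/sched/signal.h>
    #include <linux/errno.h>

    static int wait_for_flag(struct wait_queue_head *wq, const int *flag)
    {
            DEFINE_WAIT(wait);
            int err = 0;

            for (;;) {
                    prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
                    if (READ_ONCE(*flag))   /* check after queueing */
                            break;
                    if (signal_pending(current)) {
                            err = -ERESTARTSYS;
                            break;
                    }
                    schedule();
            }
            finish_wait(wq, &wait);
            return err;
    }
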
/linux/drivers/scsi/snic/

vnic_dev.c:
     55                  int wait);
    248                  int wait)
    315          for (delay = 0; delay < wait; delay++) {
    434                  u64 *a0, u64 *a1, int wait)
    442          err = (*vdev->devcmd_rtn)(vdev, cmd, wait);
    454          int wait = VNIC_DVCMD_TMO;
    467          err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
    479          int wait = VNIC_DVCMD_TMO;
    485          err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
    511          int wait = VNIC_DVCMD_TMO;
    [all …]

/linux/Documentation/locking/

ww-mutex-design.rst:
      2  Wound/Wait Deadlock-Proof Mutex Design
      5  Please read mutex-design.rst first, as it applies to wait/wound mutexes too.
     14  a handful of situations where the driver needs to wait for buffers to
     37  and the deadlock handling approach is called Wait-Die. The name is based on
     41  and dies. Hence Wait-Die.
     42  There is also another algorithm called Wound-Wait:
     46  transaction. Hence Wound-Wait.
     48  However, the Wound-Wait algorithm is typically stated to generate fewer backoffs
     49  compared to Wait-Die, but is, on the other hand, associated with more work than
     50  Wait-Die when recovering from a backoff. Wound-Wait is also a preemptive
    [all …]

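The backoff the document describes looks like this in code: on -EDEADLK the
younger context releases everything it holds, sleeps on the contended lock
with ww_mutex_lock_slow(), and retries. A two-lock sketch (the ww class and
lock pair are illustrative; the ww_mutex_* calls are the real API):

    #include <linux/ww_mutex.h>
    #include <linux/kernel.h>

    static DEFINE_WW_CLASS(demo_ww_class);

    static void with_both_locked(struct ww_mutex *a, struct ww_mutex *b)
    {
            struct ww_acquire_ctx ctx;
            int ret;

            ww_acquire_init(&ctx, &demo_ww_class);

            ret = ww_mutex_lock(a, &ctx);   /* first lock can't deadlock */
            while (!ret) {
                    ret = ww_mutex_lock(b, &ctx);
                    if (ret != -EDEADLK)
                            break;
                    /* We lost the age contest: drop what we hold, sleep on
                     * the contended lock, then retry in the other order. */
                    ww_mutex_unlock(a);
                    ww_mutex_lock_slow(b, &ctx);
                    swap(a, b);             /* we now hold the new `a` */
                    ret = 0;
            }
            ww_acquire_done(&ctx);          /* no further locks will be taken */

            /* ... operate on both buffers ... */

            ww_mutex_unlock(b);
            ww_mutex_unlock(a);
            ww_acquire_fini(&ctx);
    }
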
/linux/drivers/gpu/drm/ci/xfails/

i915-tgl-fails.txt:
     42  perf_pmu@event-wait,Timeout
     50  perf_pmu@semaphore-wait-idle,Timeout
     61  syncobj_eventfd@timeline-wait-before-signal,Timeout
     62  syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout
     64  syncobj_wait@invalid-single-wait-all-unsubmitted,Timeout
     65  syncobj_wait@multi-wait-all-submitted,Timeout
     66  syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
     67  syncobj_wait@wait-any-complex,Timeout
     68  syncobj_wait@wait-delayed-signal,Timeout

/linux/include/linux/mailbox/

mtk-cmdq-mailbox.h:
     26   * bit 0-11: wait value
     27   * bit 15: 1 - wait, 0 - no wait
     47   * wait for event and clear
     48   * it is just clear if no wait
     49   * format: [wait]  op event update:1 to_wait:1 wait:1
     50   *         [clear] op event update:1 to_wait:0 wait:0

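An illustrative encoding of the field layout those comments describe; only
the bit positions come from the header, while the helper name and the u16
container are assumptions:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define CMDQ_WAIT_VALUE_MASK  GENMASK(11, 0)  /* bits 0-11: wait value */
    #define CMDQ_WAIT_ENABLE      BIT(15)         /* bit 15: 1 = wait, 0 = no wait */

    static inline u16 cmdq_encode_wait(u16 value, bool do_wait)
    {
            u16 arg = value & CMDQ_WAIT_VALUE_MASK;

            if (do_wait)
                    arg |= CMDQ_WAIT_ENABLE;
            return arg;
    }
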
/linux/kernel/locking/

ww_mutex.h:
    165   * Wait-Die:
    170   * Wound-Wait:
    225   * Depending on the algorithm, @a will either need to wait for @b, or die.
    269   * Wait-Die; wake a lesser waiter context (when locks held) such that it can
    294   * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
    345   * waiting behind us on the wait-list, check if they need to die, or wound us.
    350   * This relies on never mixing wait-die/wound-wait on the same wait-list;
    353   * The current task must not be on the wait list.
    440   * Wound-Wait: If we're wounded, kill ourself.
    442   * Wait-Die: If we're trying to acquire a lock already held by an older
    [all …]

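Both algorithms hinge on the same age test: contexts are stamped at
ww_acquire_init() time, and a lower stamp means older. A simplified stand-in
for the kernel's internal comparison (struct demo_ctx and the helper are
illustrative, not the mutex code's actual types):

    struct demo_ctx {
            unsigned long stamp;        /* acquisition order: lower = older */
    };

    /* Wait-Die: a younger requester meeting an older lock holder must die
     * (back off with -EDEADLK); an older requester is allowed to wait.
     * Wound-Wait instead lets the older requester wound the younger holder.
     */
    static bool requester_must_die(const struct demo_ctx *requester,
                                   const struct demo_ctx *holder)
    {
            return (long)(requester->stamp - holder->stamp) > 0;
    }
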