/linux/kernel/sched/
completion.c
    // SPDX-License-Identifier: GPL-2.0
    /* Generic wait-for-completion handler; …
     * wait_for_completion default blocks whereas semaphore default non-block. … */
    complete_with_flags():
        raw_spin_lock_irqsave(&x->wait.lock, flags);
        if (x->done != UINT_MAX)
            x->done++;
        swake_up_locked(&x->wait, wake_flags);
        raw_spin_unlock_irqrestore(&x->wait.lock, flags);
    /* complete() - signals a single thread waiting on this completion
     * @x: holds the state of this particular completion */
    …

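The complete() path shown above pairs with wait_for_completion() on the consumer side. A minimal sketch of that pairing, assuming a kernel-module context (struct my_dev, my_irq_handler and the 100 ms timeout are invented for illustration):

    #include <linux/completion.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct my_dev {
            struct completion xfer_done;
    };

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            struct my_dev *dev = data;

            complete(&dev->xfer_done);      /* wakes one waiter; x->done saturates at UINT_MAX */
            return IRQ_HANDLED;
    }

    static int my_start_and_wait(struct my_dev *dev)
    {
            init_completion(&dev->xfer_done);
            /* ... start the hardware operation here ... */
            if (!wait_for_completion_timeout(&dev->xfer_done, msecs_to_jiffies(100)))
                    return -ETIMEDOUT;      /* a zero return means the timeout elapsed */
            return 0;
    }
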
swait.c
    // SPDX-License-Identifier: GPL-2.0
    /* <linux/swait.h> (simple wait queues) implementation */
    __init_swait_queue_head():
        raw_spin_lock_init(&q->lock);
        lockdep_set_class_and_name(&q->lock, key, name);
        INIT_LIST_HEAD(&q->task_list);
    swake_up_locked():
        if (list_empty(&q->task_list))
            …
        curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
        try_to_wake_up(curr->task, TASK_NORMAL, wake_flags);
        list_del_init(&curr->task_list);
    swake_up_all_locked():
        while (!list_empty(&q->task_list))
            …
    …

wait.c
    // SPDX-License-Identifier: GPL-2.0-only
    __init_waitqueue_head():
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
    add_wait_queue():
        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        …
        spin_unlock_irqrestore(&wq_head->lock, flags);
    add_wait_queue_exclusive():
        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        …
        spin_unlock_irqrestore(&wq_head->lock, flags);
    …

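The two add_wait_queue() variants above differ only in WQ_FLAG_EXCLUSIVE, which controls whether wake_up() wakes every waiter or stops after one exclusive waiter. A minimal sketch of the usual wait/wake pattern built on these primitives (struct my_fifo and its flag are invented):

    #include <linux/wait.h>
    #include <linux/types.h>

    struct my_fifo {
            wait_queue_head_t wq;
            bool data_ready;
    };

    static void my_fifo_init(struct my_fifo *f)
    {
            init_waitqueue_head(&f->wq);
            f->data_ready = false;
    }

    static int my_fifo_consume(struct my_fifo *f)
    {
            /* sleeps until the condition holds; returns -ERESTARTSYS on a signal */
            return wait_event_interruptible(f->wq, f->data_ready);
    }

    static void my_fifo_produce(struct my_fifo *f)
    {
            f->data_ready = true;
            /* wakes all non-exclusive waiters and at most one exclusive waiter */
            wake_up(&f->wq);
    }
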
/linux/fs/bcachefs/
six.c
    // SPDX-License-Identifier: GPL-2.0
    six_set_bitmask():
        if ((atomic_read(&lock->state) & mask) != mask)
            atomic_or(mask, &lock->state);
    six_clear_bitmask():
        if (atomic_read(&lock->state) & mask)
            atomic_and(~mask, &lock->state);
    six_set_owner():
        EBUG_ON(lock->owner);
        lock->owner = owner;
        …
        EBUG_ON(lock->owner != current);
    pcpu_read_count():
        read_count += *per_cpu_ptr(lock->readers, cpu);
    /* __do_six_trylock() - main trylock routine */
    …

six.h
    /* SPDX-License-Identifier: GPL-2.0 */
    /* … but with an additional state: read/shared, intent, exclusive/write.
     *
     * The purpose of the intent state is to allow for greater concurrency on tree …
     * … But by adding an intent state, which is exclusive with other intent locks but …
     *
     *   six_lock_read(&foo->lock);
     *   six_unlock_read(&foo->lock);
     *
     *   six_lock_intent(&foo->lock);
     *   six_lock_write(&foo->lock);
     *   six_unlock_write(&foo->lock);
     *   six_unlock_intent(&foo->lock);
     */
    …

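A sketch of the read/intent/write pattern documented in the header comment above, in the spirit of the btree use case it describes. struct my_node is invented, and the exact six_lock_*() entry-point signatures have changed between kernel versions, so treat this as an illustration of the documented pattern rather than a drop-in:

    #include "six.h"   /* bcachefs-local header */

    struct my_node {
            struct six_lock lock;
            /* ... node contents ... */
    };

    static void my_node_read(struct my_node *b)
    {
            six_lock_read(&b->lock);        /* shared with other readers and intent holders */
            /* ... read the node ... */
            six_unlock_read(&b->lock);
    }

    static void my_node_update(struct my_node *b)
    {
            six_lock_intent(&b->lock);      /* excludes other updaters; readers keep going */
            /* ... prepare the update ... */
            six_lock_write(&b->lock);       /* excludes readers only for the short apply step */
            /* ... apply the update ... */
            six_unlock_write(&b->lock);
            six_unlock_intent(&b->lock);
    }
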
/linux/drivers/gpu/drm/i915/display/
intel_display_rps.c
    // SPDX-License-Identifier: MIT
    struct wait_queue_entry wait;        /* member of struct wait_rps_boost */
    do_rps_boost():
        struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
        struct i915_request *rq = wait->request;
        …
        drm_crtc_vblank_put(wait->crtc);
        …
        list_del(&wait->wait.entry);
        kfree(wait);
    intel_display_rps_boost_after_vblank():
        struct wait_rps_boost *wait;
        …
        if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
            …
        wait = kmalloc(sizeof(*wait), GFP_KERNEL);
    …

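do_rps_boost() above is a wake-callback wait_queue_entry: wake_up() on the waitqueue invokes a function instead of waking a task, and the callback uses container_of() to reach its enclosing allocation before freeing it. A generic sketch of that pattern (struct my_waiter, my_wake_fn and my_arm are invented names):

    #include <linux/wait.h>
    #include <linux/slab.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>

    struct my_waiter {
            struct wait_queue_entry wait;
            void *payload;
    };

    static int my_wake_fn(struct wait_queue_entry *entry, unsigned int mode,
                          int flags, void *key)
    {
            struct my_waiter *w = container_of(entry, struct my_waiter, wait);

            /* ... act on w->payload in the waker's context ... */
            list_del(&w->wait.entry);       /* one-shot: drop ourselves from the queue */
            kfree(w);
            return 1;                       /* count as a woken waiter */
    }

    static int my_arm(struct wait_queue_head *wq, void *payload)
    {
            struct my_waiter *w = kmalloc(sizeof(*w), GFP_KERNEL);

            if (!w)
                    return -ENOMEM;
            w->payload = payload;
            init_waitqueue_func_entry(&w->wait, my_wake_fn);
            add_wait_queue(wq, &w->wait);
            return 0;
    }
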
hsw_ips.c
    // SPDX-License-Identifier: MIT
    hsw_ips_enable():
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        …
        if (!crtc_state->ips_enabled)
            …
        /* We can only enable IPS after we enable a plane and wait for a vblank …
         * … a vblank wait. */
        drm_WARN_ON(display->drm,
                    !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
        …
        if (display->ips.false_color)
            …
        drm_WARN_ON(display->drm, …
    …

/linux/drivers/mtd/chips/
cfi_cmdset_0020.c
    // SPDX-License-Identifier: GPL-2.0
    /* … - completely revamped method functions so they are aware and …
     *    - scalability vs code size is completely set at compile-time
     *    - optimized write buffer method
     * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
     *    - modified Intel Command Set 0x0001 to support ST Advanced Architecture
     *    - added a writev function
     * 07/13/2005 Joern Engel <joern@wh.fh-wedel.de>
     *    - Plugged memory leak in cfi_staa_writev().
     */
    cfi_tell_features():
        printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
    …

/linux/include/linux/
swait.h
    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/wait.h>
    /* Simple waitqueues are semantically very different to regular wait queues
     * (wait.h). The most important difference is that the simple waitqueue allows
     * for deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold …
     *
     *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; …
     *    … sleeper state.
     *  - the !exclusive mode; because that leads to O(n) wakeups, everything is …
     *  - custom wake callback functions; because you cannot give any guarantees …
     *
     * As a side effect of these; the data structures are slimmer albeit more ad-hoc.
     */
    …

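A minimal sketch of the simple-waitqueue API these constraints leave you with (the names my_swait, my_done, my_waiter and my_waker are invented; the *_one/*_exclusive function names follow current kernels):

    #include <linux/swait.h>
    #include <linux/types.h>

    static DECLARE_SWAIT_QUEUE_HEAD(my_swait);
    static bool my_done;

    static void my_waiter(void)
    {
            /* only exclusive (one-waiter-at-a-time) sleeps exist, so wakeups stay O(1) */
            swait_event_exclusive(my_swait, READ_ONCE(my_done));
    }

    static void my_waker(void)
    {
            WRITE_ONCE(my_done, true);
            swake_up_one(&my_swait);        /* wakes exactly one waiter */
    }
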
/linux/include/soc/qcom/
tcs.h
    /* SPDX-License-Identifier: GPL-2.0 */
    /* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. */
    /* rpmh_state: state for the request
     * RPMH_SLEEP_STATE: State of the resource when the processor subsystem …
     * RPMH_WAKE_ONLY_STATE: Resume resource state to the value previously …
     * RPMH_ACTIVE_ONLY_STATE: Active or AMC mode requests. Resource state …
     * @data: the resource state request
     * @wait: … */
    u32 wait;
    enum rpmh_state state;
    …
/linux/drivers/scsi/isci/
phy.h
    /* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
     * … Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */
    /* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
     * before restarting the starting state machine. Technically, the old parallel …
     * … notification from the hardware that we restart the hardware OOB state … */
    /* isci_phy - hba local phy infrastructure
     * @phy_index: physical index relative to the controller (0-3) */
    /* struct sci_phy_properties - This structure defines the properties common to … */
    /* struct sci_sas_phy_properties - This structure defines the properties, … */
    …

/linux/drivers/staging/gpib/nec7210/
nec7210.c
    // SPDX-License-Identifier: GPL-2.0
    nec7210_enable_eos():
        priv->auxa_bits |= HR_REOS;
        …
        priv->auxa_bits |= HR_BIN;
        …
        priv->auxa_bits &= ~HR_BIN;
        write_byte(priv, priv->auxa_bits, AUXMR);
    nec7210_disable_eos():
        priv->auxa_bits &= ~HR_REOS;
        write_byte(priv, priv->auxa_bits, AUXMR);
    nec7210_parallel_poll():
        clear_bit(COMMAND_READY_BN, &priv->state);
        …
        // wait for result  FIXME: support timeouts
        ret = wait_event_interruptible(board->wait, test_bit(COMMAND_READY_BN, &priv->state));
    …

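The FIXME above notes that the parallel-poll wait has no timeout. A hedged sketch of the timeout-capable variant of that same wait-on-a-bit pattern; the waitqueue, flag word and one-second value below are invented stand-ins for the driver's board->wait and priv->state:

    #include <linux/wait.h>
    #include <linux/bitops.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
    static unsigned long demo_state;
    #define DEMO_READY_BIT 0

    static int wait_for_ready_with_timeout(void)
    {
            long left = wait_event_interruptible_timeout(demo_wait,
                                    test_bit(DEMO_READY_BIT, &demo_state),
                                    msecs_to_jiffies(1000));
            if (left == 0)
                    return -ETIMEDOUT;      /* timed out, condition still false */
            if (left < 0)
                    return left;            /* -ERESTARTSYS: interrupted by a signal */
            return 0;                       /* condition became true in time */
    }
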
/linux/drivers/net/ethernet/sfc/falcon/
selftest.c
    // SPDX-License-Identifier: GPL-2.0-only
    /* Copyright 2005-2006 Fen Systems Ltd.
     * Copyright 2006-2012 Solarflare Communications Inc. */
    /* - All IRQs may be disabled on a CPU for a *long* time by e.g. a …
     * - The PREEMPT_RT patches mostly deal with this, but also allow a … */
    /* The self-test should stress every RSS vector, and unfortunately … */
    char pad[2];                 /* Ensures ip is 4-byte aligned */
    [EF4_INT_MODE_MSIX] = "MSI-X",
    STRING_TABLE_LOOKUP(efx->interrupt_mode, ef4_interrupt_mode)
    /* struct ef4_loopback_state - persistent state during a loopback selftest */
    …

/linux/drivers/net/ethernet/sfc/siena/
selftest.c
    // SPDX-License-Identifier: GPL-2.0-only
    /* Copyright 2005-2006 Fen Systems Ltd.
     * Copyright 2006-2012 Solarflare Communications Inc. */
    /* - All IRQs may be disabled on a CPU for a *long* time by e.g. a …
     * - The PREEMPT_RT patches mostly deal with this, but also allow a … */
    /* The self-test should stress every RSS vector, and unfortunately … */
    char pad[2];                 /* Ensures ip is 4-byte aligned */
    [EFX_INT_MODE_MSIX] = "MSI-X",
    STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode)
    /* struct efx_loopback_state - persistent state during a loopback selftest */
    …

/linux/drivers/infiniband/hw/hfi1/
vnic_sdma.c
    // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
    /* Copyright(c) 2017 - 2018 Intel Corporation. */
    /* struct vnic_txreq - VNIC transmit descriptor */
    vnic_sdma_complete():
        struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;
        …
        sdma_txclean(vnic_sdma->dd, txreq);
        dev_kfree_skb_any(tx->skb);
        kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
    build_vnic_ulp_payload():
        … sde->dd,
          &tx->txreq,
          tx->skb->data, …
    …

/linux/drivers/net/ethernet/sfc/
selftest.c
    // SPDX-License-Identifier: GPL-2.0-only
    /* Copyright 2005-2006 Fen Systems Ltd.
     * Copyright 2006-2012 Solarflare Communications Inc. */
    /* - All IRQs may be disabled on a CPU for a *long* time by e.g. a …
     * - The PREEMPT_RT patches mostly deal with this, but also allow a … */
    /* The self-test should stress every RSS vector. */
    char pad[2];                 /* Ensures ip is 4-byte aligned */
    [EFX_INT_MODE_MSIX] = "MSI-X",
    STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
    /* struct efx_loopback_state - persistent state during a loopback selftest */
    …

/linux/kernel/locking/
qspinlock_paravirt.h
    /* SPDX-License-Identifier: GPL-2.0 */
    /* pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
     * pv_kick(cpu)             -- wakes a suspended vcpu */
    /* … mitigates the slight slowdown for non-overcommitted guest with this
     * aggressive wait-early mechanism. */
    u8 state;
    /* The pending bit is set by the queue head vCPU of the MCS wait queue in …
     * … enter the MCS wait queue. So lock starvation shouldn't happen as long …
     * … mode spinning on the lock unless the MCS wait queue is empty. In this … */
    pv_hybrid_queued_unfair_trylock():
        /* … present in the MCS wait queue but the pending bit isn't set. */
    …

/linux/drivers/gpu/drm/amd/display/dmub/
dmub_srv.h
    /* This interface is not thread-safe. Ensure that all access to the interface …
     * … wait until the queue has been cleared.
     * This does not clear DMUB hardware state, only software state. */
    /* enum dmub_window_memory_type - memory location type specification for windows */
    /* enum dmub_status - return code for dmcub functions */
    /* enum dmub_asic - dmub asic identifier */
    /* enum dmub_window_id - dmub window identifier */
    /* enum dmub_notification_type - dmub outbox notification identifier */
    /* enum dmub_memory_access_type - memory access method */
    /* enum dmub_power_state type - to track DC power state in dmub_srv */
    …

/linux/arch/powerpc/platforms/powernv/
opal-async.c
    // SPDX-License-Identifier: GPL-2.0-or-later
    /* Copyright 2013-2017 IBM Corp. */
    #include <linux/wait.h>
    enum opal_async_token_state state;
    __opal_async_get_token():
        int i, token = -EBUSY;
        …
        if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
            opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
            …
    opal_async_get_token_interruptible():
        /* Wait until a token is available */
        …
        return -ERESTARTSYS;
    __opal_async_release_token():
        return -EINVAL;
    …

/linux/drivers/net/wwan/iosm/
iosm_ipc_imem_ops.c
    // SPDX-License-Identifier: GPL-2.0-only
    /* Copyright (C) 2020-21 Intel Corporation. */
    ipc_imem_sys_wwan_open():
        dev_dbg(ipc_imem->dev, "%s if id: %d",
                ipc_imem_phase_get_string(ipc_imem->phase), if_id);
        …
        dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
                ipc_imem_phase_get_string(ipc_imem->phase));
        return -EIO;
        …
        return ipc_mux_open_session(ipc_imem->mux, if_id);
    ipc_imem_sys_wwan_close():
        if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START && …
            ipc_mux_close_session(ipc_imem->mux, if_id);
    …

iosm_ipc_pm.h
    /* SPDX-License-Identifier: GPL-2.0-only
     * Copyright (C) 2020-21 Intel Corporation. */
    /* union ipc_pm_cond - Conditions for D3 and the sleep message to CP.
     * @link: Device link state. */
    /* enum ipc_mem_host_pm_state - Possible states of the HOST SLEEP finite state …
     * @IPC_MEM_HOST_PM_ACTIVE_WAIT: Intermediate state before going to …
     * @IPC_MEM_HOST_PM_SLEEP_WAIT_IDLE: Intermediate state to wait for idle …
     * @IPC_MEM_HOST_PM_SLEEP_WAIT_D3: Intermediate state to wait for D3 …
     * @IPC_MEM_HOST_PM_SLEEP: after this state the interface is not …
     * @IPC_MEM_HOST_PM_SLEEP_WAIT_EXIT_SLEEP: Intermediate state before exiting … */
    …

/linux/drivers/gpu/drm/renesas/rcar-du/
rcar_du_crtc.h
    /* SPDX-License-Identifier: GPL-2.0+ */
    /* R-Car Display Unit CRTCs
     * Copyright (C) 2013-2015 Renesas Electronics Corporation */
    #include <linux/wait.h>
    /* struct rcar_du_crtc - the CRTC, representing a DU superposition processor
     * @flip_wait: wait queue used to signal page flip completion
     * @vblank_wait: wait queue used to signal vertical blanking
     * @vblank_count: number of vertical blanking interrupts to wait for */
    /* struct rcar_du_crtc_state - Driver-specific CRTC state
     * @state: base DRM CRTC state */
    …

/linux/net/atm/
svc.c
    // SPDX-License-Identifier: GPL-2.0
    /* net/atm/svc.c - ATM SVC sockets */
    /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
    #include <linux/wait.h>
    svc_disconnect():
        DEFINE_WAIT(wait);
        …
        if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
            …
            prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
            if (test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd)
                …
            finish_wait(sk_sleep(sk), &wait);
        /* beware - socket is still in use by atmsigd until the last … */
    …

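svc_disconnect() above uses the open-coded wait primitives (DEFINE_WAIT, prepare_to_wait, finish_wait) rather than wait_event(), so it can check its flags between sleeps. The generic shape of such a loop, with my_wq and my_condition() as placeholders:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/types.h>

    static bool my_condition(void);         /* placeholder predicate */

    static void wait_for_my_condition(wait_queue_head_t *my_wq)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait(my_wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (my_condition())
                            break;
                    schedule();             /* sleep until someone calls wake_up(my_wq) */
            }
            finish_wait(my_wq, &wait);
    }
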
/linux/kernel/futex/
requeue.c
    // SPDX-License-Identifier: GPL-2.0-or-later
    /* … and the hash bucket lock blocking would collide and corrupt state.
     *
     * The following state transitions are valid:
     *
     *   Q_REQUEUE_PI_NONE        -> Q_REQUEUE_PI_IGNORE
     *   Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_WAIT
     *   …
     *   Q_REQUEUE_PI_NONE        -> Q_REQUEUE_PI_INPROGRESS
     *   Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_DONE/LOCKED
     *   Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_NONE (requeue failed)
     *   Q_REQUEUE_PI_WAIT        -> Q_REQUEUE_PI_DONE/LOCKED
     *   Q_REQUEUE_PI_WAIT        -> Q_REQUEUE_PI_IGNORE (requeue failed)
     */
    …

/linux/drivers/cpuidle/
coupled.c
    // SPDX-License-Identifier: GPL-2.0-or-later
    /* coupled.c - helper functions to enter the same idle state on multiple cpus */
    /* … will corrupt the gic state unless the other cpu runs a work
     * around). Each cpu has a power state that it can enter without
     * coordinating with the other cpu (usually Wait For Interrupt, or …
     * … sometimes the whole SoC). Entering a coupled power state must …
     *
     * This file implements a solution, where each cpu will wait in the
     * WFI state until all cpus are ready to enter a coupled state, at
     * which point the coupled state function will be called on all …
     * … power state enter function at the same time. During this pass, … */
    …