/linux/fs/bcachefs/

six.c:
    // SPDX-License-Identifier: GPL-2.0
    #include <trace/events/lock.h>

    static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);

    /* Value we add to the lock in order to take the lock: */
    /* If the lock has this value (used as a mask), taking the lock fails: */
    /* Mask that indicates lock is held for this type: */
    /* Waitlist we wakeup when releasing the lock: */

    static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
    {
        if ((atomic_read(&lock->state) & mask) != mask)
            atomic_or(mask, &lock->state);
    }
    [all …]
six.h:
    /* SPDX-License-Identifier: GPL-2.0 */
    /*
     * ... but with an additional state: read/shared, intent, exclusive/write
     *
     * The purpose of the intent state is to allow for greater concurrency on tree ...
     * ... write lock without deadlocking, so an operation that updates multiple nodes ...
     * But by adding an intent state, which is exclusive with other intent locks but ...
     *
     *   six_lock_read(&foo->lock);
     *   six_unlock_read(&foo->lock);
     *
     * An intent lock must be held before taking a write lock:
     *   six_lock_intent(&foo->lock);
     *   six_lock_write(&foo->lock);
     */
    [all …]
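The header comment above spells out the six-lock call sequence. A minimal usage sketch built only from those calls follows; struct foo, its value field, and the include path are illustrative assumptions, not bcachefs code.

    /* Minimal sketch of the pattern documented in the six.h comment above.
     * Only the six_lock_*()/six_unlock_*() calls come from the excerpt;
     * everything else is assumed for illustration. */
    #include "six.h"    /* assumed local include for struct six_lock */

    struct foo {
        struct six_lock lock;
        int value;
    };

    static int foo_read_value(struct foo *foo)
    {
        int v;

        six_lock_read(&foo->lock);      /* shared with other readers and intent holders */
        v = foo->value;
        six_unlock_read(&foo->lock);
        return v;
    }

    static void foo_update_value(struct foo *foo, int v)
    {
        six_lock_intent(&foo->lock);    /* excludes other intent/write holders, not readers */
        six_lock_write(&foo->lock);     /* held only around the actual modification */
        foo->value = v;
        six_unlock_write(&foo->lock);
        six_unlock_intent(&foo->lock);
    }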
/linux/drivers/md/dm-vdo/

dedupe.c:
    // SPDX-License-Identifier: GPL-2.0-only
    /*
     * ... deduplicate against a single block instead of being serialized through a PBN read lock. Only one ...
     * ... to that zone. The concurrency guarantees of this single-threaded model allow the code to omit
     * more fine-grained locking for the hash_lock structures.
     *
     * A hash_lock acts like a state machine perhaps more than as a lock. Other than the starting and
     * ending states INITIALIZING and BYPASSING, every state represents and is held for the duration of
     * an asynchronous operation. All state transitions are performed on the thread of the hash_zone
     * containing the lock. An asynchronous operation is almost always performed upon entering a state,
     * and the callback from that operation triggers exiting the state and entering a new state.
     *
     * In all states except DEDUPING, there is a single data_vio, called the lock agent, performing the ...
     */
    [all …]
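The comment above describes hash_lock as a state machine whose transitions are driven by async-operation callbacks running on the owning zone's thread. A schematic sketch of that pattern follows; all names are illustrative, not dm-vdo's actual states or types.

    /* Schematic of the "one state per async operation" pattern described
     * above: entering a state launches an operation, and its callback moves
     * the machine to the next state. Runs on the owning zone's thread, so no
     * extra locking is needed. Names are hypothetical. */
    #include <linux/types.h>

    enum demo_lock_state {
        DEMO_INITIALIZING,
        DEMO_QUERYING,
        DEMO_DEDUPING,
        DEMO_BYPASSING,
    };

    struct demo_lock {
        enum demo_lock_state state;
    };

    static void demo_enter_state(struct demo_lock *lock, enum demo_lock_state state);

    /* Callback of the async operation started when entering DEMO_QUERYING:
     * exiting the state and entering the next one happens here. */
    static void demo_query_done(struct demo_lock *lock, bool found_duplicate)
    {
        demo_enter_state(lock, found_duplicate ? DEMO_DEDUPING : DEMO_BYPASSING);
    }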
/linux/drivers/infiniband/sw/rxe/

rxe_task.c:
    // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB

    in rxe_alloc_wq():
        return -ENOMEM;

    /* ... state will move out of busy if task returns a non zero value
     * in do_task(). If state is already busy it is raised to armed ...
     *
     * Context: caller should hold task->lock.
     * Returns: true if state transitioned from idle to busy else false.
     */
    in __reserve_if_idle():
        WARN_ON(rxe_read(task->qp) <= 0);
        ...
        if (task->state == TASK_STATE_IDLE) {
            rxe_get(task->qp);
            task->state = TASK_STATE_BUSY;
    [all …]
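The excerpt documents __reserve_if_idle()'s contract: under task->lock, an IDLE task becomes BUSY and true is returned; a task that is already BUSY is raised to ARMED; anything else returns false. A compact sketch of that contract with illustrative names (not the rxe structures):

    /* Sketch of the reserve-if-idle contract quoted above; struct demo_task
     * and its states are hypothetical. Caller holds task->lock, mirroring the
     * "Context:" note in the excerpt. */
    #include <linux/spinlock.h>

    enum demo_task_state { DEMO_IDLE, DEMO_BUSY, DEMO_ARMED };

    struct demo_task {
        spinlock_t lock;
        enum demo_task_state state;
    };

    static bool demo_reserve_if_idle(struct demo_task *task)
    {
        assert_spin_locked(&task->lock);

        if (task->state == DEMO_IDLE) {
            task->state = DEMO_BUSY;
            return true;                /* we own the task now */
        }
        if (task->state == DEMO_BUSY)
            task->state = DEMO_ARMED;   /* remember that more work arrived */
        return false;
    }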
/linux/net/ncsi/

ncsi-manage.c:
    // SPDX-License-Identifier: GPL-2.0-or-later
    #include "ncsi-pkt.h"
    #include "ncsi-netlink.h"

    in ncsi_channel_has_link():
        return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);

    in ncsi_channel_is_last():
        if (nc->state == NCSI_CHANNEL_ACTIVE && ...

    in ncsi_report_link():
        struct ncsi_dev *nd = &ndp->ndev;
        ...
        nd->state = ncsi_dev_state_functional;
        nd->link_up = 0;
        ...
        nd->link_up = 0;
        ...
        spin_lock_irqsave(&nc->lock, flags);
    [all …]
/linux/kernel/locking/

qspinlock_paravirt.h:
    /* SPDX-License-Identifier: GPL-2.0 */
    /*
     * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
     * pv_kick(cpu)             -- wakes a suspended vcpu
     *
     * ... not running. The one lock stealing attempt allowed at slowpath entry
     * mitigates the slight slowdown for non-overcommitted guest with this
     * aggressive wait-early mechanism.
     */

        u8 state;       /* struct member */

    /*
     * Hybrid PV queued/unfair lock
     * ... it will be called once when a lock waiter enter the PV slowpath before ...
     * ... pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
     */
    [all …]
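The excerpt defines the two paravirt primitives: pv_wait() suspends the vCPU only while *ptr still equals val, and pv_kick() wakes a suspended vCPU. A schematic of how a waiter and a lock releaser would use that contract; the state variable, its values, and the spin budget here are illustrative, not the kernel's actual slowpath.

    /* Illustrative sketch of the pv_wait()/pv_kick() contract quoted above.
     * Only those two calls come from the excerpt; the rest is hypothetical. */
    enum { DEMO_VCPU_RUNNING = 0, DEMO_VCPU_HALTED = 1 };

    static void demo_waiter(u8 *state, int nspin)
    {
        while (nspin--) {
            if (READ_ONCE(*state) == DEMO_VCPU_RUNNING)
                return;                 /* woken (or never halted): keep spinning elsewhere */
            cpu_relax();
        }
        /* Sleep only if the state still reads "halted"; pv_wait() re-checks
         * *ptr == val so a concurrent pv_kick() is not lost. */
        pv_wait(state, DEMO_VCPU_HALTED);
    }

    static void demo_releaser(u8 *state, int waiter_cpu)
    {
        WRITE_ONCE(*state, DEMO_VCPU_RUNNING);
        pv_kick(waiter_cpu);            /* wake the suspended vCPU */
    }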
/linux/drivers/hid/

hid-wiimote-core.c:
    // SPDX-License-Identifier: GPL-2.0-or-later
    /* Copyright (c) 2011-2013 David Herrmann <dh.herrmann@gmail.com> */
    #include "hid-ids.h"
    #include "hid-wiimote.h"

    in wiimote_hid_send():
        if (!hdev->ll_driver->output_report)
            return -ENODEV;
        ...
            return -ENOMEM;

    in wiimote_queue_worker():
        spin_lock_irqsave(&wdata->queue.lock, flags);
        ...
        while (wdata->queue.head != wdata->queue.tail) {
            spin_unlock_irqrestore(&wdata->queue.lock, flags);
    [all …]
hid-wiimote-debug.c:
    // SPDX-License-Identifier: GPL-2.0-or-later
    /* Copyright (c) 2011-2013 David Herrmann <dh.herrmann@gmail.com> */
    #include "hid-wiimote.h"

    in wiidebug_eeprom_read():
        struct wiimote_debug *dbg = f->private_data;
        struct wiimote_data *wdata = dbg->wdata;
        ...
            return -EINVAL;
        ...
        spin_lock_irqsave(&wdata->state.lock, flags);
        wdata->state.cmd_read_size = s;
        wdata->state.cmd_read_buf = buf;
        ...
        spin_unlock_irqrestore(&wdata->state.lock, flags);
    [all …]
/linux/drivers/regulator/

rpi-panel-attiny-regulator.c:
    // SPDX-License-Identifier: GPL-2.0

        /* lock to serialise overall accesses to the Atmel */
        struct mutex lock;

    static int attiny_set_port_state(struct attiny_lcd *state, int reg, u8 val)
    {
        state->port_states[reg - REG_PORTA] = val;
        return regmap_write(state->regmap, reg, val);
    }

    static u8 attiny_get_port_state(struct attiny_lcd *state, int reg)
    {
        return state->port_states[reg - REG_PORTA];
    }

    in attiny_lcd_power_enable():
        struct attiny_lcd *state = rdev_get_drvdata(rdev);
        ...
        mutex_lock(&state->lock);
    [all …]
/linux/drivers/net/ethernet/cavium/liquidio/

octeon_mailbox.c:
    /*
     * Copyright (c) 2003-2016 Cavium, Inc.
     * ... AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty ...
     */

    /* Reads the 8-bytes of data from the mbox register ... */
    in octeon_mbox_read():
        spin_lock(&mbox->lock);
        ...
        msg.u64 = readq(mbox->mbox_read_reg);
        ...
        spin_unlock(&mbox->lock);
        ...
        if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
            mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] = msg.u64;
            mbox->mbox_req.recv_len++;
        ...
        if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
    [all …]
/linux/fs/ocfs2/dlm/

dlmthread.c:
    // SPDX-License-Identifier: GPL-2.0-or-later

    /* will exit holding res->spinlock, but may drop in function */
    /* waits until flags are cleared on res->state */
    in __dlm_wait_on_lockres_flags():
        assert_spin_locked(&res->spinlock);
        ...
        add_wait_queue(&res->wq, &wait);
        ...
        if (res->state & flags) {
            spin_unlock(&res->spinlock);
            ...
            spin_lock(&res->spinlock);
        ...
        remove_wait_queue(&res->wq, &wait);

    in __dlm_lockres_has_locks():
        if (list_empty(&res->granted) && ...
    [all …]
/linux/drivers/media/mc/

mc-request.c:
    // SPDX-License-Identifier: GPL-2.0
    #include <media/media-device.h>
    #include <media/media-request.h>

    in media_request_state_str():
        ... media_request_state_str(enum media_request_state state)
        ...
        if (WARN_ON(state >= ARRAY_SIZE(request_state)))
            ...
        return request_state[state];

    in media_request_clean():
        WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
        WARN_ON(req->updating_count);
        WARN_ON(req->access_count);
        ...
        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
    [all …]
/linux/Documentation/locking/

locktypes.rst:
    .. SPDX-License-Identifier: GPL-2.0

    Lock types and their rules
    ==========================

    - Sleeping locks
    - CPU local locks
    - Spinning locks

    This document conceptually describes these lock types and provides rules ...

    Lock categories
    ===============

    Sleeping locks
    --------------

    Sleeping lock types:

    - mutex
    [all …]
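The categories listed above split into sleeping and spinning locks. A small sketch contrasting the two with the standard mutex and spinlock APIs, under default (non-PREEMPT_RT) semantics:

    /* Contrast of the two basic categories named above. A mutex may sleep
     * while being acquired (process context only); a spinlock never sleeps
     * and keeps its critical section short. Function names are illustrative. */
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    static DEFINE_MUTEX(demo_mutex);
    static DEFINE_SPINLOCK(demo_spinlock);

    static void demo_sleeping_lock(void)
    {
        mutex_lock(&demo_mutex);        /* may sleep: fine to block here */
        /* ... may call functions that sleep, e.g. kmalloc(GFP_KERNEL) ... */
        mutex_unlock(&demo_mutex);
    }

    static void demo_spinning_lock(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&demo_spinlock, flags);   /* never sleeps; IRQs off */
        /* ... short, non-sleeping critical section ... */
        spin_unlock_irqrestore(&demo_spinlock, flags);
    }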
/linux/drivers/media/dvb-frontends/

stb0899_algo.c:
    // SPDX-License-Identifier: GPL-2.0-or-later

    static u32 stb0899_get_srate(struct stb0899_state *state)
    {
        struct stb0899_internal *internal = &state->internal;
        ...
        stb0899_read_regs(state, STB0899_SFRH, sfr, 3);
        ...
        return stb0899_calc_srate(internal->master_clk, sfr);
    }

    static u32 stb0899_set_srate(struct stb0899_state *state, u32 master_clk, u32 srate)
    {
        ...
        dprintk(state->verbose, FE_DEBUG, 1, "-->");
        ...
        stb0899_write_regs(state, STB0899_SFRH, sfr, 3);
        ...
    }

    /* Compute the amount of time needed by the derotator to lock ... */
    long stb0899_carr_width(struct stb0899_state *state)
    [all …]
/linux/drivers/iio/chemical/

sps30.c:
    // SPDX-License-Identifier: GPL-2.0

    in sps30_float_to_int_clamped():
        /* this is fine since passed float is always non-negative */
        ...
        exp -= 127;
        ...
        return ((((1 << 23) + mantissa) * 100) >> 23) >> (-exp);
        ...
        shift = 23 - exp;
        ...
        fraction = mantissa & GENMASK(shift - 1, 0);

    static int sps30_do_meas(struct sps30_state *state, s32 *data, int size)
    {
        ...
        if (state->state == RESET) {
            ret = state->ops->start_meas(state);
            ...
            state->state = MEASURING;
    [all …]
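The conversion fragment above pulls the biased exponent and mantissa out of an IEEE-754 single and shifts the implicit-one-plus-mantissa by (23 - exp) to recover the integer part. A standalone userspace sketch of just that decomposition, worked through for 2.5f; the driver's additional scaling by 100 and clamping are deliberately omitted here.

    /* Demonstration only: IEEE-754 single decomposition and the shift used
     * above. Not the driver function; values chosen for illustration. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        float f = 2.5f;                         /* bit pattern 0x40200000 */
        uint32_t bits;

        memcpy(&bits, &f, sizeof(bits));

        int exp = ((bits >> 23) & 0xff) - 127;  /* unbiased exponent: 1 */
        uint32_t mantissa = bits & 0x7fffff;    /* 0x200000 */

        /* (1 << 23) restores the implicit leading one; shifting right by
         * (23 - exp) discards the fractional bits, leaving the integer part. */
        uint32_t integer = ((1u << 23) + mantissa) >> (23 - exp);

        printf("exp=%d mantissa=%#x integer=%u\n", exp, mantissa, integer); /* prints 2 */
        return 0;
    }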
scd4x.c:
    // SPDX-License-Identifier: GPL-2.0

        struct mutex lock;      /* struct member */

    static int scd4x_i2c_xfer(struct scd4x_state *state, char *txbuf, int txsize, ...
    {
        struct i2c_client *client = state->client;
        ...
            return -EIO;
        ...
            return -EIO;

    static int scd4x_send_command(struct scd4x_state *state, enu…

    Further matches (function parameters and locals named "state"):
        scd4x_read(struct scd4x_state *state, enum scd4x_cmd cmd, void *response, int response_sz)
        scd4x_write(struct scd4x_state *state, enum scd4x_cmd cmd, uint16_t arg)
        scd4x_write_and_fetch(struct scd4x_state *state, enum scd4x_cmd cmd, uint16_t arg, void *response, int response_sz)
        scd4x_read_meas(struct scd4x_state *state, uint16_t *meas)
        scd4x_wait_meas_poll(struct scd4x_state *state)
        scd4x_read_poll(struct scd4x_state *state, uint16_t *buf)
        scd4x_read_channel(struct scd4x_state *state, int chan)
        scd4x_stop_meas(void *state)
        struct scd4x_state *state = iio_priv(indio_dev);    /* scd4x_read_raw(), scd4x_write_raw(),
                                                               calibration_auto_enable_show()/store(),
                                                               calibration_forced_value_store(),
                                                               scd4x_suspend(), scd4x_resume(),
                                                               scd4x_trigger_handler() */
        struct scd4x_state *state = data;                   /* scd4x_disable_regulator() */
        struct scd4x_state *state;                          /* scd4x_probe() */
    [all …]
/linux/fs/btrfs/

extent-io-tree.c:
    // SPDX-License-Identifier: GPL-2.0
    #include "extent-io-tree.h"

    static inline bool extent_state_in_tree(const struct extent_state *state)
    {
        return !RB_EMPTY_NODE(&state->rb_node);
    }

    static inline void btrfs_leak_debug_add_state(struct extent_state *state)
    {
        ...
        list_add(&state->leak_list, &states);
        ...
    }

    static inline void btrfs_leak_debug_del_state(struct extent_state *state)
    {
        ...
        list_del(&state->leak_list);
        ...
    }

    in btrfs_extent_state_leak_debug_check():
        struct extent_state *state;
        ...
        state = list_entry(states.next, struct extent_state, leak_list);
    [all …]
/linux/drivers/net/ethernet/chelsio/cxgb3/

l2t.c:
    /*
     * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
     * ...
     * - Redistributions of source code must retain the above ...
     * - Redistributions in binary form must reproduce the above ...
     */

    /*
     * Module locking notes: There is a RW lock protecting the L2 table as a
     * ... under the protection of the table lock, individual entry changes happen
     * while holding that entry's spinlock. The table lock nests outside the
     * entry locks. Allocations of new entries take the table lock as writers so
     * ... take the table lock as readers so multiple entries can be updated in
     * ... can change state or increment its ref count during allocation as both of ...
     */

    in vlan_prio():
        return e->vlan >> 13;
    [all …]
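The locking notes above describe a table-wide RW lock nesting outside per-entry spinlocks, with allocations taking the table lock as writers and entry updates taking it as readers. A schematic of that hierarchy with illustrative types, not the cxgb3 structures:

    /* Schematic of the two-level locking described above; demo_* names are
     * hypothetical. */
    #include <linux/spinlock.h>

    struct demo_entry {
        spinlock_t lock;        /* protects this entry's state/refcount */
        int state;
        int refcnt;
    };

    struct demo_table {
        rwlock_t lock;          /* protects the table as a whole */
        struct demo_entry *ents;
        int nents;
    };

    static void demo_update_entry(struct demo_table *t, int idx, int new_state)
    {
        struct demo_entry *e;

        read_lock(&t->lock);    /* readers: many entries can be updated in parallel */
        e = &t->ents[idx];
        spin_lock(&e->lock);    /* entry lock nests inside the table lock */
        e->state = new_state;
        spin_unlock(&e->lock);
        read_unlock(&t->lock);
    }

    static struct demo_entry *demo_alloc_entry(struct demo_table *t)
    {
        struct demo_entry *e = NULL;

        write_lock(&t->lock);   /* writers: no entry can change state or refcount meanwhile */
        /* ... find a free slot, initialise state and refcount ... */
        write_unlock(&t->lock);
        return e;
    }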
/linux/fs/afs/

flock.c:
    // SPDX-License-Identifier: GPL-2.0-or-later

    static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
    {
        _debug("STATE %u -> %u", vnode->lock_state, state);
        vnode->lock_state = state;
    }

    /* ... if the callback is broken on this vnode, then the lock may now be available ... */
    in afs_lock_may_be_available():
        _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
        ...
        spin_lock(&vnode->lock);
        if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
            ...
        spin_unlock(&vnode->lock);

    /* the lock will time out in 5 minutes unless we extend it, so schedule ... */
    [all …]
/linux/drivers/usb/gadget/legacy/

inode.c:
    // SPDX-License-Identifier: GPL-2.0+
    /*
     * inode.c -- user mode filesystem api for usb gadget controllers
     *
     * Copyright (C) 2003-2004 David Brownell
     *
     * Key parts that must be USB-specific are protocols defining how the
     * read/write operations relate to the hardware state machines. There ...
     *
     * - First, dev_config() is called when /dev/gadget/$CHIP is configured ...
     * - The ...
     */

    struct members:
        spinlock_t lock;
        enum ep0_state state;       /* P: lock */
        ...
        struct mutex lock;
        enum ep_state state;

    in ep0_read():
        enum ep0_state state;
    [all …]
/linux/net/bridge/

br_stp.c:
    // SPDX-License-Identifier: GPL-2.0-or-later

    void br_set_state(struct net_bridge_port *p, unsigned int state)
    {
        ...
            .orig_dev = p->dev,
            ...
            .u.stp_state = state,
        ...
        /* Don't change the state of the ports if they are driven by a different ... */
        if (p->flags & BR_MRP_AWARE)
            ...
        p->state = state;
        if (br_opt_get(p->br, BROPT_MST_ENABLED)) {
            err = br_mst_set_state(p, 0, state, NULL);
            ...
                br_warn(p->br, "error setting MST state on port %u(%s)\n", ...
    [all …]
/linux/kernel/sched/

wait.c:
    // SPDX-License-Identifier: GPL-2.0-only

    in __init_waitqueue_head():
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);

    in add_wait_queue():
        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        ...
        spin_unlock_irqrestore(&wq_head->lock, flags);

    in add_wait_queue_exclusive():
        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        ...
        spin_unlock_irqrestore(&wq_head->lock, flags);
    [all …]
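The excerpt shows the wait-queue internals: head initialisation and entry insertion under wq_head->lock. A typical caller-side sketch using the public API; the condition flag and the producer/consumer split are illustrative.

    /* Usage sketch for the wait-queue machinery excerpted above; demo_* names
     * are hypothetical. */
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static bool demo_ready;

    static int demo_consumer(void)
    {
        /* Sleeps until demo_ready is true; the condition is re-checked after
         * every wake-up, so spurious wake-ups are harmless. */
        return wait_event_interruptible(demo_wq, demo_ready);
    }

    static void demo_producer(void)
    {
        demo_ready = true;
        wake_up(&demo_wq);      /* walks the list built by add_wait_queue() */
    }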
completion.c:
    // SPDX-License-Identifier: GPL-2.0
    /*
     * Generic wait-for-completion handler;
     * ... wait_for_completion default blocks whereas semaphore default non-block. The ...
     */

    in complete_with_flags():
        raw_spin_lock_irqsave(&x->wait.lock, flags);
        ...
        if (x->done != UINT_MAX)
            x->done++;
        swake_up_locked(&x->wait, wake_flags);
        raw_spin_unlock_irqrestore(&x->wait.lock, flags);

    /*
     * complete: - signals a single thread waiting on this completion
     * @x: holds the state of this particular completion
     */
    [all …]
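The kernel-doc fragment says complete() signals a single thread waiting on the completion. A minimal usage sketch of the completion API; the worker/waiter split is illustrative.

    /* Usage sketch for the completion API implemented above; demo names are
     * hypothetical. */
    #include <linux/completion.h>

    static DECLARE_COMPLETION(demo_done);

    static int demo_worker(void *arg)
    {
        /* ... do the work ... */
        complete(&demo_done);               /* wakes exactly one waiter */
        return 0;
    }

    static void demo_waiter(void)
    {
        wait_for_completion(&demo_done);    /* blocks until complete() is called */
    }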
/linux/drivers/platform/surface/aggregator/

ssh_request_layer.c:
    // SPDX-License-Identifier: GPL-2.0+
    /* Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com> */
    #include <linux/error-injection.h>

    /*
     * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
     * ... response in this time-frame after finishing the underlying packet
     * transmission, the request will be completed with %-ETIMEDOUT as status ...
     *
     * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
     * Time-resolution for timeouts. Should be larger than one jiffy to avoid
     * direct re-scheduling of reaper work_struct.
     *
     * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
     */
    [all …]
/linux/drivers/infiniband/core/

iwcm.c:
    /*
     * - Redistributions of source code must retain the above ...
     * - Redistributions in binary form must reproduce the above ...
     */
    #include <linux/dma-mapping.h>

    in iwcm_reject_msg():
        index = -reason;

    /*
     * The following services provide a mechanism for pre-allocating iwcm_work
     * elements. The design pre-allocates them based on the cm_id type:
     * ... be processed. cm_event_handler() returns -ENOMEM in this case. Its up ...
     */
    in get_work():
        if (list_empty(&cm_id_priv->work_free_list))
            ...
        work = list_first_entry(&cm_id_priv->work_free_list, struct iwcm_work, ...
        list_del_init(&work->free_list);
    [all …]
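The comment describes pre-allocating iwcm_work elements and handing them out from a free list, with -ENOMEM reported once the pool is exhausted. A schematic of that pre-allocation pattern with illustrative names, not the iwcm structures:

    /* Schematic of the pre-allocated free-list pattern described above:
     * a fixed pool of work items is built up front, then handed out without
     * allocating in the event path. demo_* names are hypothetical. */
    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_work {
        struct list_head free_list;
        /* ... per-event payload ... */
    };

    struct demo_ctx {
        struct list_head work_free_list;
    };

    static int demo_alloc_work_entries(struct demo_ctx *ctx, int count)
    {
        struct demo_work *work;

        while (count--) {
            work = kmalloc(sizeof(*work), GFP_KERNEL);
            if (!work)
                return -ENOMEM;
            list_add(&work->free_list, &ctx->work_free_list);
        }
        return 0;
    }

    static struct demo_work *demo_get_work(struct demo_ctx *ctx)
    {
        struct demo_work *work;

        if (list_empty(&ctx->work_free_list))
            return NULL;        /* pool exhausted: caller reports -ENOMEM */
        work = list_first_entry(&ctx->work_free_list, struct demo_work, free_list);
        list_del_init(&work->free_list);
        return work;
    }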