| /linux/Documentation/i2c/ |
| i2c-topology.rst |
    40   mux-locked or parent-locked muxes.
    43   Mux-locked muxes
    46   Mux-locked muxes does not lock the entire parent adapter during the
    48   adapter are locked. Mux-locked muxes are mostly interesting if the
    50   their tasks. Since the parent adapter is not fully locked during the
    55   Mux-locked Example
    62   | root |--+--| locked | '--------'
    87   Mux-locked caveats
    90   When using a mux-locked mux, be aware of the following restrictions:
    93   If you build a topology with a mux-locked mux being the parent
    [all …]
|
| /linux/kernel/locking/ |
| qspinlock_paravirt.h |
    92   try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {  in pv_hybrid_queued_unfair_trylock()
    124  return !READ_ONCE(lock->locked) &&  in trylock_clear_pending()
    299  if (READ_ONCE(node->locked))  in pv_wait_node()
    319  if (!READ_ONCE(node->locked)) {  in pv_wait_node()
    340  !READ_ONCE(node->locked));  in pv_wait_node()
    387  WRITE_ONCE(lock->locked, _Q_SLOW_VAL);  in pv_kick_node()
    452  if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {  in pv_wait_head_or_lock()
    458  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);  in pv_wait_head_or_lock()
    466  pv_wait(&lock->locked, _Q_SLOW_VAL);  in pv_wait_head_or_lock()
    499  __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)  in __pv_queued_spin_unlock_slowpath() argument
    [all …]
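Every hit above manipulates the same byte-wide locked field with acquire/release ordering: try_cmpxchg_acquire() to take the lock, WRITE_ONCE()/xchg() to publish state, pv_wait() to sleep on it. A minimal userspace sketch of the trylock/unlock fast path, using C11 atomics in place of the kernel primitives (toy_qspinlock and both function names are invented, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_qspinlock { _Atomic unsigned char locked; };

    /* Take the lock only if it is free: move locked 0 -> 1 in a single
     * acquire-ordered compare-and-swap, the shape of the fast path in
     * pv_hybrid_queued_unfair_trylock(). */
    static bool toy_trylock(struct toy_qspinlock *lock)
    {
        unsigned char old = 0;

        return atomic_compare_exchange_strong_explicit(&lock->locked, &old, 1,
                memory_order_acquire, memory_order_relaxed);
    }

    static void toy_unlock(struct toy_qspinlock *lock)
    {
        /* Release store pairs with the acquire CAS above. */
        atomic_store_explicit(&lock->locked, 0, memory_order_release);
    }

    int main(void)
    {
        struct toy_qspinlock lock = { 0 };

        if (toy_trylock(&lock))
            toy_unlock(&lock);
        return 0;
    }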
|
| mcs_spinlock.h |
    53   * on this node->locked until the previous lock holder sets the node->locked
    62   node->locked = 0;
    74   * Lock acquired, don't need to set node->locked to 1. Threads  in mcs_spin_lock()
    75   * only spin on its own node->locked value for lock acquisition.  in mcs_spin_lock()
    77   * and does not proceed to spin on its own node->locked, this  in mcs_spin_lock()
    79   * audit lock status, then set node->locked value here.  in mcs_spin_lock()
    86   arch_mcs_spin_lock_contended(&node->locked);  in mcs_spin_lock()
    110  arch_mcs_spin_unlock_contended(&next->locked);  in mcs_spin_unlock()
    20   int locked; /* 1 if lock acquired */  global() member
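The comments above describe the MCS discipline: each CPU enqueues a node and spins only on its own node->locked, and the lock holder hands off by setting its successor's node->locked. A self-contained C11 sketch of that algorithm (names are mine; the kernel wraps the spin and the hand-off in arch_mcs_spin_lock_contended()/arch_mcs_spin_unlock_contended()):

    #include <stdatomic.h>
    #include <stddef.h>

    struct mcs_node {
        struct mcs_node *_Atomic next;
        _Atomic int locked;        /* becomes 1 when our predecessor hands over */
    };

    struct mcs_lock { struct mcs_node *_Atomic tail; };

    static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *node)
    {
        struct mcs_node *prev;

        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
        atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

        /* Enqueue ourselves at the tail of the waiter queue. */
        prev = atomic_exchange_explicit(&lock->tail, node, memory_order_acq_rel);
        if (!prev)
            return;                /* queue was empty: lock acquired */

        atomic_store_explicit(&prev->next, node, memory_order_release);

        /* Spin on our own cache line, never on the lock word itself. */
        while (!atomic_load_explicit(&node->locked, memory_order_acquire))
            ;
    }

    static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *node)
    {
        struct mcs_node *next =
            atomic_load_explicit(&node->next, memory_order_acquire);

        if (!next) {
            /* No visible successor: try to swing the tail back to empty. */
            struct mcs_node *expected = node;
            if (atomic_compare_exchange_strong_explicit(&lock->tail, &expected,
                    NULL, memory_order_acq_rel, memory_order_acquire))
                return;
            /* A successor is mid-enqueue; wait for it to link itself. */
            while (!(next = atomic_load_explicit(&node->next,
                                                 memory_order_acquire)))
                ;
        }
        atomic_store_explicit(&next->locked, 1, memory_order_release);
    }

    int main(void)
    {
        struct mcs_lock lock = { NULL };
        struct mcs_node self;

        mcs_lock_acquire(&lock, &self);
        /* critical section */
        mcs_lock_release(&lock, &self);
        return 0;
    }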
|
| qspinlock.c |
    48   * unlock the next pending (next->locked), we compress both these: {tail,
    49   * next->locked} into a single u32 value.
    145  * Wait for in-progress pending->locked hand-overs with a bounded
    191  * store-release that clears the locked bit and create lock
    197  smp_cond_load_acquire(&lock->locked, !VAL);  in clear_pending()
    251  node->locked = 0;
    291  arch_mcs_spin_lock_contended(&node->locked);
    311  * store-release that clears the locked bit and create lock
    318  * been designated yet, there is no way for the locked value to become  in queued_spin_lock_slowpath()
    326  goto locked;  in queued_spin_lock_slowpath()
    [all …]
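As the line 48-49 comment says, qspinlock packs queue tail, pending bit, and locked byte into one u32. A standalone decoder for that layout (field positions follow the qspinlock.c comment block for the NR_CPUS < 16K case; the TOY_* masks are illustrative stand-ins for the kernel's _Q_* macros):

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_LOCKED_MASK   0x000000ffU   /* bits  0-7 : locked byte */
    #define TOY_PENDING_MASK  0x00000100U   /* bit   8   : pending */
    #define TOY_TAIL_MASK     0xffff0000U   /* bits 16-31: tail index + cpu */

    static void decode(uint32_t val)
    {
        printf("locked=%u pending=%u tail=0x%04x\n",
               (unsigned)(val & TOY_LOCKED_MASK),
               (unsigned)!!(val & TOY_PENDING_MASK),
               (unsigned)(val >> 16));
    }

    int main(void)
    {
        decode(0x00000001);   /* uncontended: only the locked byte is set */
        decode(0x00000101);   /* one waiter spinning on the pending bit */
        decode(0x000c0101);   /* pending set and a queue tail recorded */
        return 0;
    }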
| /linux/tools/testing/selftests/net/forwarding/ |
| bridge_locked_port.sh |
    102  bridge link set dev $swp1 locked on
    112  bridge link set dev $swp1 locked off
    133  bridge link set dev $swp1 locked on
    142  bridge link set dev $swp1 locked off
    161  bridge link set dev $swp1 locked on
    170  bridge link set dev $swp1 locked off
    187  bridge link set dev $swp1 learning on locked on
    195  bridge link set dev $swp1 learning on locked on mab on
    212  bridge link set dev $swp1 learning off locked off mab off
    226  bridge link set dev $swp1 learning on locked on mab on
    [all …]
|
| /linux/scripts/coccinelle/locks/ |
| double_lock.cocci |
    17   @locked@
    42   position p1 != locked.p1;
    43   position locked.p;
    45   expression x <= locked.E1;
    46   expression E,locked.E1;
    65   expression x <= locked.E1;
    66   expression locked.E1;
    69   position locked.p,p1,p2;
|
| call_kern.cocci |
    39   @locked exists@
    74   @depends on locked && patch@
    81   @depends on locked && !patch@
    90   p1 << locked.p1;
    91   p2 << locked.p2;
    101  p1 << locked.p1;
    102  p2 << locked.p2;
|
| /linux/mm/ |
| gup.c |
    1089  int *locked)  in faultin_page() argument
    1134  *locked = 0;  in faultin_page()
    1156  *locked = 0;  in faultin_page()
    1357  int *locked)  in __get_user_pages() argument
    1429  PTR_ERR(page) == -EMLINK, locked);  in __get_user_pages()
    1653  int *locked,  in __get_user_pages_locked() argument
    1666  if (!*locked) {  in __get_user_pages_locked()
    1670  *locked = 1;  in __get_user_pages_locked()
    1693  locked);  in __get_user_pages_locked()
    1701  VM_WARN_ON_ONCE(!*locked && (ret < 0 || ret >= nr_pages));  in __get_user_pages_locked()
    [all …]
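These hits trace the int *locked out-parameter convention in gup: a callee that has to sleep drops the caller's mmap lock first and records that by zeroing *locked, so the caller neither double-unlocks nor assumes the lock survived the call. A hypothetical userspace sketch of the same contract, with a pthread mutex standing in for the mmap lock (big_lock, process_item(), and do_slow_work() are all invented):

    #include <pthread.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void do_slow_work(void) { /* stand-in for faulting in a page */ }

    static int process_item(int item, int *locked)
    {
        if (item < 0) {
            /* Slow path: release the lock before blocking, and tell the
             * caller it is gone, as faultin_page() does via *locked = 0. */
            pthread_mutex_unlock(&big_lock);
            *locked = 0;
            do_slow_work();
            return -1;    /* caller must retake the lock and retry */
        }
        return 0;         /* fast path: lock still held */
    }

    int main(void)
    {
        int locked = 1;

        pthread_mutex_lock(&big_lock);
        process_item(-1, &locked);
        if (locked)       /* only unlock if the callee did not already */
            pthread_mutex_unlock(&big_lock);
        return 0;
    }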
|
| mlock.c |
    614  unsigned long locked;  in do_mlock() local
    628  locked = len >> PAGE_SHIFT;  in do_mlock()
    633  locked += current->mm->locked_vm;  in do_mlock()
    634  if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {  in do_mlock()
    641  locked -= count_mm_mlocked_page_nr(current->mm,  in do_mlock()
    646  if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))  in do_mlock()
    793  unsigned long lock_limit, locked;  in user_shm_lock() local
    797  locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;  in user_shm_lock()
    802  memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);  in user_shm_lock()
    805  dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);  in user_shm_lock()
    [all …]
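do_mlock() converts the request to pages, adds the pages already counted in mm->locked_vm, and refuses when the sum exceeds RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK. The same budget is visible from userspace; a small sketch that reads the limit and attempts an mlock() (error handling trimmed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        size_t len = 4 * (size_t)page;
        struct rlimit rl;

        getrlimit(RLIMIT_MEMLOCK, &rl);
        if (rl.rlim_cur == RLIM_INFINITY)
            printf("RLIMIT_MEMLOCK: unlimited\n");
        else
            printf("RLIMIT_MEMLOCK: %llu bytes (%llu pages)\n",
                   (unsigned long long)rl.rlim_cur,
                   (unsigned long long)rl.rlim_cur / (unsigned long long)page);

        void *buf = malloc(len);      /* mlock() rounds to page boundaries */
        if (mlock(buf, len) != 0)     /* fails with ENOMEM once the budget is spent */
            perror("mlock");
        else
            munlock(buf, len);
        free(buf);
        return 0;
    }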
|
| compaction.c |
    534  unsigned long flags, bool *locked, struct compact_control *cc)  in compact_unlock_should_abort() argument
    536  if (*locked) {  in compact_unlock_should_abort()
    538  *locked = false;  in compact_unlock_should_abort()
    566  bool locked = false;  in isolate_freepages_block() local
    587  &locked, cc))  in isolate_freepages_block()
    615  if (!locked) {  in isolate_freepages_block()
    616  locked = compact_lock_irqsave(&cc->zone->lock,  in isolate_freepages_block()
    651  if (locked)  in isolate_freepages_block()
    844  struct lruvec *locked = NULL;  in isolate_migratepages_block() local
    915  if (locked) {  in isolate_migratepages_block()
    [all …]
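compaction.c takes zone->lock lazily: a local bool locked remembers whether the scan currently holds it, and compact_unlock_should_abort() drops it periodically so the scan can back off. A userspace sketch of that pattern (a pthread mutex stands in for the zone lock; scan_block() and the batch size of 32 are made up):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mirrors the shape of compact_unlock_should_abort(): drop the lock if
     * we hold it, then tell the caller whether to bail out of the scan. */
    static bool unlock_should_abort(pthread_mutex_t *lock, bool *locked,
                                    bool abort_scan)
    {
        if (*locked) {
            pthread_mutex_unlock(lock);
            *locked = false;
        }
        return abort_scan;
    }

    static void scan_block(int *items, int n)
    {
        bool locked = false;       /* lock is taken lazily, on first real work */

        for (int i = 0; i < n; i++) {
            /* Periodically release the lock and reconsider. */
            if (i && (i % 32) == 0 &&
                unlock_should_abort(&zone_lock, &locked, false))
                break;
            if (items[i] == 0)
                continue;          /* skipping an item needs no lock */
            if (!locked) {
                pthread_mutex_lock(&zone_lock);
                locked = true;
            }
            items[i] = 0;          /* "isolate" the item under the lock */
        }
        if (locked)
            pthread_mutex_unlock(&zone_lock);
    }

    int main(void)
    {
        int items[64] = { [0] = 1, [40] = 1 };

        scan_block(items, 64);
        return 0;
    }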
|
| mmap_lock.c |
    102  int locked;  in __vma_start_write() local
    104  locked = __vma_enter_locked(vma, false, state);  in __vma_start_write()
    105  if (locked < 0)  in __vma_start_write()
    106  return locked;  in __vma_start_write()
    116  if (locked) {  in __vma_start_write()
|
| /linux/drivers/net/ethernet/intel/ice/ |
| ice_dcb_lib.h |
    18   int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
    24   ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
    26   void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
    28   int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
    96   ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)  in ice_init_pf_dcb() argument
    105  bool __always_unused locked)  in ice_pf_dcb_cfg() argument
    134  static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }  in ice_pf_dcb_recfg() argument
|
| ice_dcb_lib.c |
    283  static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)  in ice_dcb_ena_dis_vsi() argument
    297  ice_ena_vsi(vsi, locked);  in ice_dcb_ena_dis_vsi()
    299  ice_dis_vsi(vsi, locked);  in ice_dcb_ena_dis_vsi()
    350  int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)  in ice_pf_dcb_cfg() argument
    415  if (!locked)  in ice_pf_dcb_cfg()
    449  if (!locked)  in ice_pf_dcb_cfg()
    605  static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)  in ice_dcb_init_cfg() argument
    620  if (ice_pf_dcb_cfg(pf, newcfg, locked))  in ice_dcb_init_cfg()
    634  int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)  in ice_dcb_sw_dflt_cfg() argument
    667  ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);  in ice_dcb_sw_dflt_cfg()
    [all …]
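Both ice files thread a bool locked parameter down the call chain so the same function can run whether or not the caller already holds the relevant lock, taking and releasing it only when the caller did not (the if (!locked) hits above). A generic sketch of that convention; apply_config() and cfg_lock are hypothetical, not ice API:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

    static int apply_config(int new_cfg, bool locked)
    {
        int ret;

        if (!locked)                       /* only lock if the caller did not */
            pthread_mutex_lock(&cfg_lock);

        ret = new_cfg >= 0 ? 0 : -1;       /* stand-in for the real reconfig work */

        if (!locked)
            pthread_mutex_unlock(&cfg_lock);
        return ret;
    }

    int main(void)
    {
        /* A caller without the lock held... */
        apply_config(1, false);
        /* ...and one that already holds it. */
        pthread_mutex_lock(&cfg_lock);
        apply_config(2, true);
        pthread_mutex_unlock(&cfg_lock);
        return 0;
    }

The common alternative is a separate _locked variant of the function; the flag keeps a single entry point, at the cost of the locking rule living in the callers rather than in the function signature.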
|
| /linux/drivers/media/dvb-frontends/ |
| lgs8gxx.c |
    295  static int lgs8gxx_is_locked(struct lgs8gxx_state *priv, u8 *locked)  in lgs8gxx_is_locked() argument
    308  *locked = ((t & 0x80) == 0x80) ? 1 : 0;  in lgs8gxx_is_locked()
    310  *locked = ((t & 0xC0) == 0xC0) ? 1 : 0;  in lgs8gxx_is_locked()
    315  static int lgs8gxx_wait_ca_lock(struct lgs8gxx_state *priv, u8 *locked)  in lgs8gxx_wait_ca_lock() argument
    331  *locked = (ret == 0) ? 1 : 0;  in lgs8gxx_wait_ca_lock()
    359  u8 *locked)  in lgs8gxx_autolock_gi() argument
    384  err = lgs8gxx_wait_ca_lock(priv, locked);  in lgs8gxx_autolock_gi()
    385  if (err || !(*locked))  in lgs8gxx_autolock_gi()
    393  *locked = 0;  in lgs8gxx_autolock_gi()
    403  u8 locked = 0, tmp_gi;  in lgs8gxx_auto_detect() local
    [all …]
|
| atbm8830.c |
    147  static int is_locked(struct atbm_state *priv, u8 *locked)  in is_locked() argument
    153  if (locked != NULL)  in is_locked()
    154  *locked = (status == 1);  in is_locked()
    261  u8 locked = 0;  in atbm8830_set_fe() local
    277  is_locked(priv, &locked);  in atbm8830_set_fe()
    278  if (locked != 0) {  in atbm8830_set_fe()
    329  u8 locked = 0;  in atbm8830_read_status() local
    335  is_locked(priv, &locked);  in atbm8830_read_status()
    336  if (locked) {  in atbm8830_read_status()
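Both demodulator drivers reduce "locked" to polling a status register for a lock bit and giving up after a bounded number of tries. A generic sketch of that loop; the register address, mask, and read_reg() are invented stand-ins for the drivers' I2C register accessors:

    #include <stdbool.h>
    #include <stdint.h>
    #include <unistd.h>

    #define STATUS_LOCKED 0x80   /* hypothetical "carrier locked" bit */

    /* Invented accessor; a real driver reads this over I2C. */
    static uint8_t read_reg(uint8_t addr) { (void)addr; return STATUS_LOCKED; }

    static int wait_lock(uint8_t *locked, int tries)
    {
        uint8_t t;

        *locked = 0;
        while (tries-- > 0) {
            t = read_reg(0x4b);                 /* hypothetical status register */
            if ((t & STATUS_LOCKED) == STATUS_LOCKED) {
                *locked = 1;
                return 0;
            }
            usleep(10 * 1000);                  /* give the frontend 10 ms */
        }
        return -1;                              /* timed out; *locked stays 0 */
    }

    int main(void)
    {
        uint8_t locked;

        return wait_lock(&locked, 40);          /* bounded retry budget */
    }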
|
| /linux/tools/testing/selftests/bpf/progs/ |
| bpf_arena_spin_lock.h |
    37   u8 locked;  member
    52   u8 locked;  member
    64   int locked;  member
    204  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);  in set_locked()
    305  (void)smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);  in arena_spin_lock_slowpath()
    345  node->locked = 0;  in arena_spin_lock_slowpath()
    383  (void)arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);  in arena_spin_lock_slowpath()
    446  arch_mcs_spin_unlock_contended(&next->locked);  in arena_spin_lock_slowpath()
    520  smp_store_release(&lock->locked, 0);  in arena_spin_unlock()
|
| /linux/rust/pin-init/examples/ |
| mutex.rs |
    71   locked: Cell<bool>,  field
    82   locked: Cell::new(false),  in new()
    94   if self.locked.get() {  in lock()
    97   while self.locked.get() {  in lock()
    107  self.locked.set(true);  in lock()
    135  self.mtx.locked.set(false);  in drop()
|
| /linux/drivers/watchdog/ |
| intel_oc_wdt.c |
    45   bool locked;  member
    62   if (oc_wdt->locked)  in intel_oc_wdt_start()
    126  oc_wdt->locked = !!(val & INTEL_OC_WDT_CTL_LCK);  in intel_oc_wdt_setup()
    136  if (oc_wdt->locked) {  in intel_oc_wdt_setup()
    153  } else if (oc_wdt->locked) {  in intel_oc_wdt_setup()
|
| /linux/Documentation/ABI/testing/ |
| sysfs-platform-intel-pmc |
    15   * bit 31 - global reset is locked
    20   in case the register is not locked.
    21   The "global reset bit" should be locked on a production
|
| /linux/drivers/gpu/drm/i915/ |
| i915_scheduler.c |
    130  struct i915_sched_engine *locked,  in lock_sched_engine() argument
    136  GEM_BUG_ON(!locked);  in lock_sched_engine()
    144  while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {  in lock_sched_engine()
    145  spin_unlock(&locked->lock);  in lock_sched_engine()
    148  locked = sched_engine;  in lock_sched_engine()
    151  GEM_BUG_ON(locked != sched_engine);  in lock_sched_engine()
    152  return locked;  in lock_sched_engine()
|
| /linux/drivers/gpu/drm/i915/gt/ |
| mock_engine.c |
    264  struct intel_engine_cs *engine, *locked;  in mock_remove_from_engine() local
    273  locked = READ_ONCE(rq->engine);  in mock_remove_from_engine()
    274  spin_lock_irq(&locked->sched_engine->lock);  in mock_remove_from_engine()
    275  while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {  in mock_remove_from_engine()
    276  spin_unlock(&locked->sched_engine->lock);  in mock_remove_from_engine()
    278  locked = engine;  in mock_remove_from_engine()
    281  spin_unlock_irq(&locked->sched_engine->lock);  in mock_remove_from_engine()
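i915_scheduler.c and mock_engine.c share a retry idiom: rq->engine may change while we wait for its lock, so lock the engine we last saw, re-read the pointer, and chase it if the request migrated. A userspace sketch with made-up types (a pthread mutex in place of the engine spinlock):

    #include <pthread.h>
    #include <stdatomic.h>

    struct engine { pthread_mutex_t lock; };
    struct request { struct engine *_Atomic engine; };

    /* Lock whatever engine the request currently points at; if it moved
     * while we blocked on the lock, drop the stale lock and try again. */
    static struct engine *lock_engine(struct request *rq)
    {
        struct engine *locked, *cur;

        locked = atomic_load_explicit(&rq->engine, memory_order_acquire);
        pthread_mutex_lock(&locked->lock);
        while ((cur = atomic_load_explicit(&rq->engine,
                                           memory_order_acquire)) != locked) {
            pthread_mutex_unlock(&locked->lock);
            locked = cur;
            pthread_mutex_lock(&locked->lock);
        }
        return locked;   /* caller releases locked->lock when done */
    }

    int main(void)
    {
        struct engine e = { PTHREAD_MUTEX_INITIALIZER };
        struct request rq;

        atomic_init(&rq.engine, &e);
        struct engine *held = lock_engine(&rq);
        pthread_mutex_unlock(&held->lock);
        return 0;
    }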
|
| /linux/tools/testing/selftests/mm/ |
| mlock2-tests.c |
    133  bool locked;  in is_vma_lock_on_fault() local
    136  locked = is_vmflag_set(addr, LOCKED);  in is_vma_lock_on_fault()
    137  if (!locked)  in is_vma_lock_on_fault()
    153  bool locked;  in lock_check() local
    156  locked = is_vmflag_set(addr, LOCKED);  in lock_check()
    157  if (!locked)  in lock_check()
|
| /linux/drivers/staging/vme_user/ |
| vme.c |
    277  (slave_image->locked == 0)) {  in vme_slave_request()
    278  slave_image->locked = 1;  in vme_slave_request()
    302  slave_image->locked = 0;  in vme_slave_request()
    422  if (slave_image->locked == 0)  in vme_slave_free()
    425  slave_image->locked = 0;  in vme_slave_free()
    472  (master_image->locked == 0)) {  in vme_master_request()
    473  master_image->locked = 1;  in vme_master_request()
    499  master_image->locked = 0;  in vme_master_request()
    796  if (master_image->locked == 0)  in vme_master_free()
    799  master_image->locked = 0;  in vme_master_free()
    [all …]
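The vme.c hits are a claim/release protocol: a request walks the image list and takes the first entry whose locked flag is clear, setting it under a mutex; free clears it again (the driver warns if it was already clear). A condensed sketch of that request/free shape with hypothetical types:

    #include <pthread.h>
    #include <stddef.h>

    struct image {
        int locked;                 /* 1 while some user owns this window */
        pthread_mutex_t mtx;
    };

    static struct image *image_request(struct image *table, int n)
    {
        for (int i = 0; i < n; i++) {
            pthread_mutex_lock(&table[i].mtx);
            if (table[i].locked == 0) {
                table[i].locked = 1;             /* claimed */
                pthread_mutex_unlock(&table[i].mtx);
                return &table[i];
            }
            pthread_mutex_unlock(&table[i].mtx);
        }
        return NULL;                             /* all windows busy */
    }

    static void image_free(struct image *img)
    {
        pthread_mutex_lock(&img->mtx);
        img->locked = 0;            /* the driver WARNs if this was already 0 */
        pthread_mutex_unlock(&img->mtx);
    }

    int main(void)
    {
        struct image table[2] = {
            { 0, PTHREAD_MUTEX_INITIALIZER },
            { 0, PTHREAD_MUTEX_INITIALIZER },
        };
        struct image *img = image_request(table, 2);

        if (img)
            image_free(img);
        return 0;
    }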
|
| /linux/tools/perf/util/ |
| disasm.c |
    529  ops->locked.ops = zalloc(sizeof(*ops->locked.ops));  in lock__parse()
    530  if (ops->locked.ops == NULL)  in lock__parse()
    533  if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)  in lock__parse()
    536  ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name, 0);  in lock__parse()
    538  if (ops->locked.ins.ops == NULL)  in lock__parse()
    541  if (ops->locked.ins.ops->parse &&  in lock__parse()
    542  ops->locked.ins.ops->parse(arch, ops->locked.ops, ms, NULL) < 0)  in lock__parse()
    548  zfree(&ops->locked.ops);  in lock__parse()
    557  if (ops->locked.ins.ops == NULL)  in lock__scnprintf()
    561  return printed + ins__scnprintf(&ops->locked.ins, bf + printed,  in lock__scnprintf()
    [all …]
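lock__parse() treats the x86 lock prefix as a wrapper: it splits the raw line, parses the remainder as an ordinary instruction, and stores it in ops->locked. A toy, self-contained illustration of that split using plain string handling (none of perf's helpers):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char raw[] = "lock   cmpxchg %rsi,(%rdi)";
        const char *inner = raw + strlen("lock");

        while (*inner == ' ' || *inner == '\t')
            inner++;                 /* skip whitespace after the prefix */

        char name[32];
        sscanf(inner, "%31s", name); /* inner instruction name: "cmpxchg" */

        const char *operands = strchr(inner, ' ');
        printf("prefix=lock ins=%s ops=%s\n", name,
               operands ? operands + 1 : "");
        return 0;
    }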
|
| /linux/arch/powerpc/include/asm/ |
| qspinlock_types.h |
    14   u16 locked;  member
    20   u16 locked;  member
|