/linux/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
    102: bridge link set dev $swp1 locked on
    112: bridge link set dev $swp1 locked off
    118: log_test "Locked port ipv4"
    133: bridge link set dev $swp1 locked on
    142: bridge link set dev $swp1 locked off
    150: log_test "Locked port vlan"
    161: bridge link set dev $swp1 locked on
    170: bridge link set dev $swp1 locked off
    176: log_test "Locked port ipv6"
    187: bridge link set dev $swp1 learning on locked on
    [all …]

/linux/Documentation/i2c/i2c-topology.rst
    40: mux-locked or parent-locked muxes.
    43: Mux-locked muxes
    46: Mux-locked muxes does not lock the entire parent adapter during the
    48: adapter are locked. Mux-locked muxes are mostly interesting if the
    50: their tasks. Since the parent adapter is not fully locked during the
    55: Mux-locked Example
    62: | root |--+--| locked | '--------'
    87: Mux-locked caveats
    90: When using a mux-locked mux, be aware of the following restrictions:
    93: If you build a topology with a mux-locked mux being the parent
    [all …]

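The i2c-topology.rst matches above contrast the two mux types: a parent-locked mux holds the whole parent adapter across select, transfer and deselect, while a mux-locked mux only serializes against itself, so unrelated devices on the parent bus can slip transfers in between. Below is a rough userspace illustration of that difference using pthread mutexes; the helper names and types are invented for the sketch and are not the in-kernel i2c-mux API.

    #include <pthread.h>

    /* Hypothetical stand-ins for an I2C segment, a mux and a raw transfer. */
    static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t mux_lock = PTHREAD_MUTEX_INITIALIZER;

    void raw_xfer(int addr)     { (void)addr; }  /* caller already holds the bus */
    void mux_select(int chan)   { (void)chan; }
    void mux_deselect(int chan) { (void)chan; }

    /* Ordinary client transfer: the parent segment is held for one message. */
    void locked_xfer(int addr)
    {
        pthread_mutex_lock(&parent_lock);
        raw_xfer(addr);
        pthread_mutex_unlock(&parent_lock);
    }

    /* Parent-locked mux: nothing else may use the parent until deselect. */
    void parent_locked_xfer(int chan, int addr)
    {
        pthread_mutex_lock(&parent_lock);
        mux_select(chan);
        raw_xfer(addr);
        mux_deselect(chan);
        pthread_mutex_unlock(&parent_lock);
    }

    /* Mux-locked mux: only the mux is held, so other clients of the parent
     * can interleave transfers between select, payload and deselect. */
    void mux_locked_xfer(int chan, int addr)
    {
        pthread_mutex_lock(&mux_lock);
        mux_select(chan);      /* does its own locked parent transfers inside */
        locked_xfer(addr);     /* parent held only for this message */
        mux_deselect(chan);
        pthread_mutex_unlock(&mux_lock);
    }
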
/linux/include/drm/drm_exec.h
    29: * @num_objects: number of objects locked
    39: * @objects: array of the locked objects
    49: * @prelocked: already locked GEM object due to contention
    59: * Return: Pointer to the locked object corresponding to @index if
    60: * index is within the number of locked objects. NULL otherwise.
    69: * drm_exec_for_each_locked_object - iterate over all the locked objects
    74: * Iterate over all the locked GEM objects inside the drm_exec object.
    80: * drm_exec_for_each_locked_object_reverse - iterate over all the locked
    86: * Iterate over all the locked GEM objects inside the drm_exec object in
    95: * drm_exec_until_all_locked - loop until all GEM objects are locked
    [all …]

/linux/kernel/locking/qspinlock_paravirt.h
     92: try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {  (in pv_hybrid_queued_unfair_trylock)
    124: return !READ_ONCE(lock->locked) &&  (in trylock_clear_pending)
    142: * Try to clear pending bit & set locked bit  (in trylock_clear_pending)
    286: * Wait for node->locked to become true, halt the vcpu after a short spin.
    299: if (READ_ONCE(node->locked))  (in pv_wait_node)
    309: * Order pn->state vs pn->locked thusly:  (in pv_wait_node)
    311: * [S] pn->state = VCPU_HALTED    [S] next->locked = 1  (in pv_wait_node)
    313: * [L] pn->locked                 [RmW] pn->state = VCPU_HASHED  (in pv_wait_node)
    319: if (!READ_ONCE(node->locked)) {  (in pv_wait_node)
    333: * If the locked flag is still not set after wakeup, it is a  (in pv_wait_node)
    [all …]

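The comment matched at lines 309-313 is a store-buffering litmus test: the waiter must publish pn->state = VCPU_HALTED before re-reading pn->locked, and the waker must set next->locked before updating pn->state, otherwise a halted vCPU could miss its wakeup. A small C11 illustration of that handshake; the struct, function names and VCPU_* values are invented for the sketch, and the kernel uses READ_ONCE/WRITE_ONCE, smp_mb() and cmpxchg rather than seq_cst atomics.

    #include <stdatomic.h>
    #include <stdbool.h>

    enum vcpu_state { VCPU_RUNNING, VCPU_HALTED, VCPU_HASHED };

    struct pv_node {
        _Atomic int locked;   /* set by the previous lock holder on handoff */
        _Atomic int state;    /* VCPU_* */
    };

    /* Waiter: publish VCPU_HALTED, then re-check locked before really halting.
     * The seq_cst ordering stands in for the smp_mb() between the two accesses. */
    bool waiter_may_halt(struct pv_node *pn)
    {
        atomic_store(&pn->state, VCPU_HALTED);
        return !atomic_load(&pn->locked);   /* halt only if still not handed off */
    }

    /* Waker: hand the lock over first, then kick only if the waiter had already
     * published VCPU_HALTED. At least one side always observes the other. */
    bool waker_must_kick(struct pv_node *next)
    {
        atomic_store(&next->locked, 1);
        return atomic_exchange(&next->state, VCPU_HASHED) == VCPU_HALTED;
    }
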
H A D | mcs_spinlock.h | 20 int locked; /* 1 if lock acquired */ member 61 * on this node->locked until the previous lock holder sets the node->locked 70 node->locked = 0; in mcs_spin_lock() 82 * Lock acquired, don't need to set node->locked to 1. Threads in mcs_spin_lock() 83 * only spin on its own node->locked value for lock acquisition. in mcs_spin_lock() 85 * and does not proceed to spin on its own node->locked, this in mcs_spin_lock() 87 * audit lock status, then set node->locked value here. in mcs_spin_lock() 94 arch_mcs_spin_lock_contended(&node->locked); in mcs_spin_lock() 118 arch_mcs_spin_unlock_contended(&next->locked); in mcs_spin_unlock()
|
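The matches above sketch the MCS idea: every contender enqueues a node, spins only on its own node->locked flag, and the holder releases the lock by setting the next node's flag. A self-contained C11 version of that algorithm for illustration; the type and function names are mine, and the kernel version uses per-CPU nodes and arch-specific acquire/release helpers instead.

    #include <stdatomic.h>
    #include <stddef.h>

    struct mcs_node {
        struct mcs_node *_Atomic next;
        _Atomic int locked;              /* 1 once the lock has been handed to us */
    };

    void mcs_lock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
    {
        struct mcs_node *prev;

        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
        atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

        /* Queue ourselves; if there was no predecessor the lock is ours. */
        prev = atomic_exchange(tail, node);
        if (!prev)
            return;

        /* Link behind the predecessor and spin on our own flag only. */
        atomic_store_explicit(&prev->next, node, memory_order_release);
        while (!atomic_load_explicit(&node->locked, memory_order_acquire))
            ;
    }

    void mcs_unlock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
    {
        struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

        if (!next) {
            /* No known successor: try to reset the tail to "empty". */
            struct mcs_node *expected = node;
            if (atomic_compare_exchange_strong(tail, &expected, NULL))
                return;
            /* A successor is enqueueing; wait until it has linked itself. */
            while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
                ;
        }
        /* Hand the lock off by setting the successor's own flag. */
        atomic_store_explicit(&next->locked, 1, memory_order_release);
    }
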
/linux/scripts/coccinelle/locks/double_lock.cocci
    17: @locked@
    42: position p1 != locked.p1;
    43: position locked.p;
    45: expression x <= locked.E1;
    46: expression E,locked.E1;
    65: expression x <= locked.E1;
    66: expression locked.E1;
    69: position locked.p,p1,p2;

/linux/scripts/coccinelle/locks/call_kern.cocci
     39: @locked exists@
     74: @depends on locked && patch@
     81: @depends on locked && !patch@
     90: p1 << locked.p1;
     91: p2 << locked.p2;
    101: p1 << locked.p1;
    102: p2 << locked.p2;

/linux/drivers/mtd/spi-nor/otp.c
    188: /* no need to write the register if region is already locked */  (in spi_nor_otp_lock_sr2)
    253: int ret, locked;  (in spi_nor_mtd_otp_info, local)
    266: locked = ops->is_locked(nor, i);  (in spi_nor_mtd_otp_info)
    267: if (locked < 0) {  (in spi_nor_mtd_otp_info)
    268: ret = locked;  (in spi_nor_mtd_otp_info)
    272: buf->locked = !!locked;  (in spi_nor_mtd_otp_info)
    289: int locked;  (in spi_nor_mtd_otp_range_is_locked, local)
    292: * If any of the affected OTP regions are locked the entire range is  (in spi_nor_mtd_otp_range_is_locked)
    293: * considered locked.  (in spi_nor_mtd_otp_range_is_locked)
    298: locked = ops->is_locked(nor, region);  (in spi_nor_mtd_otp_range_is_locked)
    [all …]

/linux/drivers/mtd/spi-nor/swp.c
     89: * Return true if the entire region is locked (if @locked is true) or unlocked
     90: * (if @locked is false); false otherwise.
     93: u64 len, u8 sr, bool locked)  (in spi_nor_check_lock_status_sr, argument)
    106: if (locked)  (in spi_nor_check_lock_status_sr)
    107: /* Requested range is a sub-range of locked range */  (in spi_nor_check_lock_status_sr)
    110: /* Requested range does not overlap with locked range */  (in spi_nor_check_lock_status_sr)
    195: /* lock_len: length of region that should end up locked */  (in spi_nor_sr_lock)
    266: /* If nothing in our range is locked, we don't need to do anything */  (in spi_nor_sr_unlock)
    270: /* If anything below us is locked, we can't use 'top' protection */  (in spi_nor_sr_unlock)
    274: /* If anything above us is locked, we can't use 'bottom' protection */  (in spi_nor_sr_unlock)
    [all …]

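The comments matched at lines 89-110 describe the check done by spi_nor_check_lock_status_sr(): with @locked true the requested range must lie entirely inside the protected range, with @locked false it must not overlap it at all. A stand-alone sketch of just that geometry; the real function first derives the protected range from the status-register block-protection bits, and the function name below is made up.

    #include <stdbool.h>
    #include <stdint.h>

    /* [lock_ofs, lock_ofs + lock_len) is the range the flash currently protects. */
    bool range_check(uint64_t ofs, uint64_t len,
                     uint64_t lock_ofs, uint64_t lock_len, bool locked)
    {
        if (locked)
            /* Requested range is a sub-range of the locked range */
            return ofs >= lock_ofs && ofs + len <= lock_ofs + lock_len;

        /* Requested range does not overlap with the locked range */
        return ofs + len <= lock_ofs || ofs >= lock_ofs + lock_len;
    }
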
/linux/tools/testing/selftests/mm/mlock2-tests.c
     92: #define LOCKED "lo"  (in is_vmflag_set)
    133: bool locked;  (in get_value_for_name)
    136: locked = is_vmflag_set(addr, LOCKED);  (in get_value_for_name)
    137: if (!locked)  (in get_value_for_name)
    153: bool locked;  (in is_vma_lock_on_fault)
    156: locked = is_vmflag_set(addr, LOCKED);  (in is_vma_lock_on_fault)
    157: if (!locked)  (in is_vma_lock_on_fault)
    168: if (is_vmflag_set((unsigned long)map, LOCKED)) {  (in lock_check)
    105: #define LOCKED  (global macro)
    146: bool locked;  (in is_vma_lock_on_fault, local)
    166: bool locked;  (in lock_check, local)
    [all …]

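The selftest above decides whether a mapping is VM_LOCKED by looking for the "lo" tag in the VmFlags line of /proc/self/smaps. A rough stand-alone version of that check; the helper name and the parsing details here are mine, not the selftest's.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Return true if the VMA containing @addr carries the given VmFlags tag
     * (e.g. "lo" for VM_LOCKED). */
    bool vma_has_flag(unsigned long addr, const char *flag)
    {
        FILE *f = fopen("/proc/self/smaps", "r");
        char line[1024];
        bool in_vma = false, found = false;

        if (!f)
            return false;

        while (fgets(line, sizeof(line), f)) {
            unsigned long start, end;

            /* Mapping headers look like "start-end perms offset dev inode path". */
            if (sscanf(line, "%lx-%lx", &start, &end) == 2) {
                in_vma = addr >= start && addr < end;
                continue;
            }
            if (in_vma && !strncmp(line, "VmFlags:", 8)) {
                char tag[8];

                /* Each flag is printed as "xx " so search for " lo ". */
                snprintf(tag, sizeof(tag), " %s ", flag);
                found = strstr(line + 8, tag) != NULL;
                break;
            }
        }
        fclose(f);
        return found;
    }

After a successful mlock() of the region containing addr, vma_has_flag(addr, "lo") should return true.
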
/linux/drivers/gpu/drm/drm_exec.c
     15: * unlocks all previously locked GEM objects and locks the contended one first
     18: * After an object is locked fences slots can optionally be reserved on the
     74: * Initialize the object and make sure that we can track locked objects.
     99: * Unlock all locked objects, drop the references to objects and free all memory
    119: * objects locked.
    140: /* Track the locked object in the array */
    161: /* Make sure the contended object is locked first */
    204: * already locked (can be suppressed by setting the DRM_EXEC_IGNORE_DUPLICATES
    256: * Unlock the GEM object and remove it from the collection of locked objects.
    257: * Should only be used to unlock the most recently locked objects. It's not time
    [all …]

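Lines 15 and 161 above describe the contention handling: when one lock attempt hits contention, everything locked so far is dropped and the contended object is locked first on the next pass. The usual caller shape, assuming the drm_exec_until_all_locked()/drm_exec_retry_on_contention()/drm_exec_prepare_obj() helpers declared in include/drm/drm_exec.h; treat this as a sketch, since details such as the third drm_exec_init() argument differ between kernel versions.

    #include <drm/drm_exec.h>

    int lock_two_bos(struct drm_gem_object *a, struct drm_gem_object *b)
    {
        struct drm_exec exec;
        int ret;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 2);
        drm_exec_until_all_locked(&exec) {
            /* On contention the loop body restarts: everything locked so
             * far is dropped and the contended object goes first. */
            ret = drm_exec_prepare_obj(&exec, a, 1);
            drm_exec_retry_on_contention(&exec);
            if (ret)
                goto out;

            ret = drm_exec_prepare_obj(&exec, b, 1);
            drm_exec_retry_on_contention(&exec);
            if (ret)
                goto out;
        }

        /* ... both objects are now locked; queue work, add fences ... */
    out:
        drm_exec_fini(&exec);
        return ret;
    }
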
/linux/include/linux/hwspinlock.h
    178: * immediately fail if the hwspinlock is already locked.
    185: * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
    199: * immediately fail if the hwspinlock is already locked.
    205: * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
    224: * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
    241: * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
    261: * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
    271: * @hwlock: the hwspinlock to be locked
    295: * @hwlock: the hwspinlock to be locked
    318: * @hwlock: the hwspinlock to be locked
    [all …]

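The comments matched above all state the same contract: the hwspinlock trylock variants return 0 on success and -EBUSY when the other side already holds the lock, while the timeout variants fail with -ETIMEDOUT. A kernel-side usage sketch, assuming the hwspin_lock_request_specific()/hwspin_lock_timeout_irqsave() helpers from this header; it is illustrative only and not buildable outside a kernel module.

    #include <linux/hwspinlock.h>

    int poke_shared_register(unsigned int hwlock_id)
    {
        struct hwspinlock *hwlock;
        unsigned long flags;
        int ret;

        hwlock = hwspin_lock_request_specific(hwlock_id);
        if (!hwlock)
            return -EBUSY;

        /* Spin for up to 10 ms with local interrupts disabled. */
        ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
        if (ret)
            goto out_free;

        /* ... access hardware shared with the remote processor ... */

        hwspin_unlock_irqrestore(hwlock, &flags);
    out_free:
        hwspin_lock_free(hwlock);
        return ret;
    }
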
/linux/include/linux/ww_mutex.h
     69: * @first_lock_dep_map: fake lockdep_map for first locked ww_mutex.
     71: * lockdep requires the lockdep_map for the first locked ww_mutex
     74: * fake locked ww_mutex lockdep map between ww_acquire_init() and
    106: * It is not allowed to initialize an already locked mutex.
    178: * code and clearly designated the acquire phase from actually using the locked
    242: * locked. The mutex must first be initialized (or statically defined) before it
    243: * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
    274: * locked. The mutex must first be initialized (or statically defined) before it
    275: * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
    362: * use of the mutex is forbidden. The mutex must not be locked when
    [all …]

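The comments matched above revolve around the wait/wound acquire context: initialize it, take every ww_mutex of the transaction under it, back off and use the _slow variant on -EDEADLK, and only destroy the context after everything has been unlocked. A condensed two-object sketch of that pattern; the function, struct and class names are mine, and Documentation/locking/ww-mutex-design.rst has the full, list-based examples.

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(demo_ww_class);

    struct demo_obj {
        struct ww_mutex lock;   /* assumed ww_mutex_init()ed with demo_ww_class */
    };

    /* Lock both objects under one acquire context, backing off on -EDEADLK. */
    int demo_lock_both(struct demo_obj *a, struct demo_obj *b)
    {
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &demo_ww_class);

        ret = ww_mutex_lock(&a->lock, &ctx);
        if (ret)
            goto out_fini;

        ret = ww_mutex_lock(&b->lock, &ctx);
        while (ret == -EDEADLK) {
            struct demo_obj *tmp;

            /* An older context owns b: drop what we hold, then sleep until
             * b is free and take it with the slow path. */
            ww_mutex_unlock(&a->lock);
            ww_mutex_lock_slow(&b->lock, &ctx);

            /* Try the other object again; if that one now deadlocks, swap
             * roles so the back-off above stays correct and repeat. */
            ret = ww_mutex_lock(&a->lock, &ctx);
            tmp = a;
            a = b;
            b = tmp;
        }
        if (ret) {
            ww_mutex_unlock(&a->lock);
            goto out_fini;
        }

        ww_acquire_done(&ctx);          /* acquire phase is over */
        /* ... use both objects ... */
        ww_mutex_unlock(&b->lock);
        ww_mutex_unlock(&a->lock);

    out_fini:
        ww_acquire_fini(&ctx);
        return ret;
    }
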
/linux/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
     18: int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
     24: ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
     26: void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
     28: int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
     93: ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)  (in ice_init_pf_dcb, argument)
    102: bool __always_unused locked)  (in ice_pf_dcb_cfg, argument)
    131: static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }  (in ice_pf_dcb_recfg, argument)

/linux/drivers/media/dvb-frontends/lgs8gxx.c
    295: static int lgs8gxx_is_locked(struct lgs8gxx_state *priv, u8 *locked)  (in lgs8gxx_is_locked, argument)
    308: *locked = ((t & 0x80) == 0x80) ? 1 : 0;  (in lgs8gxx_is_locked)
    310: *locked = ((t & 0xC0) == 0xC0) ? 1 : 0;  (in lgs8gxx_is_locked)
    315: static int lgs8gxx_wait_ca_lock(struct lgs8gxx_state *priv, u8 *locked)  (in lgs8gxx_wait_ca_lock, argument)
    331: *locked = (ret == 0) ? 1 : 0;  (in lgs8gxx_wait_ca_lock)
    359: u8 *locked)  (in lgs8gxx_autolock_gi, argument)
    384: err = lgs8gxx_wait_ca_lock(priv, locked);  (in lgs8gxx_autolock_gi)
    385: if (err || !(*locked))  (in lgs8gxx_autolock_gi)
    393: *locked = 0;  (in lgs8gxx_autolock_gi)
    403: u8 locked = 0, tmp_gi;  (in lgs8gxx_auto_detect, local)
    [all …]

/linux/fs/btrfs/subpage.c
     98: * and doesn't need to be locked.  (in btrfs_attach_subpage)
    244: const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);  (in btrfs_subpage_end_and_test_lock)
    258: * This @locked_page is locked by plain lock_page(), thus its  (in btrfs_subpage_end_and_test_lock)
    259: * subpage::locked is 0. Handle them in a special way.  (in btrfs_subpage_end_and_test_lock)
    277: * Handle different locked folios:
    282: * - folio locked but without any subpage locked
    287: * - folio locked with subpage range locked.
    288: * We go through the locked sectors inside the range and clear their locked
    290: * the last locked range.
    305: * For subpage case, there are two types of locked page. With or  (in btrfs_folio_end_lock)
    [all …]

/linux/include/asm-generic/qspinlock_types.h
    25: u8 locked;  (member)
    40: u8 locked;  (member)
    55: * 0- 7: locked byte
    62: * 0- 7: locked byte
    71: #define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)

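Lines 55 and 62 come from the two layout comments in this header (how the upper bits split between pending and tail depends on NR_CPUS), and _Q_SET_MASK() builds each field's mask from its offset and width by token pasting. Roughly how the matched #define expands for the locked byte, which is bits 0-7 in both layouts; the pending and tail fields are omitted here and this is a reconstruction, not a copy of the header:

    #define _Q_LOCKED_OFFSET  0
    #define _Q_LOCKED_BITS    8
    #define _Q_SET_MASK(type) \
        (((1U << _Q_ ## type ## _BITS) - 1) << _Q_ ## type ## _OFFSET)
    #define _Q_LOCKED_MASK    _Q_SET_MASK(LOCKED)   /* 0x000000ff */
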
/linux/drivers/media/common/siano/smscoreapi.h
    677: u32 is_rf_locked; /* 0 - not locked, 1 - locked */
    678: u32 is_demod_locked; /* 0 - not locked, 1 - locked */
    802: u32 is_rf_locked; /* 0 - not locked, 1 - locked */
    803: u32 is_demod_locked; /* 0 - not locked, 1 - locked */
    816: u32 modem_state; /* 0 - Acquisition, 1 - Locked */
    844: u32 is_rf_locked; /* 0 - not locked, 1 - locked */
    845: u32 is_demod_locked; /* 0 - not locked, 1 - locked */
    858: u32 modem_state; /* 0 - Acquisition, 1 - Locked */
    942: u32 is_demod_locked; /* 0 - not locked, 1 - locked */
    946: u32 is_rf_locked; /* 0 - not locked, 1 - locked */
    [all …]

/linux/Documentation/filesystems/directory-locking.rst
     44: * decide which of the source and target need to be locked.
     45: The source needs to be locked if it's a non-directory, target - if it's
     49: are non-directories - the source because it wouldn't need to be locked
     66: to be read, modified or removed by method will be locked by the caller.
     89: dcache trees. Lookup is already holding the parent locked. If alias is
    102: all we change is the view in dcache. Moreover, holding a directory locked
    185: Each operation in the minimal cycle must have locked at least
    204: In other words, we have a cross-directory rename that locked
    213: Consider the order in which directories are locked by the
    215: Dn and D1 would have to be among those, with Dn locked before D1.
    [all …]

/linux/mm/gup.c
    1154: * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
    1159: int *locked)  (in faultin_page, argument)
    1204: *locked = 0;  (in faultin_page)
    1226: *locked = 0;  (in faultin_page)
    1376: * @locked: whether we're still with the mmap_lock held
    1411: * be released. If this happens *@locked will be set to 0 on return.
    1424: int *locked)  (in __get_user_pages, argument)
    1492: PTR_ERR(page) == -EMLINK, locked);  (in __get_user_pages)
    1702: * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
    1704: * set (*locked = 0).
    [all …]

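The comments above describe the *locked convention of the gup family: the caller passes *locked == 1 when it already holds mmap_lock, and if gup has to drop the lock (for example to wait on a fault) it clears *locked so the caller knows not to unlock again. A hedged sketch of the caller side using pin_user_pages_remote(); the exact parameter list has changed across kernel versions (older ones also take a vmas array), so treat this as illustrative only.

    #include <linux/mm.h>

    long pin_foreign_pages(struct mm_struct *mm, unsigned long start,
                           unsigned long nr_pages, struct page **pages)
    {
        int locked = 1;         /* we take mmap_lock before calling gup */
        long pinned;

        mmap_read_lock(mm);
        pinned = pin_user_pages_remote(mm, start, nr_pages, FOLL_WRITE,
                                       pages, &locked);
        if (locked)
            mmap_read_unlock(mm);   /* only if gup did not drop it for us */

        return pinned;
    }
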
/linux/arch/arc/include/asm/spinlock.h
     23: " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */  (in arch_spin_lock)
     24: " scond %[LOCKED], [%[slock]] \n" /* acquire */  (in arch_spin_lock)
     29: [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)  (in arch_spin_lock)
     50: " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */  (in arch_spin_trylock)
     51: " scond %[LOCKED], [%[slock]] \n" /* acquire */  (in arch_spin_trylock)
     59: [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)  (in arch_spin_trylock)
     95: " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */  (in arch_read_lock)
    115: " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */  (in arch_read_trylock)

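The ARC assembly above is an LL/SC loop: llock the lock word, spin while it reads LOCKED, otherwise scond LOCKED back and retry if the conditional store failed. The same shape in portable C11, with compare-exchange standing in for the llock/scond pair; the names and the test-and-test-and-set refinement are mine, not the arch code's.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define UNLOCKED 0
    #define LOCKED   1

    void spin_lock(_Atomic int *slock)
    {
        int val;

        for (;;) {
            val = UNLOCKED;
            /* cmpxchg plays the role of llock/scond: it only stores LOCKED
             * if the word still reads UNLOCKED. */
            if (atomic_compare_exchange_weak_explicit(slock, &val, LOCKED,
                                                      memory_order_acquire,
                                                      memory_order_relaxed))
                return;
            while (atomic_load_explicit(slock, memory_order_relaxed) == LOCKED)
                ;   /* spin while LOCKED, like the "breq ... 1b" above */
        }
    }

    bool spin_trylock(_Atomic int *slock)
    {
        int val = UNLOCKED;

        /* "already LOCKED, just bail" */
        return atomic_compare_exchange_strong_explicit(slock, &val, LOCKED,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }

    void spin_unlock(_Atomic int *slock)
    {
        atomic_store_explicit(slock, UNLOCKED, memory_order_release);
    }
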
/linux/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.yaml
    49: mux-locked:
    63: If mux-locked is not present, the multiplexer is assumed to be parent-locked.
    66: The properties of mux-locked and parent-locked multiplexers are discussed
    89: mux-locked;

/linux/drivers/video/fbdev/ep93xx-fb.c
    27: #define EP93XXFB_VLINES_TOTAL 0x0000 /* SW locked */
    28: #define EP93XXFB_VSYNC 0x0004 /* SW locked */
    29: #define EP93XXFB_VACTIVE 0x0008 /* SW locked */
    30: #define EP93XXFB_VBLANK 0x0228 /* SW locked */
    31: #define EP93XXFB_VCLK 0x000c /* SW locked */
    34: #define EP93XXFB_HCLKS_TOTAL 0x0010 /* SW locked */
    35: #define EP93XXFB_HSYNC 0x0014 /* SW locked */
    36: #define EP93XXFB_HACTIVE 0x0018 /* SW locked */
    37: #define EP93XXFB_HBLANK 0x022c /* SW locked */
    38: #define EP93XXFB_HCLK 0x001c /* SW locked */
    [all …]

/linux/tools/perf/pmu-events/arch/x86/broadwellde/other.json
    32: "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
    36: …L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory a…

/linux/tools/perf/pmu-events/arch/x86/broadwellx/other.json
    32: "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
    36: …L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory a…