/linux/tools/testing/selftests/net/forwarding/
  bridge_locked_port.sh
    102  bridge link set dev $swp1 locked on
    112  bridge link set dev $swp1 locked off
    118  log_test "Locked port ipv4"
    133  bridge link set dev $swp1 locked on
    142  bridge link set dev $swp1 locked off
    150  log_test "Locked port vlan"
    161  bridge link set dev $swp1 locked on
    170  bridge link set dev $swp1 locked off
    176  log_test "Locked port ipv6"
    187  bridge link set dev $swp1 learning on locked on
    [all …]

/linux/Documentation/i2c/
  i2c-topology.rst
    40  mux-locked or parent-locked muxes.
    43  Mux-locked muxes
    46  Mux-locked muxes does not lock the entire parent adapter during the
    48  adapter are locked. Mux-locked muxes are mostly interesting if the
    50  their tasks. Since the parent adapter is not fully locked during the
    55  Mux-locked Example
    62  | root |--+--| locked | '--------'
    87  Mux-locked caveats
    90  When using a mux-locked mux, be aware of the following restrictions:
    93  If you build a topology with a mux-locked mux being the parent
    [all …]

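The i2c-topology.rst matches above contrast mux-locked and parent-locked muxes: a mux-locked mux keeps the parent adapter usable for unrelated transfers and only holds the mux itself across the select-transfer-deselect sequence. As a rough sketch of how a mux driver asks for that behaviour with the in-kernel i2c-mux core, assuming hypothetical mydev_* names and omitting everything but the allocation (not a complete driver):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>

/* Hypothetical channel-select callbacks for an imaginary 2-channel mux. */
static int mydev_mux_select(struct i2c_mux_core *muxc, u32 chan)
{
	/* Point the hardware at @chan; runs with only the mux held. */
	return 0;
}

static int mydev_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
{
	return 0;
}

static int mydev_mux_setup(struct device *dev, struct i2c_adapter *parent)
{
	struct i2c_mux_core *muxc;

	/*
	 * I2C_MUX_LOCKED requests mux-locked semantics: the parent adapter
	 * stays available for unrelated transfers while a child transaction
	 * is in flight; only the mux is held across select/deselect.
	 */
	muxc = i2c_mux_alloc(parent, dev, 2 /* children */, 0 /* priv size */,
			     I2C_MUX_LOCKED, mydev_mux_select,
			     mydev_mux_deselect);
	if (!muxc)
		return -ENOMEM;

	/* Child adapters would be registered here with i2c_mux_add_adapter(). */
	return 0;
}

Passing 0 instead of I2C_MUX_LOCKED would give the default parent-locked behaviour described in the same document.
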
/linux/fs/resctrl/
  pseudo_lock.c
    29   * pseudo-locked regions.
    86   * @minor: The minor number of the device representing pseudo-locked region
    89   * pseudo-locked region it belongs to. This is done by matching the minor
    90   * number of the device to the pseudo-locked region it belongs.
    92   * Minor numbers are assigned at the time a pseudo-locked region is associated
    95   * Return: On success return pointer to resource group owning the pseudo-locked
    113  * @list: Entry within the @pm_reqs list for a pseudo-locked region
    134  * @plr: Pseudo-locked region
    188  * All content of the pseudo-locked region is reset - any memory allocated
    211  * Called after user provided a schemata to be pseudo-locked. From the
    [all …]

/linux/include/drm/
  drm_exec.h
    29  * @num_objects: number of objects locked
    39  * @objects: array of the locked objects
    49  * @prelocked: already locked GEM object due to contention
    59  * Return: Pointer to the locked object corresponding to @index if
    60  * index is within the number of locked objects. NULL otherwise.
    69  * drm_exec_for_each_locked_object - iterate over all the locked objects
    74  * Iterate over all the locked GEM objects inside the drm_exec object.
    80  * drm_exec_for_each_locked_object_reverse - iterate over all the locked
    86  * Iterate over all the locked GEM objects inside the drm_exec object in
    95  * drm_exec_until_all_locked - loop until all GEM objects are locked
    [all …]

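The drm_exec.h matches above reference drm_exec_until_all_locked() and the locked-object iterators. A rough sketch of the usual calling pattern, assuming a recent kernel where drm_exec_init() takes flags plus an object-count hint, and with a hypothetical caller-supplied object array and minimal error handling:

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

/* Lock a hypothetical array of GEM objects, retrying on contention. */
static int lock_all_bos(struct drm_gem_object **objs, unsigned int count)
{
	struct drm_exec exec;
	unsigned int i;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, count);

	/*
	 * Loop until every object is locked; on contention the helper drops
	 * what it holds and restarts with the contended object taken first.
	 */
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < count; i++) {
			ret = drm_exec_lock_obj(&exec, objs[i]);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}

	/* ... use the locked objects, reserve fence slots, etc. ... */

out:
	drm_exec_fini(&exec);	/* unlocks everything and drops the references */
	return ret;
}

drm_exec_prepare_obj() could be used instead of drm_exec_lock_obj() when dma-fence slots also need to be reserved, per the drm_exec.c comments further down.
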
/linux/kernel/locking/
  qspinlock_paravirt.h
    92   try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) { in pv_hybrid_queued_unfair_trylock()
    124  return !READ_ONCE(lock->locked) && in trylock_clear_pending()
    142  * Try to clear pending bit & set locked bit in trylock_clear_pending()
    286  * Wait for node->locked to become true, halt the vcpu after a short spin.
    299  if (READ_ONCE(node->locked)) in pv_wait_node()
    309  * Order pn->state vs pn->locked thusly: in pv_wait_node()
    311  * [S] pn->state = VCPU_HALTED [S] next->locked = 1 in pv_wait_node()
    313  * [L] pn->locked [RmW] pn->state = VCPU_HASHED in pv_wait_node()
    319  if (!READ_ONCE(node->locked)) { in pv_wait_node()
    333  * If the locked flag is still not set after wakeup, it is a in pv_wait_node()
    [all …]
  mcs_spinlock.h
    53   * on this node->locked until the previous lock holder sets the node->locked
    62   node->locked = 0;
    74   * Lock acquired, don't need to set node->locked to 1. Threads in mcs_spin_lock()
    75   * only spin on its own node->locked value for lock acquisition. in mcs_spin_lock()
    77   * and does not proceed to spin on its own node->locked, this in mcs_spin_lock()
    79   * audit lock status, then set node->locked value here. in mcs_spin_lock()
    86   arch_mcs_spin_lock_contended(&node->locked); in mcs_spin_lock()
    110  arch_mcs_spin_unlock_contended(&next->locked); in mcs_spin_unlock()
    20   int locked; /* 1 if lock acquired */ global() member

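The mcs_spinlock.h matches above describe the core MCS idea: each waiter spins only on its own node->locked flag, and the previous holder hands the lock over by setting the next node's flag. A minimal user-space rendering of that idea in C11 atomics (not the kernel's implementation, which uses arch-specific helpers and per-CPU nodes) could look like:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_bool locked;		/* becomes true when the lock is handed to us */
};

struct mcs_lock {
	_Atomic(struct mcs_node *) tail;
};

static void mcs_lock(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, false, memory_order_relaxed);

	/* Publish ourselves as the new tail of the waiter queue. */
	prev = atomic_exchange_explicit(&lock->tail, node, memory_order_acq_rel);
	if (!prev)
		return;			/* queue was empty: lock acquired immediately */

	/* Link behind the previous waiter and spin on our own flag only. */
	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;
}

static void mcs_unlock(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		/* No known successor: try to swing the tail back to empty. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong_explicit(&lock->tail, &expected, NULL,
							    memory_order_acq_rel,
							    memory_order_acquire))
			return;
		/* A successor is enqueueing; wait for it to link itself. */
		while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
			;
	}
	/* Hand the lock over by setting the successor's own flag. */
	atomic_store_explicit(&next->locked, true, memory_order_release);
}
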
/linux/scripts/coccinelle/locks/
  double_lock.cocci
    17  @locked@
    42  position p1 != locked.p1;
    43  position locked.p;
    45  expression x <= locked.E1;
    46  expression E,locked.E1;
    65  expression x <= locked.E1;
    66  expression locked.E1;
    69  position locked.p,p1,p2;
  call_kern.cocci
    39   @locked exists@
    74   @depends on locked && patch@
    81   @depends on locked && !patch@
    90   p1 << locked.p1;
    91   p2 << locked.p2;
    101  p1 << locked.p1;
    102  p2 << locked.p2;

/linux/tools/testing/selftests/mm/
  mlock2-tests.c
    92   #define LOCKED "lo" in is_vmflag_set()
    133  bool locked; in get_value_for_name()
    136  locked = is_vmflag_set(addr, LOCKED); in get_value_for_name()
    137  if (!locked) in get_value_for_name()
    153  bool locked; in is_vma_lock_on_fault()
    156  locked = is_vmflag_set(addr, LOCKED); in is_vma_lock_on_fault()
    157  if (!locked) in is_vma_lock_on_fault()
    168  if (is_vmflag_set((unsigned long)map, LOCKED)) { in lock_check()
    105  #define LOCKED global() macro
    146  bool locked; is_vma_lock_on_fault() local
    166  bool locked; lock_check() local
    [all …]

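mlock2-tests.c above decides whether a mapping is locked by looking for the "lo" code on the VmFlags line of /proc/self/smaps. A simplified stand-alone sketch of the same idea (it scans every VMA rather than locating the exact mapping, and assumes a glibc with the mlock2() wrapper):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Return 1 if any VmFlags line in /proc/self/smaps contains @flag. */
static int any_vma_has_flag(const char *flag)
{
	char line[1024];
	FILE *f = fopen("/proc/self/smaps", "r");
	int found = 0;

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "VmFlags:", 8) && strstr(line, flag)) {
			found = 1;
			break;
		}
	}
	fclose(f);
	return found;
}

int main(void)
{
	size_t len = 4096;
	void *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (map == MAP_FAILED)
		return 1;

	/* MLOCK_ONFAULT locks pages as they are faulted in. */
	if (mlock2(map, len, MLOCK_ONFAULT))
		perror("mlock2");

	printf("some VMA carries the \"lo\" flag: %d\n", any_vma_has_flag(" lo "));

	munmap(map, len);
	return 0;
}
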
/linux/drivers/gpu/drm/
  drm_exec.c
    15   * unlocks all previously locked GEM objects and locks the contended one first
    18   * After an object is locked fences slots can optionally be reserved on the
    74   * Initialize the object and make sure that we can track locked objects.
    99   * Unlock all locked objects, drop the references to objects and free all memory
    119  * objects locked.
    140  /* Track the locked object in the array */
    161  /* Make sure the contended object is locked first */
    204  * already locked (can be suppressed by setting the DRM_EXEC_IGNORE_DUPLICATES
    256  * Unlock the GEM object and remove it from the collection of locked objects.
    257  * Should only be used to unlock the most recently locked objects. It's not time
    [all …]

/linux/drivers/media/dvb-frontends/
  lgs8gxx.c
    295  static int lgs8gxx_is_locked(struct lgs8gxx_state *priv, u8 *locked) in lgs8gxx_is_locked() argument
    308  *locked = ((t & 0x80) == 0x80) ? 1 : 0; in lgs8gxx_is_locked()
    310  *locked = ((t & 0xC0) == 0xC0) ? 1 : 0; in lgs8gxx_is_locked()
    315  static int lgs8gxx_wait_ca_lock(struct lgs8gxx_state *priv, u8 *locked) in lgs8gxx_wait_ca_lock() argument
    331  *locked = (ret == 0) ? 1 : 0; in lgs8gxx_wait_ca_lock()
    359  u8 *locked) in lgs8gxx_autolock_gi() argument
    384  err = lgs8gxx_wait_ca_lock(priv, locked); in lgs8gxx_autolock_gi()
    385  if (err || !(*locked)) in lgs8gxx_autolock_gi()
    393  *locked = 0; in lgs8gxx_autolock_gi()
    403  u8 locked = 0, tmp_gi; in lgs8gxx_auto_detect() local
    [all …]

/linux/drivers/net/ethernet/intel/ice/
  ice_dcb_lib.h
    18   int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
    24   ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
    26   void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
    28   int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
    96   ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked) in ice_init_pf_dcb() argument
    105  bool __always_unused locked) in ice_pf_dcb_cfg() argument
    134  static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { } in ice_pf_dcb_recfg() argument

/linux/fs/btrfs/
  subpage.c
    77   * and doesn't need to be locked. in btrfs_attach_subpage()
    232  const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); in btrfs_subpage_end_and_test_lock()
    246  * This @locked_page is locked by plain lock_page(), thus its in btrfs_subpage_end_and_test_lock()
    247  * subpage::locked is 0. Handle them in a special way. in btrfs_subpage_end_and_test_lock()
    265  * Handle different locked folios:
    270  * - folio locked but without any subpage locked
    275  * - folio locked with subpage range locked.
    276  * We go through the locked sectors inside the range and clear their locked
    278  * the last locked range.
    293  * For subpage case, there are two types of locked page. With or in btrfs_folio_end_lock()
    [all …]

/linux/drivers/watchdog/
  intel_oc_wdt.c
    44   bool locked; member
    61   if (oc_wdt->locked) in intel_oc_wdt_start()
    126  oc_wdt->locked = !!(val & INTEL_OC_WDT_CTL_LCK); in intel_oc_wdt_setup()
    136  if (oc_wdt->locked) { in intel_oc_wdt_setup()
    144  * If we are locked read the current timeout value in intel_oc_wdt_setup()
    151  "Register access locked, heartbeat fixed at: %u s\n", in intel_oc_wdt_setup()
    154  } else if (oc_wdt->locked) { in intel_oc_wdt_setup()
    156  * In case the watchdog is disabled and locked there in intel_oc_wdt_setup()

/linux/include/asm-generic/
  qspinlock_types.h
    25  u8 locked; member
    40  u8 locked; member
    55  * 0- 7: locked byte
    62  * 0- 7: locked byte
    71  #define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)

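qspinlock_types.h above carves a 32-bit lock word into a locked byte, a pending bit and a tail. As a purely illustrative decoder, assuming the layout its comments describe for configurations with fewer than 16K CPUs (bits 0-7 locked byte, bit 8 pending, bits 16-17 tail index, bits 18-31 tail CPU + 1):

#include <stdint.h>
#include <stdio.h>

/*
 * Bit layout assumed here (NR_CPUS < 16K case from qspinlock_types.h):
 * 0-7 locked byte, 8 pending, 16-17 tail index, 18-31 tail CPU + 1.
 * User-space illustration only, not kernel code.
 */
#define Q_LOCKED_MASK    0x000000ffu
#define Q_PENDING_BIT    (1u << 8)
#define Q_TAIL_IDX_MASK  0x00030000u
#define Q_TAIL_CPU_MASK  0xfffc0000u

static void decode_qspinlock(uint32_t val)
{
	unsigned int locked  = val & Q_LOCKED_MASK;
	unsigned int pending = !!(val & Q_PENDING_BIT);
	unsigned int tailidx = (val & Q_TAIL_IDX_MASK) >> 16;
	unsigned int tailcpu = (val & Q_TAIL_CPU_MASK) >> 18;

	/* tail CPU is stored as cpu + 1, so -1 here means "no queued waiter" */
	printf("locked=%u pending=%u tail(idx=%u, cpu=%d)\n",
	       locked, pending, tailidx, (int)tailcpu - 1);
}

int main(void)
{
	decode_qspinlock(0x00000001);	/* uncontended: just the locked byte */
	decode_qspinlock(0x00040101);	/* locked + pending + a queued waiter */
	return 0;
}
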
/linux/drivers/media/common/siano/
  smscoreapi.h
    677  u32 is_rf_locked; /* 0 - not locked, 1 - locked */
    678  u32 is_demod_locked; /* 0 - not locked, 1 - locked */
    802  u32 is_rf_locked; /* 0 - not locked, 1 - locked */
    803  u32 is_demod_locked; /* 0 - not locked, 1 - locked */
    816  u32 modem_state; /* 0 - Acquisition, 1 - Locked */
    844  u32 is_rf_locked; /* 0 - not locked, 1 - locked */
    845  u32 is_demod_locked; /* 0 - not locked, 1 - locked */
    858  u32 modem_state; /* 0 - Acquisition, 1 - Locked */
    942  u32 is_demod_locked; /* 0 - not locked, 1 - locked */
    946  u32 is_rf_locked; /* 0 - not locked, 1 - locked */
    [all …]

/linux/mm/
  gup.c
    1144  * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
    1149  int *locked) in faultin_page() argument
    1194  *locked = 0; in faultin_page()
    1216  *locked = 0; in faultin_page()
    1369  * @locked: whether we're still with the mmap_lock held
    1404  * be released. If this happens *@locked will be set to 0 on return.
    1417  int *locked) in __get_user_pages() argument
    1489  PTR_ERR(page) == -EMLINK, locked); in __get_user_pages()
    1699  * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
    1701  * set (*locked = 0).
    [all …]

/linux/Documentation/filesystems/
  directory-locking.rst
    44   * decide which of the source and target need to be locked.
    45   The source needs to be locked if it's a non-directory, target - if it's
    49   are non-directories - the source because it wouldn't need to be locked
    66   to be read, modified or removed by method will be locked by the caller.
    89   dcache trees. Lookup is already holding the parent locked. If alias is
    102  all we change is the view in dcache. Moreover, holding a directory locked
    185  Each operation in the minimal cycle must have locked at least
    204  In other words, we have a cross-directory rename that locked
    213  Consider the order in which directories are locked by the
    215  Dn and D1 would have to be among those, with Dn locked before D1.
    [all …]
  resctrl.rst
    128  Corresponding region is pseudo-locked. No
    332  When the resource group is in pseudo-locked mode this file will
    334  pseudo-locked region.
    356  cache pseudo-locked region is created by first writing
    358  pseudo-locked region's schemata to the resource group's "schemata"
    359  file. On successful pseudo-locked region creation the mode will
    360  automatically change to "pseudo-locked".
    709  pseudo-locked memory is made accessible to user space where an
    713  The creation of a cache pseudo-locked region is triggered by a request
    715  to be pseudo-locked. The cache pseudo-locked region is created as follows:
    [all …]

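Per the resctrl.rst matches above, a cache pseudo-locked region is created by putting a resource group into "pseudo-locksetup" mode and then writing the region's schemata, after which the mode changes to "pseudo-locked". A hedged user-space sketch of those two writes; the group directory name and the "L2:1=0x3" capacity mask are made-up examples, and resctrl is assumed to be already mounted at /sys/fs/resctrl:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

/* Write @text to @path; returns 0 on success. */
static int write_str(const char *path, const char *text)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(text, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	/* Hypothetical resource group for the pseudo-locked region. */
	if (mkdir("/sys/fs/resctrl/demo_plock", 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}
	/* Step 1: request pseudo-lock setup mode for the group. */
	if (write_str("/sys/fs/resctrl/demo_plock/mode", "pseudo-locksetup"))
		return 1;
	/* Step 2: write the schemata of the region to pseudo-lock.
	 * On success the group's mode changes to "pseudo-locked". */
	if (write_str("/sys/fs/resctrl/demo_plock/schemata", "L2:1=0x3"))
		return 1;
	return 0;
}

The real schemata string must name a cache instance and capacity bitmask that the machine actually supports.
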
/linux/include/linux/
  ww_mutex.h
    69   * @first_lock_dep_map: fake lockdep_map for first locked ww_mutex.
    71   * lockdep requires the lockdep_map for the first locked ww_mutex
    74   * fake locked ww_mutex lockdep map between ww_acquire_init() and
    106  * It is not allowed to initialize an already locked mutex.
    178  * code and clearly designated the acquire phase from actually using the locked
    242  * locked. The mutex must first be initialized (or statically defined) before it
    243  * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
    274  * locked. The mutex must first be initialized (or statically defined) before it
    275  * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
    362  * use of the mutex is forbidden. The mutex must not be locked when
    [all …]

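The ww_mutex.h matches above refer to the acquire phase that starts with ww_acquire_init(). A kernel-style sketch of the usual deadlock-avoiding pattern for taking two ww_mutexes; demo_obj, demo_ww_class and the two-object scope are hypothetical and error handling is trimmed:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);	/* hypothetical wait/wound class */

struct demo_obj {
	struct ww_mutex lock;
	/* ... payload ... */
};

/* Lock two objects in either order without deadlocking. */
static int demo_lock_pair(struct demo_obj *a, struct demo_obj *b)
{
	struct demo_obj *first = a, *second = b, *tmp;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);

	ret = ww_mutex_lock(&first->lock, &ctx);
	if (ret)
		goto out_fini;

	while ((ret = ww_mutex_lock(&second->lock, &ctx)) == -EDEADLK) {
		/*
		 * Wounded: drop what we hold, sleep on the contended mutex,
		 * then retry with the lock order swapped.
		 */
		ww_mutex_unlock(&first->lock);
		tmp = first;
		first = second;
		second = tmp;
		ww_mutex_lock_slow(&first->lock, &ctx);
	}
	if (ret) {			/* interruptible variants can also fail here */
		ww_mutex_unlock(&first->lock);
		goto out_fini;
	}

	ww_acquire_done(&ctx);		/* acquire phase over; use the objects */

	/* ... critical section touching a and b ... */

	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
	ret = 0;
out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}
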
/linux/arch/arc/include/asm/
  spinlock.h
    23   " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ in arch_spin_lock()
    24   " scond %[LOCKED], [%[slock]] \n" /* acquire */ in arch_spin_lock()
    29   [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) in arch_spin_lock()
    50   " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ in arch_spin_trylock()
    51   " scond %[LOCKED], [%[slock]] \n" /* acquire */ in arch_spin_trylock()
    59   [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) in arch_spin_trylock()
    95   " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */ in arch_read_lock()
    115  " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ in arch_read_trylock()

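The ARC assembly above spins with llock/scond while the lock word equals LOCKED, and the trylock variant bails out instead of spinning. The same two behaviours expressed portably with C11 atomics (illustrative only, not the arch code):

#include <stdatomic.h>
#include <stdbool.h>

#define UNLOCKED 0
#define LOCKED   1

typedef struct { atomic_int slock; } spinlock_t;

static void spin_lock(spinlock_t *l)
{
	int expected;

	/* Spin while the word reads LOCKED, then try to claim it. */
	do {
		while (atomic_load_explicit(&l->slock, memory_order_relaxed) == LOCKED)
			;	/* equivalent of "breq ..., LOCKED, 1b" */
		expected = UNLOCKED;
	} while (!atomic_compare_exchange_weak_explicit(&l->slock, &expected, LOCKED,
							memory_order_acquire,
							memory_order_relaxed));
}

static bool spin_trylock(spinlock_t *l)
{
	int expected = UNLOCKED;

	/* Already LOCKED? Just bail, like the "breq ..., 4f" path. */
	return atomic_compare_exchange_strong_explicit(&l->slock, &expected, LOCKED,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void spin_unlock(spinlock_t *l)
{
	atomic_store_explicit(&l->slock, UNLOCKED, memory_order_release);
}
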
/linux/Documentation/devicetree/bindings/i2c/
  i2c-mux-gpmux.yaml
    49  mux-locked:
    63  If mux-locked is not present, the multiplexer is assumed to be parent-locked.
    66  The properties of mux-locked and parent-locked multiplexers are discussed
    89  mux-locked;

/linux/drivers/video/fbdev/
  ep93xx-fb.c
    27  #define EP93XXFB_VLINES_TOTAL 0x0000 /* SW locked */
    28  #define EP93XXFB_VSYNC 0x0004 /* SW locked */
    29  #define EP93XXFB_VACTIVE 0x0008 /* SW locked */
    30  #define EP93XXFB_VBLANK 0x0228 /* SW locked */
    31  #define EP93XXFB_VCLK 0x000c /* SW locked */
    34  #define EP93XXFB_HCLKS_TOTAL 0x0010 /* SW locked */
    35  #define EP93XXFB_HSYNC 0x0014 /* SW locked */
    36  #define EP93XXFB_HACTIVE 0x0018 /* SW locked */
    37  #define EP93XXFB_HBLANK 0x022c /* SW locked */
    38  #define EP93XXFB_HCLK 0x001c /* SW locked */
    [all …]

/linux/tools/perf/pmu-events/arch/x86/broadwellde/
  other.json
    32  "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
    36  …L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory a…

/linux/tools/perf/pmu-events/arch/x86/broadwellx/
  other.json
    32  "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
    36  …L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory a…