
Full-text search for "guard" (results 1 – 25 of 968, sorted by relevance)


/linux/rust/kernel/sync/
condvar.rs
8 use super::{lock::Backend, lock::Guard, LockClassKey};
59 /// let mut guard = e.value.lock();
60 /// while *guard != v {
61 /// e.value_changed.wait(&mut guard);
117 guard: &mut Guard<'_, T, B>, in wait_internal()
131 let ret = guard.do_unlocked(|| unsafe { bindings::schedule_timeout(timeout_in_jiffies) }); in wait_internal()
141 /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
145 pub fn wait<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) { in wait()
146 self.wait_internal(TASK_UNINTERRUPTIBLE, guard, MAX_SCHEDULE_TIMEOUT); in wait()
156 pub fn wait_interruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) -> bool { in wait_interruptible()
[all …]
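
The CondVar::wait() shown above takes the lock's Guard and atomically releases the lock while the task sleeps, reacquiring it before returning, which is why the doc example rechecks the predicate in a loop. A minimal userspace sketch of the same pattern with POSIX threads; the names value and value_changed mirror the doc example and are illustrative only:

/* Sketch of the wait-in-a-loop pattern from the condvar.rs doc example,
 * translated to POSIX threads. pthread_cond_wait(), like CondVar::wait(),
 * atomically drops the mutex while sleeping and reacquires it on wakeup. */
#include <pthread.h>

static pthread_mutex_t value_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t value_changed = PTHREAD_COND_INITIALIZER;
static int value;

/* Block until value == v; the predicate is rechecked after every wakeup
 * because spurious wakeups are allowed. */
static void wait_for_value(int v)
{
    pthread_mutex_lock(&value_lock);        /* analogous to e.value.lock() */
    while (value != v)
        pthread_cond_wait(&value_changed, &value_lock);
    pthread_mutex_unlock(&value_lock);      /* Rust drops the guard here */
}

static void set_value(int v)
{
    pthread_mutex_lock(&value_lock);
    value = v;
    pthread_cond_broadcast(&value_changed); /* CondVar::notify_all() */
    pthread_mutex_unlock(&value_lock);
}
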
lock.rs
3 //! Generic kernel lock and guard.
5 //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
172 pub fn lock(&self) -> Guard<'_, T, B> { in lock()
177 unsafe { Guard::new(self, state) } in lock()
182 /// Returns a guard that can be used to access the data protected by the lock if successful.
185 pub fn try_lock(&self) -> Option<Guard<'_, T, B>> { in try_lock()
188 unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) } in try_lock()
192 /// A lock guard.
195 /// when a guard goes out of scope. It also provides a safe and convenient way to access the data
197 #[must_use = "the lock unlocks immediately when the guard is unused"]
[all …]
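
try_lock() above returns a Guard only when the lock could be taken without blocking, and the guard releases the lock when it goes out of scope. A rough userspace analogue with pthread_mutex_trylock(), where the return value plays the role of the Option<Guard> and the helper name try_bump is illustrative:

/* Userspace analogue of Lock::try_lock(): attempt the lock without
 * sleeping and only touch the protected data on success. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static int protected_data;

static bool try_bump(void)
{
    if (pthread_mutex_trylock(&data_lock) != 0)
        return false;                   /* contended: None in the Rust API */

    protected_data++;                   /* safe: we hold the lock */
    pthread_mutex_unlock(&data_lock);   /* what dropping the Guard does */
    return true;
}
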
/linux/tools/testing/selftests/mm/
guard-regions.c
372 /* Establish a guard page at the end of the mapping. */ in TEST_F()
376 /* Check that both guard pages result in SIGSEGV. */ in TEST_F()
380 /* Remove the first guard page. */ in TEST_F()
386 /* Remove the last guard page. */ in TEST_F()
395 * these be faulted in, so this also tests that we can install guard in TEST_F()
400 /* Make sure they are all guard pages. */ in TEST_F()
414 /* Remove guard pages. */ in TEST_F()
425 * Now remove all guard pages, make sure we don't remove existing in TEST_F()
478 * Now mark the whole range as guard pages and make sure all VMAs are as in TEST_F()
592 /* We want guard markers at start/end of each VMA. */ in TEST_F()
[all …]
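
The selftest checks that guard pages can be installed and removed and that any access to one raises SIGSEGV. The sketch below demonstrates the fault with the classic mprotect(PROT_NONE) guard-page technique rather than the selftest's own installation mechanism, probing the page from a forked child so the parent can observe the signal:

/* Demonstrate that touching a guard page raises SIGSEGV. */
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char *map = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (map == MAP_FAILED)
        return 1;

    /* Turn the middle page into a guard page. */
    if (mprotect(map + page, page, PROT_NONE))
        return 1;

    pid_t pid = fork();
    if (pid == 0) {
        map[page] = 1;          /* child: touch the guard page */
        _exit(0);               /* not reached if the guard works */
    }

    int status;
    waitpid(pid, &status, 0);
    printf("guard page %s\n",
           WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV
               ? "faulted as expected" : "did NOT fault");
    munmap(map, 3 * page);
    return 0;
}
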
/linux/rust/kernel/
xarray.rs
37 /// let mut guard = xa.lock();
39 /// assert_eq!(guard.get(0), None);
41 /// assert_eq!(guard.store(0, dead, GFP_KERNEL)?.as_deref(), None);
42 /// assert_eq!(guard.get(0).copied(), Some(0xdead));
44 /// *guard.get_mut(0).unwrap() = 0xffff;
45 /// assert_eq!(guard.get(0).copied(), Some(0xffff));
47 /// assert_eq!(guard.store(0, beef, GFP_KERNEL)?.as_deref().copied(), Some(0xffff));
48 /// assert_eq!(guard.get(0).copied(), Some(0xbeef));
50 /// guard.remove(0);
51 /// assert_eq!(guard.get(0), None);
[all …]
revocable.rs
34 /// let guard = v.try_access()?;
35 /// Some(guard.a + guard.b)
56 /// let guard = rcu::read_lock();
57 /// let e = v.try_access_with_guard(&guard)?;
96 /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
97 /// remain accessible while the guard is alive. In such cases, callers are not allowed to sleep
100 let guard = rcu::read_lock(); in try_access() localVariable
104 Some(RevocableGuard::new(self.data.get(), guard)) in try_access()
115 /// remain accessible while the rcu read side guard is alive. In such cases, callers are not
118 pub fn try_access_with_guard<'a>(&'a self, _guard: &'a rcu::Guard) -> Option<&'a T> { in try_access_with_guard()
[all …]
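
try_access() above hands out a guard only while the object has not been revoked, and the object stays accessible for as long as the guard is alive; the kernel version uses RCU read-side guards. A hedged userspace sketch of the same idea using a rwlock as the guard, with illustrative names and with rwlock initialization omitted:

/* Userspace sketch of the Revocable pattern: try_access() takes a read
 * lock as the "guard" and only hands out the pointer if it has not been
 * revoked; revoke() takes the write lock, so it cannot complete while
 * any guard is still held. */
#include <pthread.h>
#include <stddef.h>

struct revocable {
    pthread_rwlock_t lock;      /* pthread_rwlock_init() during setup */
    void *data;                 /* NULL once revoked */
};

/* On success the caller holds the read lock and must call put_access(). */
static void *try_access(struct revocable *r)
{
    pthread_rwlock_rdlock(&r->lock);
    if (r->data)
        return r->data;
    pthread_rwlock_unlock(&r->lock);
    return NULL;
}

static void put_access(struct revocable *r)
{
    pthread_rwlock_unlock(&r->lock);    /* dropping the guard */
}

static void revoke(struct revocable *r)
{
    pthread_rwlock_wrlock(&r->lock);    /* waits for outstanding guards */
    r->data = NULL;
    pthread_rwlock_unlock(&r->lock);
}
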
/linux/drivers/android/binder/
node.rs
10 sync::lock::{spinlock::SpinLockBackend, Guard},
326 guard: &mut Guard<'_, ProcessInner, SpinLockBackend>, in add_death()
328 self.inner.access_mut(guard).death_list.push_back(death); in add_death()
481 guard: &Guard<'_, ProcessInner, SpinLockBackend>, in populate_counts()
483 let inner = self.inner.access(guard); in populate_counts()
491 guard: &Guard<'_, ProcessInner, SpinLockBackend>, in populate_debug_info()
495 let inner = self.inner.access(guard); in populate_debug_info()
504 pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) { in force_has_count()
505 let inner = self.inner.access_mut(guard); in force_has_count()
520 guard: &mut Guard<'_, ProcessInner, SpinLockBackend>, in submit_oneway()
[all …]
/linux/drivers/block/rnull/
configfs.rs
148 let mut guard = this.data.lock(); in store() localVariable
150 if !guard.powered && power_op { in store()
151 guard.disk = Some(NullBlkDevice::new( in store()
152 &guard.name, in store()
153 guard.block_size, in store()
154 guard.rotational, in store()
155 guard.capacity_mib, in store()
156 guard.irq_mode, in store()
158 guard.powered = true; in store()
159 } else if guard.powered && !power_op { in store()
[all …]
/linux/drivers/virt/coco/guest/
report.c
96 guard(rwsem_write)(&tsm_rwsem); in tsm_report_privlevel_store()
121 guard(rwsem_read)(&tsm_rwsem); in tsm_report_privlevel_floor_show()
138 guard(rwsem_write)(&tsm_rwsem); in tsm_report_service_provider_store()
162 guard(rwsem_write)(&tsm_rwsem); in tsm_report_service_guid_store()
188 guard(rwsem_write)(&tsm_rwsem); in tsm_report_service_manifest_version_store()
204 guard(rwsem_write)(&tsm_rwsem); in tsm_report_inblob_write()
220 guard(rwsem_read)(&tsm_rwsem); in tsm_report_generation_show()
227 guard(rwsem_read)(&tsm_rwsem); in tsm_report_provider_show()
267 guard(rwsem_read)(&tsm_rwsem); in read_cached_report()
295 guard(rwsem_write)(&tsm_rwsem); in tsm_report_read()
[all …]
/linux/drivers/char/tpm/
tpm_tis_i2c.c
33 /* Default Guard Time of 250µs until interface capability register is read */
37 /* Guard Time of 250µs after I2C slave NACK */
41 /* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
277 * Guard Time:
282 * Before the Guard Time is read (or after the TPM failed to send an I2C NACK),
283 * a Guard Time of 250µs applies.
285 * Various flags in the same register indicate if a guard time is needed:
286 * - SR: <I2C read with repeated start> <guard time> <I2C read>
287 * - RR: <I2C read> <guard time> <I2C read>
288 * - RW: <I2C read> <guard time> <I2C write>
[all …]
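
The comments above describe a guard time, 250µs by default, that must elapse between certain I2C transactions before the TPM may be addressed again. A simple userspace sketch of enforcing such a delay; only the 250µs figure comes from the excerpt, the helpers and the single global timestamp are illustrative:

/* Sketch of enforcing a guard time between bus transactions: remember
 * when the last transfer finished and, before the next one, sleep for
 * whatever is left of the guard interval. */
#include <stdint.h>
#include <time.h>

#define GUARD_TIME_NS (250 * 1000)      /* 250 microseconds */

static struct timespec last_xfer_end;

static int64_t ns_since(const struct timespec *then)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return (int64_t)(now.tv_sec - then->tv_sec) * 1000000000 +
           (now.tv_nsec - then->tv_nsec);
}

/* Call before starting a transfer. */
static void wait_guard_time(void)
{
    int64_t elapsed = ns_since(&last_xfer_end);

    if (elapsed >= 0 && elapsed < GUARD_TIME_NS) {
        struct timespec delay = { .tv_nsec = GUARD_TIME_NS - elapsed };
        nanosleep(&delay, NULL);
    }
}

/* Call when a transfer completes. */
static void end_xfer(void)
{
    clock_gettime(CLOCK_MONOTONIC, &last_xfer_end);
}
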
/linux/include/rdma/
signature.h
33 * enum ib_t10_dif_bg_type - Signature T10-DIF block-guard types
45 * @bg_type: T10-DIF block guard type (CRC|CSUM)
47 * @bg: seed of guard computation.
48 * @app_tag: application tag of guard block
49 * @ref_tag: initial guard block reference tag.
103 * | GUARD | APPTAG | REFTAG |
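
The layout above places a GUARD field in front of the APPTAG and REFTAG of each T10-DIF tuple; with the CRC block-guard type, the guard is a CRC-16 over the data block. A bit-at-a-time sketch, assuming the standard T10-DIF polynomial 0x8BB7 and taking the seed mentioned in the @bg description as a parameter; real implementations use table-driven or instruction-accelerated versions:

/* Compute the GUARD field of a T10-DIF tuple for the CRC guard type. */
#include <stddef.h>
#include <stdint.h>

static uint16_t t10_dif_crc16(uint16_t seed, const uint8_t *data, size_t len)
{
    uint16_t crc = seed;        /* seed of the guard computation */

    while (len--) {
        crc ^= (uint16_t)*data++ << 8;
        for (int bit = 0; bit < 8; bit++)
            crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
                                 : (uint16_t)(crc << 1);
    }
    return crc;
}
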
/linux/Documentation/mm/
vmalloced-kernel-stacks.rst
25 Virtually mapped kernel stacks with guard pages cause kernel stack
30 support for virtually mapped stacks with guard pages. This feature
53 - If the stack overflows into a guard page, something reasonable
64 with guard pages. This causes kernel stack overflows to be caught
123 Leading and trailing guard pages help detect stack overflows. When the stack
124 overflows into the guard pages, handlers have to be careful not to overflow
131 Testing VMAP allocation with guard pages
135 and trailing guard page? The following lkdtm tests can help detect any
/linux/arch/parisc/math-emu/
cnv_float.h
88 #define Dbl_to_sgl_mantissa(srcA,srcB,dest,inexact,guard,sticky,odd) \ argument
90 guard = Dbit3p2(srcB); \
92 inexact = guard | sticky; \
95 #define Dbl_to_sgl_denormalized(srcA,srcB,exp,dest,inexact,guard,sticky,odd,tiny) \ argument
101 guard = inexact >> 31; \
124 if (guard && (sticky || odd)) { \
134 guard = odd; \
136 inexact |= guard; \
144 guard = inexact >> 31; \
157 guard = inexact >> 31; \
[all …]
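
The macros above round a narrowed mantissa with the test guard && (sticky || odd), i.e. round to nearest, ties to even: the guard bit is the first discarded bit, sticky is the OR of the remaining discarded bits, and odd is the current least significant bit of the kept part. A standalone sketch of the same rule; renormalization after a carry-out is left to the caller:

/* Round a wide mantissa down to fewer bits using guard/sticky bits. */
#include <stdint.h>

static uint64_t round_mantissa(uint64_t mant, unsigned drop_bits)
{
    if (drop_bits == 0 || drop_bits >= 64)
        return mant;                                /* nothing to round */

    uint64_t result = mant >> drop_bits;
    uint64_t dropped = mant & ((1ull << drop_bits) - 1);
    int guard = (dropped >> (drop_bits - 1)) & 1;   /* first discarded bit */
    int sticky = (dropped & ((1ull << (drop_bits - 1)) - 1)) != 0;
    int odd = result & 1;

    if (guard && (sticky || odd))                   /* same test as cnv_float.h */
        result++;                                   /* may carry out; renormalize */
    return result;
}
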
/linux/rust/kernel/sync/lock/
global.rs
9 sync::lock::{Backend, Guard, Lock},
94 /// A guard for a [`GlobalLock`].
98 inner: Guard<'static, B::Item, B::Backend>,
161 // SAFETY: The lock is globally unique, so there can only be one guard. in as_ref()
169 // SAFETY: The lock is globally unique, so there can only be one guard. in as_mut()
200 /// let mut guard = MY_COUNTER.lock();
201 /// *guard += 1;
202 /// *guard
238 /// fn increment(&self, guard: &mut GlobalGuard<MY_MUTEX>) -> u32 {
239 /// let my_counter = self.my_counter.as_mut(guard);
/linux/drivers/gpu/drm/i915/
i915_vma_resource.h
82 * @guard: The size of guard area preceding and trailing the bind.
129 u32 guard; member
194 * @guard: The size of the guard area preceding and trailing the bind.
214 u32 guard) in i915_vma_resource_init() argument
232 vma_res->guard = guard; in i915_vma_resource_init()
/linux/drivers/gpio/
gpiolib.c
456 CLASS(gpio_chip_guard, guard)(desc); in gpiod_get_direction()
457 if (!guard.gc) in gpiod_get_direction()
471 if (!guard.gc->get_direction) in gpiod_get_direction()
474 ret = gpiochip_get_direction(guard.gc, offset); in gpiod_get_direction()
558 guard(srcu)(&gpio_devices_srcu); in gpio_name_to_desc()
562 guard(srcu)(&gdev->srcu); in gpio_name_to_desc()
953 guard(mutex)(&gpio_machine_hogs_mutex); in machine_gpiochip_add()
966 guard(srcu)(&gpio_devices_srcu); in gpiochip_setup_devs()
1360 guard(srcu)(&gpio_devices_srcu); in gpio_device_find()
1367 guard(srcu)(&gdev->srcu); in gpio_device_find()
[all …]
/linux/include/linux/
cleanup.h
45 * dropped when the scope where guard() is invoked ends::
49 * guard(pci_dev)(dev);
51 * The lifetime of the lock obtained by the guard() helper follows the
58 * guard(pci_dev)(dev); // pci_dev_lock() invoked here
66 * The ACQUIRE() macro can be used in all places that guard() can be
77 * Now, when a function uses both __free() and guard()/ACQUIRE(), or
122 * guard(mutex)(&lock);
136 * That bug is fixed by changing init() to call guard() and define +
139 * guard(mutex)(&lock);
320 * guard(name):
[all …]
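
guard() above arranges for the lock to be released automatically when the enclosing scope ends, so every return path is covered. The underlying mechanism is the compiler's cleanup attribute, on which cleanup.h builds its guard classes. A userspace sketch of the same idea with a pthread mutex; scoped_mutex_guard is a made-up macro for illustration, not the kernel one:

/* A variable with __attribute__((cleanup(...))) runs its cleanup
 * function when it goes out of scope, so the unlock happens
 * automatically on every return path. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void unlock_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

#define scoped_mutex_guard(m) \
    pthread_mutex_t *__guard __attribute__((cleanup(unlock_cleanup))) = (m); \
    pthread_mutex_lock(__guard)

static int increment(void)
{
    scoped_mutex_guard(&lock);  /* lock taken here */

    counter++;
    return counter;             /* unlock runs here, as with guard(mutex) */
}
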
/linux/Documentation/devicetree/bindings/display/
atmel,lcdc-display.yaml
28 - required: [ 'atmel,guard-time' ]
39 atmel,guard-time:
41 description: lcd guard time (Delay in frame periods)
74 - atmel,guard-time
86 atmel,guard-time = <9>;
/linux/arch/x86/include/asm/
cpu_entry_area.h
35 /* The exception stacks' physical storage. No guard pages required */
40 /* The effective cpu entry area mapping with guard pages. */
95 * a read-only guard page. On 32-bit the GDT must be writeable, so
96 * it needs an extra guard page.
116 * Exception stacks used for IST entries with guard pages.
/linux/scripts/
syscallnr.sh
57 guard=_ASM_$(basename "$outfile" |
62 echo "#ifndef $guard"
63 echo "#define $guard"
73 echo "#endif /* $guard */"
syscallhdr.sh
68 guard=_UAPI_ASM_$(basename "$outfile" |
73 echo "#ifndef $guard"
74 echo "#define $guard"
97 echo "#endif /* $guard */"
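
Both scripts derive an include-guard symbol from the output file name and wrap everything they emit in it, so the generated header has roughly this shape; the guard symbol and the single syscall line shown are illustrative only:

/* Illustrative shape of a header emitted by syscallhdr.sh. */
#ifndef _UAPI_ASM_UNISTD_64_H
#define _UAPI_ASM_UNISTD_64_H

#define __NR_read 0
/* ... one #define per syscall ... */

#endif /* _UAPI_ASM_UNISTD_64_H */
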
/linux/lib/crypto/tests/
sha3_kunit.c
28 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-before guard */
33 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-after guard */
37 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-before guard */
42 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-after guard */
46 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-before guard */
53 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-after guard */
57 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-before guard */
66 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-after guard */
70 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-before guard */
73 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Write-after guard */
[all …]
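
The zero bytes labelled write-before/write-after guard surround the test's buffers so that any out-of-bounds write by the code under test is detected when the guards are checked afterwards. The same technique in a standalone sketch, with fill_output() standing in for the code under test:

/* Canary-byte check: surround the output buffer with known bytes and
 * verify after the operation that none of them changed. */
#include <stdio.h>
#include <string.h>

#define GUARD_LEN 8
#define OUT_LEN 32

static void fill_output(unsigned char *out, size_t len)
{
    memset(out, 0xab, len);     /* placeholder for the real operation */
}

int main(void)
{
    unsigned char buf[GUARD_LEN + OUT_LEN + GUARD_LEN] = { 0 };
    unsigned char zeros[GUARD_LEN] = { 0 };

    fill_output(buf + GUARD_LEN, OUT_LEN);

    int before_ok = !memcmp(buf, zeros, GUARD_LEN);
    int after_ok = !memcmp(buf + GUARD_LEN + OUT_LEN, zeros, GUARD_LEN);

    printf("write-before guard %s, write-after guard %s\n",
           before_ok ? "intact" : "CORRUPTED",
           after_ok ? "intact" : "CORRUPTED");
    return !(before_ok && after_ok);
}
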
/linux/drivers/media/dvb-frontends/
lgs8gxx_priv.h
20 u16 curr_gi; /* current guard interval */
42 #define GI_MASK 0x03 /* Guard Interval Mask */
43 #define GI_420 0x00 /* 1/9 Guard Interval */
45 #define GI_945 0x02 /* 1/4 Guard Interval */
/linux/kernel/irq/
autoprobe.c
46 guard(raw_spinlock_irq)(&desc->lock); in probe_irq_on()
67 guard(raw_spinlock_irq)(&desc->lock); in probe_irq_on()
84 guard(raw_spinlock_irq)(&desc->lock); in probe_irq_on()
119 guard(raw_spinlock_irq)(&desc->lock); in probe_irq_mask()
157 guard(raw_spinlock_irq)(&desc->lock); in probe_irq_off()
/linux/drivers/irqchip/
irq-atmel-aic5.c
95 guard(raw_spinlock)(&bgc->lock); in aic5_mask()
111 guard(raw_spinlock)(&bgc->lock); in aic5_unmask()
123 guard(raw_spinlock)(&bgc->lock); in aic5_retrigger()
136 guard(raw_spinlock)(&bgc->lock); in aic5_set_type()
163 guard(raw_spinlock)(&bgc->lock); in aic5_suspend()
186 guard(raw_spinlock)(&bgc->lock); in aic5_resume()
220 guard(raw_spinlock)(&bgc->lock); in aic5_pm_shutdown()
282 guard(raw_spinlock_irqsave)(&bgc->lock); in aic5_irq_domain_xlate()
/linux/drivers/soc/qcom/
pmic_glink.c
105 guard(mutex)(&pg->state_lock); in pmic_glink_client_register()
122 guard(mutex)(&pg->state_lock); in pmic_glink_send()
234 guard(mutex)(&pg->state_lock); in pmic_glink_pdr_callback()
244 guard(mutex)(&__pmic_glink_lock); in pmic_glink_rpmsg_probe()
252 guard(mutex)(&pg->state_lock); in pmic_glink_rpmsg_probe()
265 guard(mutex)(&__pmic_glink_lock); in pmic_glink_rpmsg_remove()
270 guard(mutex)(&pg->state_lock); in pmic_glink_rpmsg_remove()
382 guard(mutex)(&__pmic_glink_lock); in pmic_glink_remove()
