Lines Matching +full:lock +full:- +full:less
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
47 * Therefore, except for special cases, like non-temporal memory accesses or
63 * The open-coded number is used instead of the symbolic expression to
82 * atomic_subtract_char(P, V) (*(u_char *)(P) -= (V))
87 * atomic_subtract_short(P, V) (*(u_short *)(P) -= (V))
92 * atomic_subtract_int(P, V) (*(u_int *)(P) -= (V))
99 * atomic_subtract_long(P, V) (*(u_long *)(P) -= (V))
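The entries above give the documented contract of each primitive, e.g. atomic_subtract_int(P, V) performs *(u_int *)(P) -= (V) as a single atomic step. A minimal userland sketch of that contract, assuming GCC or Clang on amd64; the demo_ name and the use of the __atomic builtins are illustrative stand-ins, not the header's code:

#include <stdio.h>

typedef unsigned int u_int;

static inline void
demo_atomic_subtract_int(volatile u_int *p, u_int v)
{
        /* Atomic *p -= v; the plain (non-_acq/_rel) variant implies no extra ordering. */
        (void)__atomic_fetch_sub(p, v, __ATOMIC_RELAXED);
}

int
main(void)
{
        u_int refcount = 4;

        demo_atomic_subtract_int(&refcount, 1);
        printf("%u\n", refcount);       /* prints 3 */
        return (0);
}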
105 * Always use lock prefixes. The result is slightly less optimal for
106 * UP systems, but it matters less now, and sometimes UP is emulated
117 __asm __volatile("lock; " OP \
126 __asm __volatile("lock; " OP \
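The two matches above are the core of the ATOMIC_ASM pattern: one lock-prefixed read-modify-write instruction per generated function. A simplified sketch of that pattern, assuming GCC-style inline assembly on amd64; the MY_ATOMIC_ASM name, its parameter list, and the my_ function names are illustrative and leaner than the real macro:

#define MY_ATOMIC_ASM(NAME, OP)                                         \
static inline void                                                      \
my_atomic_##NAME##_int(volatile unsigned int *p, unsigned int v)        \
{                                                                       \
        __asm __volatile("lock; " OP                                    \
            : "+m" (*p)                                                 \
            : "ir" (v)                                                  \
            : "cc");                                                    \
}

/* Each expansion stamps out one lock-prefixed read-modify-write helper. */
MY_ATOMIC_ASM(add, "addl %1,%0")        /* *p += v */
MY_ATOMIC_ASM(subtract, "subl %1,%0")   /* *p -= v */
MY_ATOMIC_ASM(set, "orl %1,%0")         /* *p |= v */

The lock prefix is what makes each read-modify-write indivisible across CPUs, which is why the comment above keeps it even when the kernel runs on a single processor.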
146 * Returns 0 on failure, non-zero on success.
155 " lock; cmpxchg %3,%1 ; " \
171 " lock; cmpxchg %3,%1 ; " \
195 " lock; xaddl %0,%1 ; " in atomic_fetchadd_int()
212 " lock; xaddq %0,%1 ; " in atomic_fetchadd_long()
226 " lock; btsl %2,%1 ; " in atomic_testandset_int()
241 " lock; btsq %2,%1 ; " in atomic_testandset_long()
256 " lock; btrl %2,%1 ; " in atomic_testandclear_int()
271 " lock; btrq %2,%1 ; " in atomic_testandclear_long()
286 * consistent fences in SMP kernels. We use "lock addl $0,mem" for a
289 * special address for "mem". In the kernel, we use a private per-cpu
291 * (-8(%rsp)).
298 __asm __volatile("lock; addl $0,%%gs:%c0" in __storeload_barrier()
301 __asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc"); in __storeload_barrier()
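On x86, any lock-prefixed read-modify-write also acts as a full store/load fence, so adding zero to a harmless location orders earlier stores before later loads without an mfence; the kernel form above uses a private per-CPU word through %gs, while the non-kernel form adds zero to the word just below the stack pointer, which the add leaves unchanged. A sketch of the technique together with a Dekker-style use where store/load ordering is exactly what matters, assuming GCC or Clang on amd64; the my_ name and the flag variables are illustrative:

static inline void
my_storeload_barrier(void)
{
        /* A locked add of zero is a harmless RMW that acts as a full fence. */
        __asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}

/* Dekker-style handshake: each thread publishes its flag, then checks the other's. */
volatile int flag0, flag1;

void
thread0_enter(void)
{
        flag0 = 1;
        my_storeload_barrier();         /* order the store to flag0 before the load of flag1 */
        if (flag1 == 0) {
                /* thread 0 may proceed into its critical section */
        }
}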
470 /* Operations on 8-bit bytes. */
492 /* Operations on 16-bit words. */
514 /* Operations on 32-bit double words. */
541 /* Operations on 64-bit quad words. */