Searched full:barrier (Results 1 – 25 of 1132) sorted by relevance


/linux/tools/include/asm/
barrier.h
4 #include "../../arch/x86/include/asm/barrier.h"
6 #include "../../arch/arm/include/asm/barrier.h"
8 #include "../../arch/arm64/include/asm/barrier.h"
10 #include "../../arch/powerpc/include/asm/barrier.h"
12 #include "../../arch/riscv/include/asm/barrier.h"
14 #include "../../arch/s390/include/asm/barrier.h"
16 #include "../../arch/sh/include/asm/barrier.h"
18 #include "../../arch/sparc/include/asm/barrier.h"
20 #include "../../arch/tile/include/asm/barrier.h"
22 #include "../../arch/alpha/include/asm/barrier.h"
[all …]
/linux/include/linux/
spinlock_up.h
9 #include <asm/barrier.h>
32 barrier(); in arch_spin_lock()
40 barrier(); in arch_spin_trylock()
47 barrier(); in arch_spin_unlock()
54 #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
55 #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
56 #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
57 #define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
58 #define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
59 #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
[all …]
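These uniprocessor stubs make a useful point: with no second CPU to exclude, "locking" only has to stop the compiler from moving memory accesses across the critical section. A minimal standalone sketch of that idea (illustrative names, not the header's exact contents):

#define barrier() asm volatile("" ::: "memory")

/* On UP there is nothing to spin on; lock/unlock merely pin memory
 * accesses inside the critical section at compile time. */
static inline void up_spin_lock(void)   { barrier(); }
static inline void up_spin_unlock(void) { barrier(); }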
/linux/Documentation/scheduler/
membarrier.rst
14 require each architecture to have a full memory barrier after coming from
15 user-space, before updating rq->curr. This barrier is implied by the sequence
16 rq_lock(); smp_mb__after_spinlock() in __schedule(). The barrier matches a full
17 barrier in the proximity of the membarrier system call exit, cf.
24 require each architecture to have a full memory barrier after updating rq->curr,
25 before returning to user-space. The schemes providing this barrier on the various
28 - alpha, arc, arm, hexagon, mips rely on the full barrier implied by
31 - arm64 relies on the full barrier implied by switch_to().
33 - powerpc, riscv, s390, sparc, x86 rely on the full barrier implied by
34 switch_mm(), if mm is not NULL; they rely on the full barrier implied
[all …]
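These scheduler-side guarantees are what the membarrier(2) system call relies on. A hedged user-space sketch of invoking it (assumes a kernel built with membarrier support; error handling kept minimal):

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static int issue_global_barrier(void)
{
        /* Ask which commands this kernel supports. */
        long cmds = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0, 0);

        if (cmds < 0 || !(cmds & MEMBARRIER_CMD_GLOBAL))
                return -1;
        /* All running threads system-wide observe a full memory
         * barrier before this call returns. */
        return (int)syscall(__NR_membarrier, MEMBARRIER_CMD_GLOBAL, 0, 0);
}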
/linux/Documentation/
memory-barriers.txt
29 particular barrier, and
34 for any particular barrier, but if the architecture provides less than
37 Note also that it is possible that a barrier may be a no-op for an
38 architecture because the way that arch works renders an explicit barrier
53 - Varieties of memory barrier.
57 - SMP barrier pairing.
58 - Examples of memory barrier sequences.
64 - Compiler barrier.
74 (*) Inter-CPU acquiring barrier effects.
85 (*) Kernel I/O barrier effects.
[all …]
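The "SMP barrier pairing" entry covers this document's central rule: a write barrier on the producing CPU is useless unless paired with a read barrier (or a dependency) on the consuming CPU. A condensed kernel-style sketch of the canonical pairing example:

int data, flag;

void producer(void)                     /* CPU 0 */
{
        WRITE_ONCE(data, 42);
        smp_wmb();      /* order the data store before the flag store */
        WRITE_ONCE(flag, 1);
}

void consumer(void)                     /* CPU 1 */
{
        while (!READ_ONCE(flag))
                cpu_relax();
        smp_rmb();      /* order the flag load before the data load */
        /* data is now guaranteed to be observed as 42 */
}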
/linux/arch/arm64/include/asm/
irqflags.h
8 #include <asm/barrier.h>
25 barrier(); in __daif_local_irq_enable()
27 barrier(); in __daif_local_irq_enable()
37 barrier(); in __pmr_local_irq_enable()
40 barrier(); in __pmr_local_irq_enable()
54 barrier(); in __daif_local_irq_disable()
56 barrier(); in __daif_local_irq_disable()
66 barrier(); in __pmr_local_irq_disable()
68 barrier(); in __pmr_local_irq_disable()
174 barrier(); in __daif_local_irq_restore()
[all …]
/linux/arch/sparc/include/asm/
barrier_64.h
6 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
10 * It used to be believed that the memory barrier had to be right in the
11 * delay slot, but a case has been traced recently wherein the memory barrier
23 * the memory barrier explicitly into a "branch always, predicted taken"
44 barrier(); \
52 barrier(); \
56 #define __smp_mb__before_atomic() barrier()
57 #define __smp_mb__after_atomic() barrier()
59 #include <asm-generic/barrier.h>
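The workaround the comment alludes to is tucking the membar into the delay slot of a branch that is always taken and predicted taken, so it is never executed in the shadow of a misprediction. Roughly (a from-memory sketch, not a verbatim quote of the header):

#define membar_safe(type) \
do {    __asm__ __volatile__("ba,pt  %%xcc, 1f\n\t" \
                             " membar " type "\n" \
                             "1:\n" \
                             : : : "memory"); \
} while (0)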
/linux/arch/mips/include/asm/
sync.h
11 * Two types of barrier are provided:
18 * restrictions imposed by the barrier.
31 * b) Multiple variants of ordering barrier are provided which allow the
34 * than a barrier are observed prior to stores that are younger than a
35 * barrier & don't care about the ordering of loads then the 'wmb'
36 * ordering barrier can be used. Limiting the barrier's effects to stores
49 * A full completion barrier; all memory accesses appearing prior to this sync
56 * For now we use a full completion barrier to implement all sync types, until
66 * barrier since 2010 & omit 'rmb' barriers because the CPUs don't perform
104 * don't implicitly provide a memory barrier. In general this is most MIPS
[all …]
barrier.h
86 # define __smp_mb() barrier()
87 # define __smp_rmb() barrier()
88 # define __smp_wmb() barrier()
92 * When LL/SC does imply order, it must also be a compiler barrier to avoid the
124 * a completion barrier immediately preceding the LL instruction. Therefore we
125 * can skip emitting a barrier from __smp_mb__before_atomic().
140 #include <asm-generic/barrier.h>
/linux/kernel/sched/
membarrier.c
13 * barrier before sending the IPI
19 * The memory barrier at the start of membarrier() on CPU0 is necessary in
22 * CPU1 after the IPI-induced memory barrier:
33 * barrier()
40 * point after (b). If the memory barrier at (a) is omitted, then "x = 1"
45 * The timing of the memory barrier at (a) has to ensure that it executes
46 * before the IPI-induced memory barrier on CPU1.
49 * barrier after completing the IPI
55 * The memory barrier at the end of membarrier() on CPU0 is necessary in
63 * barrier()
[all …]
/linux/tools/virtio/ringtest/
main.h
91 /* Compiler barrier - similar to what Linux uses */
92 #define barrier() asm volatile("" ::: "memory") macro
98 #define cpu_relax() barrier()
113 barrier(); in busy_wait()
130 * adds a compiler barrier.
133 barrier(); \
139 barrier(); \
143 #define smp_wmb() barrier()
163 barrier(); in __read_once_size()
165 barrier(); in __read_once_size()
[all …]
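The __read_once_size() hits above are the userspace clone of the kernel's READ_ONCE() machinery: machine-word sizes become single volatile loads, and anything larger degrades to a memcpy bracketed by compiler barriers so the compiler cannot tear, cache, or reorder the access. A hedged reconstruction of that helper's shape:

static inline void __read_once_size(const volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(char *)res = *(const volatile char *)p; break;
        case 4: *(int *)res  = *(const volatile int *)p;  break;
        default:
                /* Not a single machine word: forbid compiler
                 * reordering around the byte-wise copy. */
                barrier();
                __builtin_memcpy(res, (const void *)p, size);
                barrier();
        }
}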
virtio_ring_0_9.c
133 /* Barrier A (for pairing) */ in add_inbuf()
140 /* Barrier A (for pairing) */ in add_inbuf()
145 /* Barrier A (for pairing) */ in add_inbuf()
163 /* Barrier B (for pairing) */ in get_buf()
169 /* Barrier B (for pairing) */ in get_buf()
221 /* Barrier D (for pairing) */ in enable_call()
231 /* Barrier C (for pairing) */ in kick_available()
253 /* Barrier C (for pairing) */ in enable_kick()
280 /* Barrier A (for pairing) */ in use_buf()
289 /* Barrier A (for pairing) */ in use_buf()
[all …]
ring.c
130 * add an explicit full barrier to avoid this. in add_inbuf()
132 barrier(); in add_inbuf()
136 /* Barrier A (for pairing) */ in add_inbuf()
151 /* Barrier B (for pairing) */ in get_buf()
182 /* Barrier D (for pairing) */ in enable_call()
192 /* Barrier C (for pairing) */ in kick_available()
214 /* Barrier C (for pairing) */ in enable_kick()
234 /* Barrier A (for pairing) */ in use_buf()
247 /* Barrier B (for pairing) */ in use_buf()
259 /* Barrier D (for pairing) */ in call_used()
/linux/arch/s390/include/asm/
barrier.h
32 #define __rmb() barrier()
33 #define __wmb() barrier()
43 barrier(); \
51 barrier(); \
55 #define __smp_mb__before_atomic() barrier()
56 #define __smp_mb__after_atomic() barrier()
82 #include <asm-generic/barrier.h>
/linux/arch/mips/mm/
tlb-r3k.c
32 #define BARRIER \ macro
49 entry++; /* BARRIER */ in local_flush_tlb_from()
94 start += PAGE_SIZE; /* BARRIER */ in local_flush_tlb_range()
99 if (idx < 0) /* BARRIER */ in local_flush_tlb_range()
131 start += PAGE_SIZE; /* BARRIER */ in local_flush_tlb_kernel_range()
136 if (idx < 0) /* BARRIER */ in local_flush_tlb_kernel_range()
164 BARRIER; in local_flush_tlb_page()
169 if (idx < 0) /* BARRIER */ in local_flush_tlb_page()
203 BARRIER; in __update_tlb()
208 if (idx < 0) { /* BARRIER */ in __update_tlb()
[all …]
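Note that BARRIER here is a pipeline-hazard barrier, not a memory barrier: on the R3000, a TLB write needs a few cycles before a dependent access, so the macro just inserts nops. An illustrative definition in that spirit (not the file's exact macro):

#define BARRIER                         \
        __asm__ __volatile__(           \
                ".set   push\n\t"       \
                ".set   noreorder\n\t"  \
                "nop\n\t"               \
                "nop\n\t"               \
                ".set   pop")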
/linux/arch/arc/include/asm/
barrier.h
15 * Explicit barrier provided by DMB instruction
19 * - DMB guarantees SMP as well as local barrier semantics
20 * (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
21 * UP: barrier(), SMP: smp_*mb == *mb)
23 * in the general case. Plus it only provides full barrier.
42 #include <asm-generic/barrier.h>
/linux/include/asm-generic/
barrier.h
3 * Generic barrier definitions.
61 #define mb() barrier()
113 #define smp_mb() barrier()
117 #define smp_rmb() barrier()
121 #define smp_wmb() barrier()
182 #define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
186 #define smp_mb__before_atomic() barrier()
190 #define smp_mb__after_atomic() barrier()
196 barrier(); \
205 barrier(); \
[all …]
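The smp_store_mb() fallback shown above (store, then barrier) exists for the classic sleeper/waker handshake, where the state store must be visible before the condition is re-checked. A kernel-style sketch with hypothetical names (sleep_until_woken and wake_if_sleeping are placeholders, not kernel APIs):

struct task { int state; };
int work_available;

void sleeper(struct task *self)
{
        smp_store_mb(self->state, 1);   /* store SLEEPING + full barrier */
        if (!READ_ONCE(work_available))
                sleep_until_woken(self);        /* hypothetical */
}

void waker(struct task *t)
{
        WRITE_ONCE(work_available, 1);
        smp_mb();                       /* pairs with the sleeper's barrier */
        wake_if_sleeping(t);            /* hypothetical */
}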
/linux/arch/riscv/include/asm/
membarrier.h
10 * Only need the full barrier when switching between processes. in membarrier_arch_switch_mm()
11 * Barrier when switching from kernel to userspace is not in membarrier_arch_switch_mm()
12 * required here, given that it is implied by mmdrop(). Barrier in membarrier_arch_switch_mm()
23 * The membarrier system call requires a full memory barrier in membarrier_arch_switch_mm()
26 * This barrier is also needed for the SYNC_CORE command when in membarrier_arch_switch_mm()
44 * Matches a full barrier in the proximity of the membarrier in membarrier_arch_switch_mm()
/linux/drivers/md/dm-vdo/indexer/
sparse-cache.c
27 * via the careful use of barrier messages sent to all the index zones by the triage queue worker
37 * and the serialization of the barrier requests from the triage queue ensures they will all
41 * are known to be blocked, waiting in the second barrier. Outside that critical section, all the
144 /* Lock for this barrier object */
146 /* Semaphore for threads waiting at this barrier */
150 /* Total number of threads using this barrier */
169 static void initialize_threads_barrier(struct threads_barrier *barrier, in initialize_threads_barrier() argument
172 sema_init(&barrier->lock, 1); in initialize_threads_barrier()
173 barrier->arrived = 0; in initialize_threads_barrier()
174 barrier->thread_count = thread_count; in initialize_threads_barrier()
[all …]
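Given the fields visible in the snippet (a lock semaphore initialized to 1, an arrived counter, and a thread_count), the arrival path of such a semaphore-based barrier plausibly looks like the sketch below: the last thread to arrive wakes all earlier arrivals. The body is an illustration, not the file's code, and the "wait" field name is assumed.

static void enter_threads_barrier(struct threads_barrier *barrier)
{
        int i;

        down(&barrier->lock);
        if (++barrier->arrived == barrier->thread_count) {
                /* Last arrival: release everyone blocked on ->wait. */
                barrier->arrived = 0;
                for (i = 1; i < barrier->thread_count; i++)
                        up(&barrier->wait);
                up(&barrier->lock);
        } else {
                up(&barrier->lock);
                down(&barrier->wait);
        }
}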
/linux/tools/arch/sparc/include/asm/
barrier_64.h
8 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
12 * It used to be believed that the memory barrier had to be right in the
13 * delay slot, but a case has been traced recently wherein the memory barrier
25 * the memory barrier explicitly into a "branch always, predicted taken"
45 barrier(); \
52 barrier(); \
/linux/arch/powerpc/kernel/
smp-tbsync.c
53 barrier(); in smp_generic_take_timebase()
59 barrier(); in smp_generic_take_timebase()
70 barrier(); in smp_generic_take_timebase()
96 barrier(); in start_contest()
99 barrier(); in start_contest()
104 barrier(); in start_contest()
125 barrier(); in smp_generic_give_timebase()
166 barrier(); in smp_generic_give_timebase()
/linux/arch/loongarch/include/asm/
barrier.h
12 * Bit3: barrier for previous read (0: true, 1: false)
13 * Bit2: barrier for previous write (0: true, 1: false)
14 * Bit1: barrier for succeeding read (0: true, 1: false)
15 * Bit0: barrier for succeeding write (0: true, 1: false)
17 * Hint 0x700: barrier for "read after read" from the same address
60 #define __smp_mb__before_atomic() barrier()
61 #define __smp_mb__after_atomic() barrier()
137 #include <asm-generic/barrier.h>
/linux/tools/perf/tests/
sigtrap.c
176 pthread_barrier_t *barrier = (pthread_barrier_t *)arg; in test_thread() local
180 pthread_barrier_wait(barrier); in test_thread()
189 static int run_test_threads(pthread_t *threads, pthread_barrier_t *barrier) in run_test_threads() argument
193 pthread_barrier_wait(barrier); in run_test_threads()
200 static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrier) in run_stress_test() argument
208 ret = run_test_threads(threads, barrier); in run_stress_test()
239 pthread_barrier_t barrier; in test__sigtrap() local
248 pthread_barrier_init(&barrier, NULL, NUM_THREADS + 1); in test__sigtrap()
271 if (pthread_create(&threads[i], NULL, test_thread, &barrier)) { in test__sigtrap()
277 ret = run_stress_test(fd, threads, &barrier); in test__sigtrap()
[all …]
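The rendezvous pattern in this test (NUM_THREADS workers plus the coordinating thread, hence the NUM_THREADS + 1 party count) distills to a small standalone program; compile with -pthread:

#include <pthread.h>

#define NUM_THREADS 4
static pthread_barrier_t barrier;

static void *test_thread(void *arg)
{
        pthread_barrier_wait(&barrier); /* start all workers in lockstep */
        return NULL;
}

int main(void)
{
        pthread_t threads[NUM_THREADS];
        int i;

        pthread_barrier_init(&barrier, NULL, NUM_THREADS + 1);
        for (i = 0; i < NUM_THREADS; i++)
                pthread_create(&threads[i], NULL, test_thread, NULL);
        pthread_barrier_wait(&barrier); /* the +1 party: release them */
        for (i = 0; i < NUM_THREADS; i++)
                pthread_join(threads[i], NULL);
        pthread_barrier_destroy(&barrier);
        return 0;
}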
/linux/tools/testing/selftests/bpf/prog_tests/
map_in_map.c
13 pthread_barrier_t barrier;
33 pthread_barrier_wait(&ctx->barrier); in update_map_fn()
41 pthread_barrier_wait(&ctx->barrier); in update_map_fn()
49 pthread_barrier_wait(&ctx->barrier); in update_map_fn()
64 pthread_barrier_wait(&ctx->barrier); in access_map_fn()
69 pthread_barrier_wait(&ctx->barrier); in access_map_fn()
108 pthread_barrier_init(&ctx.barrier, NULL, 2); in test_map_in_map_access()
/linux/arch/alpha/include/asm/
irqflags.h
35 barrier(); in arch_local_irq_disable()
41 barrier(); in arch_local_irq_save()
47 barrier(); in arch_local_irq_enable()
53 barrier(); in arch_local_irq_restore()
55 barrier(); in arch_local_irq_restore()
/linux/tools/virtio/asm/
barrier.h
4 #define barrier() asm volatile("" ::: "memory") macro
6 #define virt_rmb() barrier()
7 #define virt_wmb() barrier()
13 barrier(); \
30 #error Please fill in barrier macros
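That #error is the porting contract: the harness refuses to build until an architecture supplies its fences. One hedged, conservative way to satisfy it on a new target is the GCC builtin full fence (correct everywhere, if stronger than strictly needed):

#define virt_mb()  __sync_synchronize()
#define virt_rmb() __sync_synchronize()
#define virt_wmb() __sync_synchronize()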
