xref: /linux/arch/arm64/include/asm/barrier.h (revision 0750b8fcf313845b21c71344b4bea8ad7d3cee84)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>

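/*
 * __nops(n) expands to an assembler .rept block of n NOP instructions for
 * use inside inline asm strings; nops(n) emits them directly.
 */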
#define __nops(n)	".rept	" #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

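/* Send Event, Wait For Event and Wait For Interrupt hint instructions. */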
#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

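/*
 * ISB flushes the pipeline so that later instructions are refetched; DMB and
 * DSB take a shareability/access-type option (e.g. ish, oshst, sy) selecting
 * the scope of the barrier.
 */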
#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

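/*
 * Profiling (PSB CSYNC), trace (TSB CSYNC) and Consumption of Speculative
 * Data (CSDB) barriers, written as HINT immediates so that they assemble
 * everywhere and behave as NOPs on CPUs without the relevant extension.
 */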
#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define __tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

/*
 * Data Gathering Hint:
 * This instruction prevents memory accesses with Normal-NC or Device-GRE
 * attributes that appear before the hint instruction from being merged with
 * any memory accesses appearing after the hint instruction.
 */
#define dgh()		asm volatile("hint #6" : : : "memory")

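/*
 * pmr_sync() ensures that a prior write to ICC_PMR_EL1 (used to mask
 * interrupts when pseudo-NMIs are enabled) has been observed before
 * continuing. The GIC driver enables the gic_pmr_sync static key when the
 * hardware requires a DSB SY for that guarantee.
 */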
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()						\
	do {							\
		extern struct static_key_false gic_pmr_sync;	\
								\
		if (static_branch_unlikely(&gic_pmr_sync))	\
			dsb(sy);				\
	} while (0)
#else
#define pmr_sync()	do {} while (0)
#endif

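/*
 * Mandatory barriers: full-system DSBs, strong enough to order accesses to
 * device memory as well as normal memory.
 */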
#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

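/* Barriers for ordering against coherent DMA: outer-shareable DMBs. */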
#define dma_mb()	dmb(osh)
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)

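/*
 * io_stop_wc() uses the Data Gathering Hint above to keep earlier
 * write-combined writes from being merged with later ones.
 */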
#define io_stop_wc()	dgh()

#define tsb_csync()								\
	do {									\
		/*								\
		 * CPUs affected by Arm Erratum 2054223 or 2067961 need		\
		 * another TSB to ensure the trace is flushed. The barriers	\
		 * don't have to be strictly back to back, as long as the	\
		 * CPU is in trace prohibited state.				\
		 */								\
		if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE))	\
			__tsb_csync();						\
		__tsb_csync();							\
	} while (0)

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

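	/*
	 * CMP sets the carry flag when idx >= sz, so SBC xzr, xzr yields 0
	 * in that case and ~0UL when idx < sz. The CSDB afterwards stops
	 * speculation from running ahead with an unmasked index.
	 */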
	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
 */
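/*
 * EOR of the value with itself is always zero but keeps a data dependency on
 * the value; adding it to SP and loading from the result turns that into an
 * address dependency, so the dummy load cannot complete before the counter
 * read it depends on.
 */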
#define arch_counter_enforce_ordering(val) do {				\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)

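/* SMP barriers only order against other CPUs: inner-shareable DMBs suffice. */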
#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

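/*
 * Store-release: STLR orders the store after all earlier memory accesses, so
 * no explicit barrier is required. The union hands the value to the asm as a
 * raw integer of the matching width, independent of the pointed-to type.
 */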
#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_write(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)

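/*
 * Load-acquire: LDAR orders all later memory accesses after the load, again
 * without a separate barrier instruction.
 */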
#define __smp_load_acquire(p)						\
({									\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_read(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	}								\
	(typeof(*p))__u.__val;						\
})

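/*
 * Spin until cond_expr becomes true. Rather than busy-polling,
 * __cmpwait_relaxed arms the exclusive monitor on *ptr and executes WFE, so
 * the CPU can sleep until the location is written (or an event arrives)
 * before re-reading it.
 */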
#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

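/*
 * As smp_cond_load_relaxed(), but each read is a load-acquire, so on return
 * the caller also observes everything published before the value that made
 * cond_expr true. Illustrative use (hypothetical flag):
 *
 *	smp_cond_load_acquire(&flag, VAL != 0);
 */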
#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */