// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#include <stdatomic.h>
#include "bpf_arena_common.h"
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

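/*
 * Arena map backing every __arena_global variable below; user space mmap()s
 * it to verify the results. map_extra selects the start of the mmap() region
 * (a lower address on arm64).
 */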
struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 10); /* number of pages */
#ifdef __TARGET_ARCH_arm64
	__ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
#else
	__ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
#endif
} arena SEC(".maps");

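/*
 * Skip flags read by the user-space side of the test: the atomics tests need
 * ENABLE_ATOMICS_TESTS and addr_space_cast support; the load-acquire /
 * store-release tests additionally need an arm64 or x86 target.
 */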
#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
bool skip_all_tests __attribute((__section__(".data"))) = false;
#else
bool skip_all_tests = true;
#endif

#if defined(ENABLE_ATOMICS_TESTS) &&		  \
	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
#else
bool skip_lacq_srel_tests = true;
#endif

__u32 pid = 0;

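/*
 * add: __sync_fetch_and_add() on 64-bit and 32-bit arena values, on a stack
 * value, and in "no return value" form.
 */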
__u64 __arena_global add64_value = 1;
__u64 __arena_global add64_result = 0;
__u32 __arena_global add32_value = 1;
__u32 __arena_global add32_result = 0;
__u64 __arena_global add_stack_value_copy = 0;
__u64 __arena_global add_stack_result = 0;
__u64 __arena_global add_noreturn_value = 1;

SEC("raw_tp/sys_enter")
int add(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 add_stack_value = 1;

	add64_result = __sync_fetch_and_add(&add64_value, 2);
	add32_result = __sync_fetch_and_add(&add32_value, 2);
	add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
	add_stack_value_copy = add_stack_value;
	__sync_fetch_and_add(&add_noreturn_value, 2);
#endif

	return 0;
}

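/* sub: same pattern as add, using __sync_fetch_and_sub(). */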
__s64 __arena_global sub64_value = 1;
__s64 __arena_global sub64_result = 0;
__s32 __arena_global sub32_value = 1;
__s32 __arena_global sub32_result = 0;
__s64 __arena_global sub_stack_value_copy = 0;
__s64 __arena_global sub_stack_result = 0;
__s64 __arena_global sub_noreturn_value = 1;

SEC("raw_tp/sys_enter")
int sub(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 sub_stack_value = 1;

	sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
	sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
	sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
	sub_stack_value_copy = sub_stack_value;
	__sync_fetch_and_sub(&sub_noreturn_value, 2);
#endif

	return 0;
}

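/*
 * and/or/xor: use C11 relaxed atomics when the compiler supports BPF memory
 * ordering (__BPF_FEATURE_ATOMIC_MEM_ORDERING), otherwise fall back to the
 * __sync builtins.
 */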
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
_Atomic __u32 __arena_global and32_value = 0x110;
#else
__u64 __arena_global and64_value = (0x110ull << 32);
__u32 __arena_global and32_value = 0x110;
#endif

SEC("raw_tp/sys_enter")
int and(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_and(&and64_value, 0x011ull << 32);
	__sync_fetch_and_and(&and32_value, 0x011);
#endif
#endif

	return 0;
}

#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u32 __arena_global or32_value = 0x110;
_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
#else
__u32 __arena_global or32_value = 0x110;
__u64 __arena_global or64_value = (0x110ull << 32);
#endif

SEC("raw_tp/sys_enter")
int or(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_or(&or64_value, 0x011ull << 32);
	__sync_fetch_and_or(&or32_value, 0x011);
#endif
#endif

	return 0;
}

#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
_Atomic __u32 __arena_global xor32_value = 0x110;
#else
__u64 __arena_global xor64_value = (0x110ull << 32);
__u32 __arena_global xor32_value = 0x110;
#endif

SEC("raw_tp/sys_enter")
int xor(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
	__sync_fetch_and_xor(&xor32_value, 0x011);
#endif
#endif

	return 0;
}

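/*
 * cmpxchg: the first compare-and-swap must fail (expected old value 0 does
 * not match), the second must succeed; both returned values are recorded.
 */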
__u32 __arena_global cmpxchg32_value = 1;
__u32 __arena_global cmpxchg32_result_fail = 0;
__u32 __arena_global cmpxchg32_result_succeed = 0;
__u64 __arena_global cmpxchg64_value = 1;
__u64 __arena_global cmpxchg64_result_fail = 0;
__u64 __arena_global cmpxchg64_result_succeed = 0;

SEC("raw_tp/sys_enter")
int cmpxchg(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
	cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);

	cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
	cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
#endif

	return 0;
}

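/* xchg: atomically swap new values into the arena and record the old ones. */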
__u64 __arena_global xchg64_value = 1;
__u64 __arena_global xchg64_result = 0;
__u32 __arena_global xchg32_value = 1;
__u32 __arena_global xchg32_result = 0;

SEC("raw_tp/sys_enter")
int xchg(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 val64 = 2;
	__u32 val32 = 2;

	xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
	xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
#endif

	return 0;
}

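/*
 * uaf: perform every atomic operation on a freed arena page. Each access
 * should fault and be recovered, decrementing uaf_recovery_fails from 24
 * back down to zero.
 */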
__u64 __arena_global uaf_sink;
volatile __u64 __arena_global uaf_recovery_fails;

SEC("syscall")
int uaf(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
    !defined(__TARGET_ARCH_x86)
	__u32 __arena *page32;
	__u64 __arena *page64;
	void __arena *page;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	bpf_arena_free_pages(&arena, page, 1);
	uaf_recovery_fails = 24;

	page32 = (__u32 __arena *)page;
	uaf_sink += __sync_fetch_and_add(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_add_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_sub(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_sub_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_and(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_and_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_or(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_or_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_xor(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_xor_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_lock_test_and_set(page32, 1);
	uaf_recovery_fails -= 1;

	page64 = (__u64 __arena *)page;
	uaf_sink += __sync_fetch_and_add(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_add_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_sub(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_sub_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_and(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_and_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_or(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_or_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_xor(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_xor_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_lock_test_and_set(page64, 1);
	uaf_recovery_fails -= 1;
#endif

	return 0;
}

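/*
 * load_acquire: inline assembly emits BPF_LOAD_ACQ instructions against
 * addr_space_cast'ed arena pointers, copying each *_value into its *_result.
 */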
#if __clang_major__ >= 18
__u8 __arena_global load_acquire8_value = 0x12;
__u16 __arena_global load_acquire16_value = 0x1234;
__u32 __arena_global load_acquire32_value = 0x12345678;
__u64 __arena_global load_acquire64_value = 0x1234567890abcdef;

__u8 __arena_global load_acquire8_result = 0;
__u16 __arena_global load_acquire16_result = 0;
__u32 __arena_global load_acquire32_result = 0;
__u64 __arena_global load_acquire64_result = 0;
#else
/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
 * this issue by defining the below variables as 64-bit.
 */
__u64 __arena_global load_acquire8_value;
__u64 __arena_global load_acquire16_value;
__u64 __arena_global load_acquire32_value;
__u64 __arena_global load_acquire64_value;

__u64 __arena_global load_acquire8_result;
__u64 __arena_global load_acquire16_result;
__u64 __arena_global load_acquire32_result;
__u64 __arena_global load_acquire64_result;
#endif

SEC("raw_tp/sys_enter")
int load_acquire(const void *ctx)
{
#if defined(ENABLE_ATOMICS_TESTS) &&		  \
	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))

#define LOAD_ACQUIRE_ARENA(SIZEOP, SIZE, SRC, DST)	\
	{ asm volatile (				\
	"r1 = %[" #SRC "] ll;"				\
	"r1 = addr_space_cast(r1, 0x0, 0x1);"		\
	".8byte %[load_acquire_insn];"			\
	"r3 = %[" #DST "] ll;"				\
	"r3 = addr_space_cast(r3, 0x0, 0x1);"		\
	"*(" #SIZE " *)(r3 + 0) = r2;"			\
	:						\
	: __imm_addr(SRC),				\
	  __imm_insn(load_acquire_insn,			\
		     BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_LOAD_ACQ,	\
				   BPF_REG_2, BPF_REG_1, 0)),	\
	  __imm_addr(DST)				\
	: __clobber_all); }				\

	LOAD_ACQUIRE_ARENA(B, u8, load_acquire8_value, load_acquire8_result)
	LOAD_ACQUIRE_ARENA(H, u16, load_acquire16_value,
			   load_acquire16_result)
	LOAD_ACQUIRE_ARENA(W, u32, load_acquire32_value,
			   load_acquire32_result)
	LOAD_ACQUIRE_ARENA(DW, u64, load_acquire64_value,
			   load_acquire64_result)
#undef LOAD_ACQUIRE_ARENA

#endif
	return 0;
}

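/*
 * store_release: inline assembly emits BPF_STORE_REL instructions that write
 * immediate patterns into the *_result arena variables.
 */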
#if __clang_major__ >= 18
__u8 __arena_global store_release8_result = 0;
__u16 __arena_global store_release16_result = 0;
__u32 __arena_global store_release32_result = 0;
__u64 __arena_global store_release64_result = 0;
#else
/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
 * this issue by defining the below variables as 64-bit.
 */
__u64 __arena_global store_release8_result;
__u64 __arena_global store_release16_result;
__u64 __arena_global store_release32_result;
__u64 __arena_global store_release64_result;
#endif

SEC("raw_tp/sys_enter")
int store_release(const void *ctx)
{
#if defined(ENABLE_ATOMICS_TESTS) &&		  \
	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))

#define STORE_RELEASE_ARENA(SIZEOP, DST, VAL)	\
	{ asm volatile (			\
	"r1 = " VAL ";"				\
	"r2 = %[" #DST "] ll;"			\
	"r2 = addr_space_cast(r2, 0x0, 0x1);"	\
	".8byte %[store_release_insn];"		\
	:					\
	: __imm_addr(DST),			\
	  __imm_insn(store_release_insn,	\
		     BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_STORE_REL,	\
				   BPF_REG_2, BPF_REG_1, 0))	\
	: __clobber_all); }			\

	STORE_RELEASE_ARENA(B, store_release8_result, "0x12")
	STORE_RELEASE_ARENA(H, store_release16_result, "0x1234")
	STORE_RELEASE_ARENA(W, store_release32_result, "0x12345678")
	STORE_RELEASE_ARENA(DW, store_release64_result,
			    "0x1234567890abcdef ll")
#undef STORE_RELEASE_ARENA

#endif
	return 0;
}

char _license[] SEC("license") = "GPL";