// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#include <stdatomic.h>
#include "bpf_arena_common.h"
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

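/* Arena map backing all __arena_global variables below. map_extra sets the
 * fixed start address of the mmap() region; a lower base is used on arm64,
 * presumably to fit smaller virtual address space configurations.
 */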
struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 10); /* number of pages */
#ifdef __TARGET_ARCH_arm64
	__ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
#else
	__ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
#endif
} arena SEC(".maps");

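/* Flags read by the user-space side of the test to decide which subtests can
 * run with the current compiler and target architecture.
 */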
#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
bool skip_all_tests __attribute((__section__(".data"))) = false;
#else
bool skip_all_tests = true;
#endif

#if defined(ENABLE_ATOMICS_TESTS) &&		  \
	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	 (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
#else
bool skip_lacq_srel_tests = true;
#endif

__u32 pid = 0;

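/* Fetch-and-add on 64-bit and 32-bit arena values, on a stack value, and in
 * the "no return value" form where the result is discarded.
 */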
__u64 __arena_global add64_value = 1;
__u64 __arena_global add64_result = 0;
__u32 __arena_global add32_value = 1;
__u32 __arena_global add32_result = 0;
__u64 __arena_global add_stack_value_copy = 0;
__u64 __arena_global add_stack_result = 0;
__u64 __arena_global add_noreturn_value = 1;

SEC("raw_tp/sys_enter")
int add(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 add_stack_value = 1;

	add64_result = __sync_fetch_and_add(&add64_value, 2);
	add32_result = __sync_fetch_and_add(&add32_value, 2);
	add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
	add_stack_value_copy = add_stack_value;
	__sync_fetch_and_add(&add_noreturn_value, 2);
#endif

	return 0;
}

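/* Same pattern as add(), using __sync_fetch_and_sub. */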
__s64 __arena_global sub64_value = 1;
__s64 __arena_global sub64_result = 0;
__s32 __arena_global sub32_value = 1;
__s32 __arena_global sub32_result = 0;
__s64 __arena_global sub_stack_value_copy = 0;
__s64 __arena_global sub_stack_result = 0;
__s64 __arena_global sub_noreturn_value = 1;

SEC("raw_tp/sys_enter")
int sub(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 sub_stack_value = 1;

	sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
	sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
	sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
	sub_stack_value_copy = sub_stack_value;
	__sync_fetch_and_sub(&sub_noreturn_value, 2);
#endif

	return 0;
}

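/* and/or/xor: when the compiler supports BPF memory ordering
 * (__BPF_FEATURE_ATOMIC_MEM_ORDERING), use C11 _Atomic variables with
 * relaxed fetch-and-<op>; otherwise fall back to the __sync builtins.
 */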
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
_Atomic __u32 __arena_global and32_value = 0x110;
#else
__u64 __arena_global and64_value = (0x110ull << 32);
__u32 __arena_global and32_value = 0x110;
#endif

SEC("raw_tp/sys_enter")
int and(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_and(&and64_value, 0x011ull << 32);
	__sync_fetch_and_and(&and32_value, 0x011);
#endif
#endif

	return 0;
}

#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u32 __arena_global or32_value = 0x110;
_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
#else
__u32 __arena_global or32_value = 0x110;
__u64 __arena_global or64_value = (0x110ull << 32);
#endif

SEC("raw_tp/sys_enter")
int or(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_or(&or64_value, 0x011ull << 32);
	__sync_fetch_and_or(&or32_value, 0x011);
#endif
#endif

	return 0;
}

#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
_Atomic __u32 __arena_global xor32_value = 0x110;
#else
__u64 __arena_global xor64_value = (0x110ull << 32);
__u32 __arena_global xor32_value = 0x110;
#endif

SEC("raw_tp/sys_enter")
int xor(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
	__sync_fetch_and_xor(&xor32_value, 0x011);
#endif
#endif

	return 0;
}

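/* Compare-and-swap: the first attempt uses a wrong expected value and must
 * fail, the second uses the correct one and must succeed.
 */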
__u32 __arena_global cmpxchg32_value = 1;
__u32 __arena_global cmpxchg32_result_fail = 0;
__u32 __arena_global cmpxchg32_result_succeed = 0;
__u64 __arena_global cmpxchg64_value = 1;
__u64 __arena_global cmpxchg64_result_fail = 0;
__u64 __arena_global cmpxchg64_result_succeed = 0;

SEC("raw_tp/sys_enter")
int cmpxchg(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
	cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);

	cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
	cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
#endif

	return 0;
}

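/* Atomic exchange via __sync_lock_test_and_set, which LLVM lowers to a BPF
 * exchange instruction for this target.
 */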
__u64 __arena_global xchg64_value = 1;
__u64 __arena_global xchg64_result = 0;
__u32 __arena_global xchg32_value = 1;
__u32 __arena_global xchg32_result = 0;

SEC("raw_tp/sys_enter")
int xchg(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 val64 = 2;
	__u32 val32 = 2;

	xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
	xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
#endif

	return 0;
}

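/* Use-after-free recovery: run every atomic operation on an arena page that
 * has already been freed. Each access is expected to fault and be recovered
 * from, so uaf_recovery_fails, pre-set to the number of operations, should
 * reach zero if execution continues past every one of them.
 */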
__u64 __arena_global uaf_sink;
volatile __u64 __arena_global uaf_recovery_fails;

SEC("syscall")
int uaf(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
    !defined(__TARGET_ARCH_x86)
	__u32 __arena *page32;
	__u64 __arena *page64;
	void __arena *page;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	bpf_arena_free_pages(&arena, page, 1);
	uaf_recovery_fails = 24;

	page32 = (__u32 __arena *)page;
	uaf_sink += __sync_fetch_and_add(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_add_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_sub(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_sub_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_and(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_and_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_or(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_or_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_xor(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_xor_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_lock_test_and_set(page32, 1);
	uaf_recovery_fails -= 1;

	page64 = (__u64 __arena *)page;
	uaf_sink += __sync_fetch_and_add(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_add_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_sub(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_sub_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_and(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_and_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_or(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_or_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_xor(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_xor_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_lock_test_and_set(page64, 1);
	uaf_recovery_fails -= 1;
#endif

	return 0;
}

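/* Load-acquire on arena memory. The BPF_LOAD_ACQ instruction is emitted as a
 * raw .8byte encoding built with BPF_ATOMIC_OP(), after casting the pointers
 * into arena address space 1 with addr_space_cast.
 */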
#if __clang_major__ >= 18
__u8 __arena_global load_acquire8_value = 0x12;
__u16 __arena_global load_acquire16_value = 0x1234;
__u32 __arena_global load_acquire32_value = 0x12345678;
__u64 __arena_global load_acquire64_value = 0x1234567890abcdef;

__u8 __arena_global load_acquire8_result = 0;
__u16 __arena_global load_acquire16_result = 0;
__u32 __arena_global load_acquire32_result = 0;
__u64 __arena_global load_acquire64_result = 0;
#else
/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
 * this issue by defining the below variables as 64-bit.
 */
__u64 __arena_global load_acquire8_value;
__u64 __arena_global load_acquire16_value;
__u64 __arena_global load_acquire32_value;
__u64 __arena_global load_acquire64_value;

__u64 __arena_global load_acquire8_result;
__u64 __arena_global load_acquire16_result;
__u64 __arena_global load_acquire32_result;
__u64 __arena_global load_acquire64_result;
#endif

SEC("raw_tp/sys_enter")
int load_acquire(const void *ctx)
{
#if defined(ENABLE_ATOMICS_TESTS) &&		  \
	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	 (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))

#define LOAD_ACQUIRE_ARENA(SIZEOP, SIZE, SRC, DST)	\
	{ asm volatile (				\
	"r1 = %[" #SRC "] ll;"				\
	"r1 = addr_space_cast(r1, 0x0, 0x1);"		\
	".8byte %[load_acquire_insn];"			\
	"r3 = %[" #DST "] ll;"				\
	"r3 = addr_space_cast(r3, 0x0, 0x1);"		\
	"*(" #SIZE " *)(r3 + 0) = r2;"			\
	:						\
	: __imm_addr(SRC),				\
	  __imm_insn(load_acquire_insn,			\
		     BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_LOAD_ACQ,	\
				   BPF_REG_2, BPF_REG_1, 0)),	\
	  __imm_addr(DST)				\
	: __clobber_all); }				\

	LOAD_ACQUIRE_ARENA(B, u8, load_acquire8_value, load_acquire8_result)
	LOAD_ACQUIRE_ARENA(H, u16, load_acquire16_value,
			   load_acquire16_result)
	LOAD_ACQUIRE_ARENA(W, u32, load_acquire32_value,
			   load_acquire32_result)
	LOAD_ACQUIRE_ARENA(DW, u64, load_acquire64_value,
			   load_acquire64_result)
#undef LOAD_ACQUIRE_ARENA

#endif
	return 0;
}

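/* Store-release counterpart of the test above, emitting BPF_STORE_REL the
 * same way.
 */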
#if __clang_major__ >= 18
__u8 __arena_global store_release8_result = 0;
__u16 __arena_global store_release16_result = 0;
__u32 __arena_global store_release32_result = 0;
__u64 __arena_global store_release64_result = 0;
#else
/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
 * this issue by defining the below variables as 64-bit.
 */
__u64 __arena_global store_release8_result;
__u64 __arena_global store_release16_result;
__u64 __arena_global store_release32_result;
__u64 __arena_global store_release64_result;
#endif

SEC("raw_tp/sys_enter")
int store_release(const void *ctx)
{
#if defined(ENABLE_ATOMICS_TESTS) &&		  \
	defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
	(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	 (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))

#define STORE_RELEASE_ARENA(SIZEOP, DST, VAL)	\
	{ asm volatile (			\
	"r1 = " VAL ";"				\
	"r2 = %[" #DST "] ll;"			\
	"r2 = addr_space_cast(r2, 0x0, 0x1);"	\
	".8byte %[store_release_insn];"		\
	:					\
	: __imm_addr(DST),			\
	  __imm_insn(store_release_insn,	\
		     BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_STORE_REL,	\
				   BPF_REG_2, BPF_REG_1, 0))	\
	: __clobber_all); }			\

	STORE_RELEASE_ARENA(B, store_release8_result, "0x12")
	STORE_RELEASE_ARENA(H, store_release16_result, "0x1234")
	STORE_RELEASE_ARENA(W, store_release32_result, "0x12345678")
	STORE_RELEASE_ARENA(DW, store_release64_result,
			    "0x1234567890abcdef ll")
#undef STORE_RELEASE_ARENA

#endif
	return 0;
}

char _license[] SEC("license") = "GPL";