xref: /linux/tools/testing/selftests/bpf/progs/verifier_xadd.c (revision 5f60d5f6bbc12e782fac78110b0ee62698f3b576)
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

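/* Single-entry hash map with an 8-byte key and an 8-byte value; the
 * "unaligned map" test below uses a looked-up value from this map as
 * the target of a misaligned atomic add.
 */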
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

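/* "lock *(u32 *)(rX + off) += wY" is the asm form of a 4-byte
 * BPF_ATOMIC add (roughly what clang emits for __sync_fetch_and_add()).
 * A BPF_W atomic access must be 4-byte aligned; r10-7 is off by one
 * byte into the slot stored at r10-8, so the verifier is expected to
 * reject the program at load time.
 */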
SEC("tc")
__description("xadd/w check unaligned stack")
__failure __msg("misaligned stack access off")
__naked void xadd_w_check_unaligned_stack(void)
{
	asm volatile ("					\
	r0 = 1;						\
	*(u64*)(r10 - 8) = r0;				\
	lock *(u32 *)(r10 - 7) += w0;			\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

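/* Same idea against map value memory: look up the 8-byte value, then
 * issue a 4-byte atomic add at offset 3 into it. The offset stays in
 * bounds but is not 4-byte aligned, so the verifier should report a
 * misaligned value access.
 */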
SEC("tc")
__description("xadd/w check unaligned map")
__failure __msg("misaligned value access off")
__naked void xadd_w_check_unaligned_map(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = 1;						\
	lock *(u32 *)(r0 + 3) += w1;			\
	r0 = *(u32*)(r0 + 3);				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

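/* Packet memory does not allow BPF_ATOMIC at all, so even with
 * BPF_F_ANY_ALIGNMENT relaxing the alignment checks, the atomic adds
 * at r2+1 and r2+2 should be rejected; the expected error is about
 * atomic stores into a pkt pointer, not about alignment.
 */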
SEC("xdp")
__description("xadd/w check unaligned pkt")
__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void xadd_w_check_unaligned_pkt(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 8;					\
	if r1 < r3 goto l0_%=;				\
	r0 = 99;					\
	goto l1_%=;					\
l0_%=:	r0 = 1;						\
	r1 = 0;						\
	*(u32*)(r2 + 0) = r1;				\
	r1 = 0;						\
	*(u32*)(r2 + 3) = r1;				\
	lock *(u32 *)(r2 + 1) += w0;			\
	lock *(u32 *)(r2 + 2) += w0;			\
	r0 = *(u32*)(r2 + 1);				\
l1_%=:	exit;						\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

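/* Runtime check that an atomic add leaves its source and destination
 * registers intact: the stack slot starts at 1 and r0 (== 1) is added
 * twice, so r6/r7 must still match r0/r10 afterwards and the slot must
 * read back as 3 (otherwise the l0 branch returns 42).
 */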
SEC("tc")
__description("xadd/w check whether src/dst got mangled, 1")
__success __retval(3)
__naked void src_dst_got_mangled_1(void)
{
	asm volatile ("					\
	r0 = 1;						\
	r6 = r0;					\
	r7 = r10;					\
	*(u64*)(r10 - 8) = r0;				\
	lock *(u64 *)(r10 - 8) += r0;			\
	lock *(u64 *)(r10 - 8) += r0;			\
	if r6 != r0 goto l0_%=;				\
	if r7 != r10 goto l0_%=;			\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
l0_%=:	r0 = 42;					\
	exit;						\
"	::: __clobber_all);
}

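/* Same src/dst-mangling check as above, but with 4-byte (BPF_W)
 * atomic adds on a 4-byte stack store; the expected return value is
 * again 1 + 1 + 1 = 3.
 */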
SEC("tc")
__description("xadd/w check whether src/dst got mangled, 2")
__success __retval(3)
__naked void src_dst_got_mangled_2(void)
{
	asm volatile ("					\
	r0 = 1;						\
	r6 = r0;					\
	r7 = r10;					\
	*(u32*)(r10 - 8) = r0;				\
	lock *(u32 *)(r10 - 8) += w0;			\
	lock *(u32 *)(r10 - 8) += w0;			\
	if r6 != r0 goto l0_%=;				\
	if r7 != r10 goto l0_%=;			\
	r0 = *(u32*)(r10 - 8);				\
	exit;						\
l0_%=:	r0 = 42;					\
	exit;						\
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";