// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

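/*
 * The sign-extending load (LDSX) instructions are part of the BPF "cpuv4"
 * instruction set extension. These tests only build with clang >= 18,
 * which can emit the new encodings, and only target architectures whose
 * JITs are expected to support them; otherwise a dummy test is compiled
 * instead (see the #else branch at the bottom of this file).
 */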
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
	defined(__TARGET_ARCH_loongarch)) && \
	__clang_major__ >= 18

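/*
 * Store 0x3fe on the stack and sign-extend the byte holding 0xfe back
 * into r0. The byte offset depends on endianness: on little-endian the
 * least significant byte sits at the lowest stack address (r10 - 8),
 * on big-endian it is the last byte of the stored u64 (r10 - 1).
 * Either way 0xfe sign-extends to -2, matching __retval(-2).
 */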
SEC("socket")
__description("LDSX, S8")
__success __success_unpriv __retval(-2)
__naked void ldsx_s8(void)
{
	asm volatile (
	"r1 = 0x3fe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s8 *)(r10 - 8);"
#else
	"r0 = *(s8 *)(r10 - 1);"
#endif
	"exit;"
	::: __clobber_all);
}

SEC("socket")
__description("LDSX, S16")
__success __success_unpriv __retval(-2)
__naked void ldsx_s16(void)
{
	asm volatile (
	"r1 = 0x3fffe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s16 *)(r10 - 8);"
#else
	"r0 = *(s16 *)(r10 - 2);"
#endif
	"exit;"
	::: __clobber_all);
}

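/*
 * 0xfffffffe sign-extends from s32 to -2 (0xfffffffffffffffe). The
 * logical right shift then leaves 0xffffffff in the lower 32 bits of
 * r0, which the test runner sees as the 32-bit return value -1.
 */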
SEC("socket")
__description("LDSX, S32")
__success __success_unpriv __retval(-1)
__naked void ldsx_s32(void)
{
	asm volatile (
	"r1 = 0xfffffffe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s32 *)(r10 - 8);"
#else
	"r0 = *(s32 *)(r10 - 4);"
#endif
	"r0 >>= 1;"
	"exit;"
	::: __clobber_all);
}

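/*
 * Range-checking tests: after a sign-extending load the verifier should
 * know the register lies within the signed range of the source type
 * (e.g. [-128, 127] after an s8 load), so neither bounds check below can
 * take the l0 branch and the program returns 1. The privileged S8
 * variant additionally matches the range the verifier logs for r1.
 */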
SEC("socket")
__description("LDSX, S8 range checking, privileged")
__log_level(2) __success __retval(1)
__msg("R1_w=scalar(smin=smin32=-128,smax=smax32=127)")
__naked void ldsx_s8_range_priv(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s8 *)(r10 - 8);"
#else
	"r1 = *(s8 *)(r10 - 1);"
#endif
	/* r1 with s8 range */
	"if r1 s> 0x7f goto l0_%=;"
	"if r1 s< -0x80 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("socket")
__description("LDSX, S16 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s16_range(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s16 *)(r10 - 8);"
#else
	"r1 = *(s16 *)(r10 - 2);"
#endif
	/* r1 with s16 range */
	"if r1 s> 0x7fff goto l0_%=;"
	"if r1 s< -0x8000 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("socket")
__description("LDSX, S32 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s32_range(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s32 *)(r10 - 8);"
#else
	"r1 = *(s32 *)(r10 - 4);"
#endif
	/* r1 with s32 range */
	"if r1 s> 0x7fffffff goto l0_%=;"
	"if r1 s< -0x80000000 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

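/*
 * The ctx fields data, data_end and data_meta are converted by the
 * verifier into pointer loads and may not be read with a sign-extending
 * load; the tests below expect "invalid bpf_context access" for xdp,
 * tcx and flow_dissector programs.
 */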
SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_1(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
	: __clobber_all);
}

SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_2(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data_meta")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_3(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_4(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_5(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data_meta")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_6(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
	: __clobber_all);
}

SEC("flow_dissector")
__description("LDSX, flow_dissector s32 __sk_buff->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_7(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

SEC("flow_dissector")
__description("LDSX, flow_dissector s32 __sk_buff->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_8(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

#else

SEC("socket")
__description("cpuv4 is not supported by compiler or jit, use a dummy test")
__success
int dummy_test(void)
{
	return 0;
}

#endif

char _license[] SEC("license") = "GPL";