// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "bpf_arena_common.h"

#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
	defined(__TARGET_ARCH_loongarch)) && \
	__clang_major__ >= 18

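/* Single-page arena map backing the arena LDSX tests below. */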
struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 1);
} arena SEC(".maps");

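/* Store 0x3fe and sign-extend its least-significant byte (0xfe):
 * the LDSX result must be -2.
 */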
SEC("socket")
__description("LDSX, S8")
__success __success_unpriv __retval(-2)
__naked void ldsx_s8(void)
{
	asm volatile (
	"r1 = 0x3fe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s8 *)(r10 - 8);"
#else
	"r0 = *(s8 *)(r10 - 1);"
#endif
	"exit;"
	::: __clobber_all);
}

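/* Same check for 16 bits: the low half-word of 0x3fffe is 0xfffe, i.e. -2. */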
SEC("socket")
__description("LDSX, S16")
__success __success_unpriv __retval(-2)
__naked void ldsx_s16(void)
{
	asm volatile (
	"r1 = 0x3fffe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s16 *)(r10 - 8);"
#else
	"r0 = *(s16 *)(r10 - 2);"
#endif
	"exit;"
	::: __clobber_all);
}

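/* 0xfffffffe sign-extends to 0xfffffffffffffffe; the logical right shift
 * leaves 0x7fffffffffffffff, whose low 32 bits (the checked retval) are -1.
 */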
SEC("socket")
__description("LDSX, S32")
__success __success_unpriv __retval(-1)
__naked void ldsx_s32(void)
{
	asm volatile (
	"r1 = 0xfffffffe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s32 *)(r10 - 8);"
#else
	"r0 = *(s32 *)(r10 - 4);"
#endif
	"r0 >>= 1;"
	"exit;"
	::: __clobber_all);
}

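/* The verifier must narrow the LDSX result to the s8 range [-128, 127],
 * proving both guard branches dead; the __msg line checks the logged bounds.
 */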
SEC("socket")
__description("LDSX, S8 range checking, privileged")
__log_level(2) __success __retval(1)
__msg("R1=scalar(smin=smin32=-128,smax=smax32=127)")
__naked void ldsx_s8_range_priv(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s8 *)(r10 - 8);"
#else
	"r1 = *(s8 *)(r10 - 1);"
#endif
	/* r1 with s8 range */
	"if r1 s> 0x7f goto l0_%=;"
	"if r1 s< -0x80 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

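/* As above, but for a 16-bit sign-extending load: s16 range [-32768, 32767]. */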
SEC("socket")
__description("LDSX, S16 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s16_range(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s16 *)(r10 - 8);"
#else
	"r1 = *(s16 *)(r10 - 2);"
#endif
	/* r1 with s16 range */
	"if r1 s> 0x7fff goto l0_%=;"
	"if r1 s< -0x8000 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

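/* As above, but for a 32-bit sign-extending load. */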
SEC("socket")
__description("LDSX, S32 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s32_range(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s32 *)(r10 - 8);"
#else
	"r1 = *(s32 *)(r10 - 4);"
#endif
	/* r1 with s32 range */
	"if r1 s> 0x7fffffff goto l0_%=;"
	"if r1 s< -0x80000000 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

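/* data, data_end and data_meta in xdp_md/__sk_buff are rewritten context
 * fields that may only be read with a plain 32-bit load; every sign-extending
 * load of them must be rejected as an invalid bpf_context access.
 */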
SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_1(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
	: __clobber_all);
}

SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_2(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

SEC("xdp")
__description("LDSX, xdp s32 xdp_md->data_meta")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_3(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_4(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_5(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tcx/ingress")
__description("LDSX, tcx s32 __sk_buff->data_meta")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_6(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
	: __clobber_all);
}

SEC("flow_dissector")
__description("LDSX, flow_dissector s32 __sk_buff->data")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_7(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

SEC("flow_dissector")
__description("LDSX, flow_dissector s32 __sk_buff->data_end")
__failure __msg("invalid bpf_context access")
__naked void ldsx_ctx_8(void)
{
	asm volatile (
	"r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
	"r0 = 0;"
	"exit;"
	:
	: __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

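/* Check the JITed sign-extending loads through arena pointers (both the r0-
 * and r1-based forms) on x86-64 and arm64.
 */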
SEC("syscall")
__description("Arena LDSX Disasm")
__success
__arch_x86_64
__jited("movslq	0x10(%rax,%r12), %r14")
__jited("movswq	0x18(%rax,%r12), %r14")
__jited("movsbq	0x20(%rax,%r12), %r14")
__jited("movslq	0x10(%rdi,%r12), %r15")
__jited("movswq	0x18(%rdi,%r12), %r15")
__jited("movsbq	0x20(%rdi,%r12), %r15")
__arch_arm64
__jited("add	x11, x7, x28")
__jited("ldrsw	x21, [x11, #0x10]")
__jited("add	x11, x7, x28")
__jited("ldrsh	x21, [x11, #0x18]")
__jited("add	x11, x7, x28")
__jited("ldrsb	x21, [x11, #0x20]")
__jited("add	x11, x0, x28")
__jited("ldrsw	x22, [x11, #0x10]")
__jited("add	x11, x0, x28")
__jited("ldrsh	x22, [x11, #0x18]")
__jited("add	x11, x0, x28")
__jited("ldrsb	x22, [x11, #0x20]")
__naked void arena_ldsx_disasm(void *ctx)
{
	asm volatile (
	"r1 = %[arena] ll;"
	"r2 = 0;"
	"r3 = 1;"
	"r4 = %[numa_no_node];"
	"r5 = 0;"
	"call %[bpf_arena_alloc_pages];"
	"r0 = addr_space_cast(r0, 0x0, 0x1);"
	"r1 = r0;"
	"r8 = *(s32 *)(r0 + 16);"
	"r8 = *(s16 *)(r0 + 24);"
	"r8 = *(s8  *)(r0 + 32);"
	"r9 = *(s32 *)(r1 + 16);"
	"r9 = *(s16 *)(r1 + 24);"
	"r9 = *(s8  *)(r1 + 32);"
	"r0 = 0;"
	"exit;"
	:: __imm(bpf_arena_alloc_pages),
	   __imm_addr(arena),
	   __imm_const(numa_no_node, NUMA_NO_NODE)
	:  __clobber_all
	);
}

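/* A store/load through a bogus arena address faults; arena exception
 * handling is expected to skip the store and zero the faulting load's
 * destination register, so the program returns 0.
 */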
SEC("syscall")
__description("Arena LDSX Exception")
__success __retval(0)
__arch_x86_64
__arch_arm64
__naked void arena_ldsx_exception(void *ctx)
{
	asm volatile (
	"r1 = %[arena] ll;"
	"r0 = 0xdeadbeef;"
	"r0 = addr_space_cast(r0, 0x0, 0x1);"
	"r1 = 0x3fe;"
	"*(u64 *)(r0 + 0) = r1;"
	"r0 = *(s8 *)(r0 + 0);"
	"exit;"
	:
	:  __imm_addr(arena)
	:  __clobber_all
	);
}

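/* Arena variants of the LDSX tests above: each loads -2, and the logical
 * right shift leaves low 32 bits of 0xffffffff, so the checked retval is -1.
 */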
SEC("syscall")
__description("Arena LDSX, S8")
__success __retval(-1)
__arch_x86_64
__arch_arm64
__naked void arena_ldsx_s8(void *ctx)
{
	asm volatile (
	"r1 = %[arena] ll;"
	"r2 = 0;"
	"r3 = 1;"
	"r4 = %[numa_no_node];"
	"r5 = 0;"
	"call %[bpf_arena_alloc_pages];"
	"r0 = addr_space_cast(r0, 0x0, 0x1);"
	"r1 = 0x3fe;"
	"*(u64 *)(r0 + 0) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s8 *)(r0 + 0);"
#else
	"r0 = *(s8 *)(r0 + 7);"
#endif
	"r0 >>= 1;"
	"exit;"
	:: __imm(bpf_arena_alloc_pages),
	   __imm_addr(arena),
	   __imm_const(numa_no_node, NUMA_NO_NODE)
	:  __clobber_all
	);
}

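/* As above, with a 16-bit sign-extending arena load. */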
SEC("syscall")
__description("Arena LDSX, S16")
__success __retval(-1)
__arch_x86_64
__arch_arm64
__naked void arena_ldsx_s16(void *ctx)
{
	asm volatile (
	"r1 = %[arena] ll;"
	"r2 = 0;"
	"r3 = 1;"
	"r4 = %[numa_no_node];"
	"r5 = 0;"
	"call %[bpf_arena_alloc_pages];"
	"r0 = addr_space_cast(r0, 0x0, 0x1);"
	"r1 = 0x3fffe;"
	"*(u64 *)(r0 + 0) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s16 *)(r0 + 0);"
#else
	"r0 = *(s16 *)(r0 + 6);"
#endif
	"r0 >>= 1;"
	"exit;"
	:: __imm(bpf_arena_alloc_pages),
	   __imm_addr(arena),
	   __imm_const(numa_no_node, NUMA_NO_NODE)
	:  __clobber_all
	);
}

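/* As above, with a 32-bit sign-extending arena load. */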
SEC("syscall")
__description("Arena LDSX, S32")
__success __retval(-1)
__arch_x86_64
__arch_arm64
__naked void arena_ldsx_s32(void *ctx)
{
	asm volatile (
	"r1 = %[arena] ll;"
	"r2 = 0;"
	"r3 = 1;"
	"r4 = %[numa_no_node];"
	"r5 = 0;"
	"call %[bpf_arena_alloc_pages];"
	"r0 = addr_space_cast(r0, 0x0, 0x1);"
	"r1 = 0xfffffffe;"
	"*(u64 *)(r0 + 0) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s32 *)(r0 + 0);"
#else
	"r0 = *(s32 *)(r0 + 4);"
#endif
	"r0 >>= 1;"
	"exit;"
	:: __imm(bpf_arena_alloc_pages),
	   __imm_addr(arena),
	   __imm_const(numa_no_node, NUMA_NO_NODE)
	:  __clobber_all
	);
}

/* to retain debug info for BTF generation */
void kfunc_root(void)
{
	bpf_arena_alloc_pages(0, 0, 0, 0, 0);
}

#else

SEC("socket")
__description("cpuv4 is not supported by compiler or jit, use a dummy test")
__success
int dummy_test(void)
{
	return 0;
}

#endif

char _license[] SEC("license") = "GPL";