// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include <../../../tools/include/linux/filter.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

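/* A 64-bit spill and fill of the ctx pointer must preserve its pointer
 * type: the filled R2 can be copied to R0 and returned. Unprivileged
 * programs are rejected, since returning the pointer would leak a kernel
 * address ("R0 leaks addr").
 */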
SEC("socket")
__description("check valid spill/fill")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void check_valid_spill_fill(void)
{
	asm volatile ("					\
	/* spill R1(ctx) into stack */			\
	*(u64*)(r10 - 8) = r1;				\
	/* fill it back into R2 */			\
	r2 = *(u64*)(r10 - 8);				\
	/* should be able to access R0 = *(R2 + 8) */	\
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\
	r0 = r2;					\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("check valid spill/fill, skb mark")
__success __success_unpriv __retval(0)
__naked void valid_spill_fill_skb_mark(void)
{
	asm volatile ("					\
	r6 = r1;					\
	*(u64*)(r10 - 8) = r6;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

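/* Spilling a ringbuf memory pointer and filling it back must preserve
 * the pointer type, so the filled register can still be dereferenced
 * and submitted to the ringbuf.
 */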
SEC("socket")
__description("check valid spill/fill, ptr to mem")
__success __success_unpriv __retval(0)
__naked void spill_fill_ptr_to_mem(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* spill R6(mem) into the stack */		\
	*(u64*)(r10 - 8) = r6;				\
	/* fill it back in R7 */			\
	r7 = *(u64*)(r10 - 8);				\
	/* should be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u64*)(r7 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r7;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

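/* Pointer arithmetic on a ringbuf_mem_or_null pointer before the NULL
 * check must be rejected, as the verifier message below asserts.
 */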
SEC("socket")
__description("check with invalid reg offset 0")
__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited")
__failure_unpriv
__naked void with_invalid_reg_offset_0(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* add invalid offset to memory or NULL */	\
	r0 += 1;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* should not be able to access *(R6) = 0 */	\
	r1 = 0;						\
	*(u32*)(r6 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r6;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

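/* The next three tests overwrite part of a spilled pointer. Privileged
 * programs may fill the clobbered slot back, but the result is a plain
 * scalar that cannot be dereferenced; unprivileged programs are rejected
 * at the corrupting store itself ("attempt to corrupt spilled").
 */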
SEC("socket")
__description("check corrupted spill/fill")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("attempt to corrupt spilled")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_corrupted_spill_fill(void)
{
	asm volatile ("					\
	/* spill R1(ctx) into stack */			\
	*(u64*)(r10 - 8) = r1;				\
	/* corrupt the spilled R1 pointer on the stack */\
	r0 = 0x23;					\
	*(u8*)(r10 - 7) = r0;				\
	/* filling back into R0 is fine for priv.	\
	 * R0 now becomes SCALAR_VALUE.			\
	 */						\
	r0 = *(u64*)(r10 - 8);				\
	/* Load from R0 should fail. */			\
	r0 = *(u64*)(r0 + 8);				\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, LSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_lsb(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r1;				\
	r0 = 0xcafe;					\
	*(u16*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, MSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_msb(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r1;				\
	r0 = 0x12345678;				\
	*(u32*)(r10 - 4) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

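/* The "tc" tests below spill and fill scalars that are then added to a
 * packet pointer: the packet access is only accepted when the fill
 * preserves tight enough bounds for the range check against pkt_end.
 */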
SEC("tc")
__description("Spill and refill a u32 const scalar.  Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u32*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */	\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("socket")
__description("Spill a u32 const, refill from another half of the uninit u32 from the stack")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -4+0 size 4")
__retval(0)
__naked void uninit_u32_from_the_stack(void)
{
	asm volatile ("					\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */	\
	r4 = *(u32*)(r10 - 4);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

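/* Filling only 16 bits of the 32-bit spill yields a scalar with
 * umax=65535 instead of the precise constant 20, so the range check on
 * r0 no longer proves that 4 bytes at r2 are inside the packet.
 */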
SEC("tc")
__description("Spill a u32 const scalar.  Refill as u16.  Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u16_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u16*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("Spill u32 const scalars.  Refill as u64.  Offset to skb->data")
__failure __msg("math between pkt pointer and register with unbounded min value is not allowed")
__naked void u64_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w6 = 0;						\
	w7 = 20;					\
	*(u32*)(r10 - 4) = r6;				\
	*(u32*)(r10 - 8) = r7;				\
	r4 = *(u64*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4= */	\
	r0 += r4;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u16*)(r10 - 6);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

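/* A u32 fill from fp-4 does not line up with the tracked spill at the
 * start of the 8-byte slot, so (per the annotations below) it produces
 * an unknown scalar (umax=U32_MAX) and the packet access is rejected.
 */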
SEC("tc")
__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr.  Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void addr_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	*(u32*)(r10 - 4) = r4;				\
	r4 = *(u32*)(r10 - 4);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("Spill and refill a umax=40 bounded scalar.  Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = *(u64*)(r1 + %[__sk_buff_tstamp]);		\
	if r4 <= 40 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	/* *(u32 *)(r10 -8) = r4 R4=umax=40 */		\
	*(u32*)(r10 - 8) = r4;				\
	/* r4 = *(u32 *)(r10 - 8) */			\
	r4 = *(u32*)(r10 - 8);				\
	/* r2 += r4 R2=pkt R4=umax=40 */		\
	r2 += r4;					\
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */		\
	r0 = r2;					\
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */	\
	r2 += 20;					\
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\
	if r2 > r3 goto l1_%=;				\
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\
	r0 = *(u32*)(r0 + 0);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}

SEC("tc")
__description("Spill a u32 scalar at fp-4 and then at fp-8")
__success __retval(0)
__naked void and_then_at_fp_8(void)
{
	asm volatile ("					\
	w4 = 4321;					\
	*(u32*)(r10 - 4) = r4;				\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u64*)(r10 - 8);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

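/* Narrow spills of wider registers must clear the scalar ID; otherwise
 * find_equal_scalars() would propagate bounds learned on the truncated
 * fill back to the full-width source register, as the in-test comments
 * below explain.
 */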
SEC("xdp")
__description("32-bit spill of 64-bit reg should clear ID")
__failure __msg("math between ctx pointer and 4294967295 is not allowed")
__naked void spill_32bit_of_64bit_fail(void)
{
	asm volatile ("					\
	r6 = r1;					\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8;					\
	/* Put a large number into r1. */		\
	r1 = 0xffffffff;				\
	r1 <<= 32;					\
	r1 += r0;					\
	/* Assign an ID to r1. */			\
	r2 = r1;					\
	/* 32-bit spill r1 to stack - should clear the ID! */\
	*(u32*)(r10 - 8) = r1;				\
	/* 32-bit fill r2 from stack. */		\
	r2 = *(u32*)(r10 - 8);				\
	/* Compare r2 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * corners. If the ID was mistakenly preserved on spill, this would\
	 * cause the verifier to think that r1 is also equal to zero in one of\
	 * the branches, and equal to eight on the other branch.\
	 */						\
	r3 = 0;						\
	if r2 != r3 goto l0_%=;				\
l0_%=:	r1 >>= 32;					\
	/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
	 * read will happen, because it actually contains 0xffffffff.\
	 */						\
	r6 += r1;					\
	r0 = *(u32*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("xdp")
__description("16-bit spill of 32-bit reg should clear ID")
__failure __msg("dereference of modified ctx ptr R6 off=65535 disallowed")
__naked void spill_16bit_of_32bit_fail(void)
{
	asm volatile ("					\
	r6 = r1;					\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8;					\
	/* Put a large number into r1. */		\
	w1 = 0xffff0000;				\
	r1 += r0;					\
	/* Assign an ID to r1. */			\
	r2 = r1;					\
	/* 16-bit spill r1 to stack - should clear the ID! */\
	*(u16*)(r10 - 8) = r1;				\
	/* 16-bit fill r2 from stack. */		\
	r2 = *(u16*)(r10 - 8);				\
	/* Compare r2 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * corners. If the ID was mistakenly preserved on spill, this would\
	 * cause the verifier to think that r1 is also equal to zero in one of\
	 * the branches, and equal to eight on the other branch.\
	 */						\
	r3 = 0;						\
	if r2 != r3 goto l0_%=;				\
l0_%=:	r1 >>= 16;					\
	/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
	 * read will happen, because it actually contains 0xffff.\
	 */						\
	r6 += r1;					\
	r0 = *(u32*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

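/* Sub-register spills must record the written bytes as STACK_ZERO (for
 * stores of constant zero) or STACK_MISC, leaving untouched bytes
 * INVALID; the expected per-byte slot states are asserted via the
 * __msg() patterns below ('0', 'm', and '?' respectively).
 */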
SEC("raw_tp")
__log_level(2)
__success
__msg("fp-8=0m??mmmm")
__msg("fp-16=00mm??mm")
__msg("fp-24=00mm???m")
__naked void spill_subregs_preserve_stack_zero(void)
{
	asm volatile (
		"call %[bpf_get_prandom_u32];"

		/* 32-bit subreg spill with ZERO, MISC, and INVALID */
		".8byte %[fp1_u8_st_zero];"   /* ZERO, LLVM-18+: *(u8 *)(r10 -1) = 0; */
		"*(u8 *)(r10 -2) = r0;"       /* MISC */
		/* fp-3 and fp-4 stay INVALID */
		"*(u32 *)(r10 -8) = r0;"

		/* 16-bit subreg spill with ZERO, MISC, and INVALID */
		".8byte %[fp10_u16_st_zero];" /* ZERO, LLVM-18+: *(u16 *)(r10 -10) = 0; */
		"*(u16 *)(r10 -12) = r0;"     /* MISC */
		/* fp-13 and fp-14 stay INVALID */
		"*(u16 *)(r10 -16) = r0;"

		/* 8-bit subreg spill with ZERO, MISC, and INVALID */
		".8byte %[fp18_u16_st_zero];" /* ZERO, LLVM-18+: *(u16 *)(r10 -18) = 0; */
		"*(u16 *)(r10 -20) = r0;"     /* MISC */
		/* fp-21, fp-22, and fp-23 stay INVALID */
		"*(u8 *)(r10 -24) = r0;"

		"r0 = 0;"
		"exit;"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm_insn(fp1_u8_st_zero, BPF_ST_MEM(BPF_B, BPF_REG_FP, -1, 0)),
	  __imm_insn(fp10_u16_st_zero, BPF_ST_MEM(BPF_H, BPF_REG_FP, -10, 0)),
	  __imm_insn(fp18_u16_st_zero, BPF_ST_MEM(BPF_H, BPF_REG_FP, -18, 0))
	: __clobber_all);
}

char single_byte_buf[1] SEC(".data.single_byte_buf");

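/* Narrow and/or unaligned loads from stack slots known to hold zero
 * must themselves be known-zero, whether the slot was written by a
 * BPF_ST_MEM immediate or by spilling a zero register, and the loaded
 * value is only marked precise once it is used in a precise context.
 */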
SEC("raw_tp")
__log_level(2)
__success
/* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */
__msg("2: (7a) *(u64 *)(r10 -8) = 0          ; R10=fp0 fp-8_w=0")
/* but fp-16 is spilled IMPRECISE zero const reg */
__msg("4: (7b) *(u64 *)(r10 -16) = r0        ; R0_w=0 R10=fp0 fp-16_w=0")
/* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register
 * precise immediately; if necessary, it will be marked precise later
 */
__msg("6: (71) r2 = *(u8 *)(r10 -1)          ; R2_w=0 R10=fp0 fp-8_w=0")
/* similarly, when R2 is assigned from spilled register, it is initially
 * imprecise, but will be marked precise later once it is used in precise context
 */
__msg("10: (71) r2 = *(u8 *)(r10 -9)         ; R2_w=0 R10=fp0 fp-16_w=0")
__msg("11: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 11 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 10: (71) r2 = *(u8 *)(r10 -9)")
__msg("mark_precise: frame0: regs= stack=-16 before 9: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-16 before 8: (73) *(u8 *)(r1 +0) = r2")
__msg("mark_precise: frame0: regs= stack=-16 before 7: (0f) r1 += r2")
__msg("mark_precise: frame0: regs= stack=-16 before 6: (71) r2 = *(u8 *)(r10 -1)")
__msg("mark_precise: frame0: regs= stack=-16 before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-16 before 4: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0")
__naked void partial_stack_load_preserves_zeros(void)
{
	asm volatile (
		/* fp-8 is value zero (represented by a zero value fake reg) */
		".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */

		/* fp-16 is const zero register */
		"r0 = 0;"
		"*(u64 *)(r10 -16) = r0;"

		/* load single U8 from non-aligned spilled value zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u8 *)(r10 -1);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U8 from non-aligned ZERO REG slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u8 *)(r10 -9);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U16 from non-aligned spilled value zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u16 *)(r10 -2);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U16 from non-aligned ZERO REG slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u16 *)(r10 -10);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U32 from non-aligned spilled value zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u32 *)(r10 -4);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U32 from non-aligned ZERO REG slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u32 *)(r10 -12);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* for completeness, load U64 from STACK_ZERO slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u64 *)(r10 -8);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* for completeness, load U64 from ZERO REG slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u64 *)(r10 -16);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		"r0 = 0;"
		"exit;"
	:
	: __imm_ptr(single_byte_buf),
	  __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
	: __clobber_common);
}

SEC("raw_tp")
__log_level(2)
__success
/* fp-4 is STACK_ZERO */
__msg("2: (62) *(u32 *)(r10 -4) = 0          ; R10=fp0 fp-8=0000????")
__msg("4: (71) r2 = *(u8 *)(r10 -1)          ; R2_w=0 R10=fp0 fp-8=0000????")
__msg("5: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)")
__naked void partial_stack_load_preserves_partial_zeros(void)
{
	asm volatile (
		/* fp-4 is value zero */
		".8byte %[fp4_st_zero];" /* LLVM-18+: *(u32 *)(r10 -4) = 0; */

		/* load single U8 from non-aligned stack zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u8 *)(r10 -1);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U16 from non-aligned stack zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u16 *)(r10 -2);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U32 from non-aligned stack zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u32 *)(r10 -4);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		"r0 = 0;"
		"exit;"
	:
	: __imm_ptr(single_byte_buf),
	  __imm_insn(fp4_st_zero, BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0))
	: __clobber_common);
}

char two_byte_buf[2] SEC(".data.two_byte_buf");

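/* Loads from a stack slot holding a known constant must preserve that
 * constant, and marking the loaded value precise must propagate the
 * precision mark through parent states back to the original store,
 * whether it was a BPF_ST_MEM immediate or a spilled register.
 */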
SEC("raw_tp")
__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
__success
/* make sure fp-8 is IMPRECISE fake register spill */
__msg("3: (7a) *(u64 *)(r10 -8) = 1          ; R10=fp0 fp-8_w=1")
/* and fp-16 is spilled IMPRECISE const reg */
__msg("5: (7b) *(u64 *)(r10 -16) = r0        ; R0_w=1 R10=fp0 fp-16_w=1")
/* validate load from fp-8, which was initialized using BPF_ST_MEM */
__msg("8: (79) r2 = *(u64 *)(r10 -8)         ; R2_w=1 R10=fp0 fp-8=1")
__msg("9: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 8: (79) r2 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
/* note, fp-8 is precise, fp-16 is not yet precise, we'll get there */
__msg("mark_precise: frame0: parent state regs= stack=-8:  R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_w=1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7a) *(u64 *)(r10 -8) = 1")
__msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
/* validate load from fp-16, which was initialized using BPF_STX_MEM */
__msg("12: (79) r2 = *(u64 *)(r10 -16)       ; R2_w=1 R10=fp0 fp-16=1")
__msg("13: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 12: (79) r2 = *(u64 *)(r10 -16)")
__msg("mark_precise: frame0: regs= stack=-16 before 11: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2")
__msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
__msg("mark_precise: frame0: regs= stack=-16 before 8: (79) r2 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
/* now both fp-8 and fp-16 are precise, very good */
__msg("mark_precise: frame0: parent state regs= stack=-16:  R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_rw=P1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-16 before 5: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
__msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
__naked void stack_load_preserves_const_precision(void)
{
	asm volatile (
		/* establish checkpoint with state that has no stack slots;
		 * if we bubble up to this state without finding desired stack
		 * slot, then it's a bug and should be caught
		 */
		"goto +0;"

		/* fp-8 is const 1 *fake* register */
		".8byte %[fp8_st_one];" /* LLVM-18+: *(u64 *)(r10 -8) = 1; */

		/* fp-16 is const 1 register */
		"r0 = 1;"
		"*(u64 *)(r10 -16) = r0;"

		/* force checkpoint to check precision marks preserved in parent states */
		"goto +0;"

		/* load single U64 from aligned FAKE_REG=1 slot */
		"r1 = %[two_byte_buf];"
		"r2 = *(u64 *)(r10 -8);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U64 from aligned REG=1 slot */
		"r1 = %[two_byte_buf];"
		"r2 = *(u64 *)(r10 -16);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		"r0 = 0;"
		"exit;"
	:
	: __imm_ptr(two_byte_buf),
	  __imm_insn(fp8_st_one, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 1))
	: __clobber_common);
}

SEC("raw_tp")
__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
__success
/* make sure fp-8 is 32-bit FAKE subregister spill */
__msg("3: (62) *(u32 *)(r10 -8) = 1          ; R10=fp0 fp-8=????1")
/* and fp-16 is spilled IMPRECISE const subreg */
__msg("5: (63) *(u32 *)(r10 -16) = r0        ; R0_w=1 R10=fp0 fp-16=????1")
/* validate load from fp-8, which was initialized using BPF_ST_MEM */
__msg("8: (61) r2 = *(u32 *)(r10 -8)         ; R2_w=1 R10=fp0 fp-8=????1")
__msg("9: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 8: (61) r2 = *(u32 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
__msg("mark_precise: frame0: parent state regs= stack=-8:  R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16=????1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (63) *(u32 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (62) *(u32 *)(r10 -8) = 1")
__msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
/* validate load from fp-16, which was initialized using BPF_STX_MEM */
__msg("12: (61) r2 = *(u32 *)(r10 -16)       ; R2_w=1 R10=fp0 fp-16=????1")
__msg("13: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 12: (61) r2 = *(u32 *)(r10 -16)")
__msg("mark_precise: frame0: regs= stack=-16 before 11: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2")
__msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
__msg("mark_precise: frame0: regs= stack=-16 before 8: (61) r2 = *(u32 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
__msg("mark_precise: frame0: parent state regs= stack=-16:  R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16_r=????P1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-16 before 5: (63) *(u32 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
__msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
__naked void stack_load_preserves_const_precision_subreg(void)
{
	asm volatile (
		/* establish checkpoint with state that has no stack slots;
		 * if we bubble up to this state without finding desired stack
		 * slot, then it's a bug and should be caught
		 */
		"goto +0;"

		/* fp-8 is const 1 *fake* SUB-register */
		".8byte %[fp8_st_one];" /* LLVM-18+: *(u32 *)(r10 -8) = 1; */

		/* fp-16 is const 1 SUB-register */
		"r0 = 1;"
		"*(u32 *)(r10 -16) = r0;"

		/* force checkpoint to check precision marks preserved in parent states */
		"goto +0;"

		/* load single U32 from aligned FAKE_REG=1 slot */
		"r1 = %[two_byte_buf];"
		"r2 = *(u32 *)(r10 -8);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U32 from aligned REG=1 slot */
		"r1 = %[two_byte_buf];"
		"r2 = *(u32 *)(r10 -16);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		"r0 = 0;"
		"exit;"
	:
	: __imm_ptr(two_byte_buf),
	  __imm_insn(fp8_st_one, BPF_ST_MEM(BPF_W, BPF_REG_FP, -8, 1)) /* 32-bit spill */
	: __clobber_common);
}

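/* A bounds check on r0 after its 32-bit spill must also narrow the
 * spilled copy, so the later fill yields r1 >= 1 and the impossible
 * branch is pruned.
 */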
SEC("xdp")
__description("32-bit spilled reg range should be tracked")
__success __retval(0)
__naked void spill_32bit_range_track(void)
{
	asm volatile("					\
	call %[bpf_ktime_get_ns];			\
	/* Make r0 bounded. */				\
	r0 &= 65535;					\
	/* Assign an ID to r0. */			\
	r1 = r0;					\
	/* 32-bit spill r0 to stack. */			\
	*(u32*)(r10 - 8) = r0;				\
	/* Boundary check on r0. */			\
	if r0 < 1 goto l0_%=;				\
	/* 32-bit fill r1 from stack. */		\
	r1 = *(u32*)(r10 - 8);				\
	/* r1 == r0 => r1 >= 1 always. */		\
	if r1 >= 1 goto l0_%=;				\
	/* Dead branch: the verifier should prune it.	\
	 * Do an invalid memory access if the verifier	\
	 * follows it.					\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

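/* The remaining tests verify that a spill whose width matches the
 * spilled value assigns a scalar ID which the fill preserves, so
 * find_equal_scalars() propagates bounds learned on the filled register
 * back to the original.
 */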
SEC("xdp")
__description("64-bit spill of 64-bit reg should assign ID")
__success __retval(0)
__naked void spill_64bit_of_64bit_ok(void)
{
	asm volatile ("					\
	/* Roll one bit to make the register inexact. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x80000000;				\
	r0 <<= 32;					\
	/* 64-bit spill r0 to stack - should assign an ID. */\
	*(u64*)(r10 - 8) = r0;				\
	/* 64-bit fill r1 from stack - should preserve the ID. */\
	r1 = *(u64*)(r10 - 8);				\
	/* Compare r1 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * corners.					\
	 */						\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("xdp")
__description("32-bit spill of 32-bit reg should assign ID")
__success __retval(0)
__naked void spill_32bit_of_32bit_ok(void)
{
	asm volatile ("					\
	/* Roll one bit to make the register inexact. */\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0x80000000;				\
	/* 32-bit spill r0 to stack - should assign an ID. */\
	*(u32*)(r10 - 8) = r0;				\
	/* 32-bit fill r1 from stack - should preserve the ID. */\
	r1 = *(u32*)(r10 - 8);				\
	/* Compare r1 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * corners.					\
	 */						\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("xdp")
__description("16-bit spill of 16-bit reg should assign ID")
__success __retval(0)
__naked void spill_16bit_of_16bit_ok(void)
{
	asm volatile ("					\
	/* Roll one bit to make the register inexact. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8000;					\
	/* 16-bit spill r0 to stack - should assign an ID. */\
	*(u16*)(r10 - 8) = r0;				\
	/* 16-bit fill r1 from stack - should preserve the ID. */\
	r1 = *(u16*)(r10 - 8);				\
	/* Compare r1 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * corners.					\
	 */						\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("xdp")
__description("8-bit spill of 8-bit reg should assign ID")
__success __retval(0)
__naked void spill_8bit_of_8bit_ok(void)
{
	asm volatile ("					\
	/* Roll one bit to make the register inexact. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x80;					\
	/* 8-bit spill r0 to stack - should assign an ID. */\
	*(u8*)(r10 - 8) = r0;				\
	/* 8-bit fill r1 from stack - should preserve the ID. */\
	r1 = *(u8*)(r10 - 8);				\
	/* Compare r1 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * corners.					\
	 */						\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";