1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/bounds.c */
3
4 #include <linux/bpf.h>
5 #include <../../../include/linux/filter.h>
6 #include <bpf/bpf_helpers.h>
7 #include "bpf_misc.h"
8
/* Single-entry hash map with an 8-byte key and 8-byte value; shared fixture
 * for every test below that probes map-value pointer bounds.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");
15
SEC("socket")
__description("subtraction bounds (map value) variant 1")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
/* r1 and r3 are both loaded as u8 ([0x00, 0xff]); their difference can be
 * negative, and the logical >>= 56 keeps r1's umax at 0xff, so r0 += r1 can
 * point past the 8-byte value — the verifier must reject the final load.
 */
__naked void bounds_map_value_variant_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u8*)(r0 + 0);				\
	if r1 > 0xff goto l0_%=;			\
	r3 = *(u8*)(r0 + 1);				\
	if r3 > 0xff goto l0_%=;			\
	r1 -= r3;					\
	r1 >>= 56;					\
	r0 += r1;					\
	r0 = *(u8*)(r0 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
46
SEC("socket")
__description("subtraction bounds (map value) variant 2")
__failure
__msg("R0 min value is negative, either use unsigned index or do a if (index >=0) check.")
__msg_unpriv("R1 has unknown scalar with mixed signed bounds")
/* Same setup as variant 1 but without the >>= 56: r1 = r1 - r3 can be
 * negative, so r0 += r1 may point below the map value and the verifier
 * must reject the load (expected messages above).
 */
__naked void bounds_map_value_variant_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u8*)(r0 + 0);				\
	if r1 > 0xff goto l0_%=;			\
	r3 = *(u8*)(r0 + 1);				\
	if r3 > 0xff goto l0_%=;			\
	r1 -= r3;					\
	r0 += r1;					\
	r0 = *(u8*)(r0 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
77
SEC("socket")
__description("check subtraction on pointers for unpriv")
__success __failure_unpriv __msg_unpriv("R9 pointer -= pointer prohibited")
__retval(0)
/* r9 -= r0 subtracts a map-value pointer from the stack pointer: allowed
 * for privileged programs (result is a scalar stored into the map), but
 * rejected for unprivileged ones per the __msg_unpriv above.
 */
__naked void subtraction_on_pointers_for_unpriv(void)
{
	asm volatile ("					\
	r0 = 0;						\
	r1 = %[map_hash_8b] ll;				\
	r2 = r10;					\
	r2 += -8;					\
	r6 = 9;						\
	*(u64*)(r2 + 0) = r6;				\
	call %[bpf_map_lookup_elem];			\
	r9 = r10;					\
	r9 -= r0;					\
	r1 = %[map_hash_8b] ll;				\
	r2 = r10;					\
	r2 += -8;					\
	r6 = 0;						\
	*(u64*)(r2 + 0) = r6;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	*(u64*)(r0 + 0) = r9;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
110
SEC("socket")
__description("bounds check based on zero-extended MOV")
__success __success_unpriv __retval(0)
/* w2 = 0xffffffff zero-extends to 64 bits, so r2 >>= 32 yields exactly 0
 * and r0 += r2 is a no-op — the verifier must track this and accept the
 * in-bounds access.
 */
__naked void based_on_zero_extended_mov(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	/* r2 = 0x0000'0000'ffff'ffff */		\
	w2 = 0xffffffff;				\
	/* r2 = 0 */					\
	r2 >>= 32;					\
	/* no-op */					\
	r0 += r2;					\
	/* access at offset 0 */			\
	r0 = *(u8*)(r0 + 0);				\
l0_%=:	/* exit */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
140
SEC("socket")
__description("bounds check based on sign-extended MOV. test1")
__failure __msg("map_value pointer and 4294967295")
__failure_unpriv
/* The 64-bit MOV immediate sign-extends 0xffffffff to all-ones, so the
 * logical >>= 32 leaves r2 = 0xffff'ffff, far outside the 8-byte value —
 * pointer arithmetic with it must be rejected.
 */
__naked void on_sign_extended_mov_test1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	/* r2 = 0xffff'ffff'ffff'ffff */		\
	r2 = 0xffffffff;				\
	/* r2 = 0xffff'ffff */				\
	r2 >>= 32;					\
	/* r0 = <oob pointer> */			\
	r0 += r2;					\
	/* access to OOB pointer */			\
	r0 = *(u8*)(r0 + 0);				\
l0_%=:	/* exit */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
171
SEC("socket")
__description("bounds check based on sign-extended MOV. test2")
__failure __msg("R0 min value is outside of the allowed memory range")
__failure_unpriv
/* Like test1 but shifting by 36: r2 = 0xfff'ffff, still a large constant
 * offset, so the OOB access must be rejected (message above).
 */
__naked void on_sign_extended_mov_test2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	/* r2 = 0xffff'ffff'ffff'ffff */		\
	r2 = 0xffffffff;				\
	/* r2 = 0xfff'ffff */				\
	r2 >>= 36;					\
	/* r0 = <oob pointer> */			\
	r0 += r2;					\
	/* access to OOB pointer */			\
	r0 = *(u8*)(r0 + 0);				\
l0_%=:	/* exit */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
202
SEC("tc")
__description("bounds check based on reg_off + var_off + insn_off. test1")
__failure __msg("map_value pointer offset 1073741822 is not allowed")
/* r6 = (mark & 1) + (1<<29 - 1); adding it twice to r0 accumulates a fixed
 * offset near 2^30, which exceeds the verifier's allowed map_value pointer
 * offset (message above).
 */
__naked void var_off_insn_off_test1(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r6 &= 1;					\
	r6 += %[__imm_0];				\
	r0 += r6;					\
	r0 += %[__imm_0];				\
l0_%=:	r0 = *(u8*)(r0 + 3);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__imm_0, (1 << 29) - 1),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
231
SEC("tc")
__description("bounds check based on reg_off + var_off + insn_off. test2")
__failure __msg("value 1073741823")
/* Variant with (1<<30 - 1) in the variable part and (1<<29 - 1) as the
 * immediate pointer adjustment; the verifier must flag the 0x3fffffff
 * value (message above).
 */
__naked void var_off_insn_off_test2(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r6 &= 1;					\
	r6 += %[__imm_0];				\
	r0 += r6;					\
	r0 += %[__imm_1];				\
l0_%=:	r0 = *(u8*)(r0 + 3);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__imm_0, (1 << 30) - 1),
	  __imm_const(__imm_1, (1 << 29) - 1),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
261
SEC("socket")
__description("bounds check after truncation of non-boundary-crossing range")
__success __success_unpriv __retval(0)
/* The range never crosses a 32-bit boundary, so after the 32-bit subtract
 * (w1 -= ...) the verifier can still prove r1 is [0x00, 0xff]; the >>= 8
 * then pins it to 0 and the access is provably in bounds.
 */
__naked void of_non_boundary_crossing_range(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	/* r1 = [0x00, 0xff] */				\
	r1 = *(u8*)(r0 + 0);				\
	r2 = 1;						\
	/* r2 = 0x10'0000'0000 */			\
	r2 <<= 36;					\
	/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */	\
	r1 += r2;					\
	/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */	\
	r1 += 0x7fffffff;				\
	/* r1 = [0x00, 0xff] */				\
	w1 -= 0x7fffffff;				\
	/* r1 = 0 */					\
	r1 >>= 8;					\
	/* no-op */					\
	r0 += r1;					\
	/* access at offset 0 */			\
	r0 = *(u8*)(r0 + 0);				\
l0_%=:	/* exit */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
300
SEC("socket")
__description("bounds check after truncation of boundary-crossing range (1)")
__failure
/* not actually fully unbounded, but the bound is very high */
__msg("value -4294967168 makes map_value pointer be out of bounds")
__failure_unpriv
/* Here the 32-bit truncation (w1 += 0) lands on a range that crosses the
 * 32-bit boundary, so the verifier cannot keep tight bounds; the later
 * subtractions can produce a hugely negative r1 and r0 += r1 must fail.
 */
__naked void of_boundary_crossing_range_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	/* r1 = [0x00, 0xff] */				\
	r1 = *(u8*)(r0 + 0);				\
	r1 += %[__imm_0];				\
	/* r1 = [0xffff'ff80, 0x1'0000'007f] */		\
	r1 += %[__imm_0];				\
	/* r1 = [0xffff'ff80, 0xffff'ffff] or		\
	 * [0x0000'0000, 0x0000'007f]			\
	 */						\
	w1 += 0;					\
	r1 -= %[__imm_0];				\
	/* r1 = [0x00, 0xff] or				\
	 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\
	 */						\
	r1 -= %[__imm_0];				\
	/* error on OOB pointer computation */		\
	r0 += r1;					\
	/* exit */					\
	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__imm_0, 0xffffff80 >> 1)
	: __clobber_all);
}
342
SEC("socket")
__description("bounds check after truncation of boundary-crossing range (2)")
__failure __msg("value -4294967168 makes map_value pointer be out of bounds")
__failure_unpriv
/* Same scenario as (1), but the 32-bit truncation is done via a MOV32
 * (w1 = w1) instead of an ALU32 add — the verifier must treat both the
 * same and reject the OOB pointer computation.
 */
__naked void of_boundary_crossing_range_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	/* r1 = [0x00, 0xff] */				\
	r1 = *(u8*)(r0 + 0);				\
	r1 += %[__imm_0];				\
	/* r1 = [0xffff'ff80, 0x1'0000'007f] */		\
	r1 += %[__imm_0];				\
	/* r1 = [0xffff'ff80, 0xffff'ffff] or		\
	 * [0x0000'0000, 0x0000'007f]			\
	 * difference to previous test: truncation via MOV32\
	 * instead of ALU32.				\
	 */						\
	w1 = w1;					\
	r1 -= %[__imm_0];				\
	/* r1 = [0x00, 0xff] or				\
	 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\
	 */						\
	r1 -= %[__imm_0];				\
	/* error on OOB pointer computation */		\
	r0 += r1;					\
	/* exit */					\
	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__imm_0, 0xffffff80 >> 1)
	: __clobber_all);
}
384
SEC("socket")
__description("bounds check after wrapping 32-bit addition")
__success __success_unpriv __retval(0)
/* The 32-bit add wraps 0xffff'fffe + 2 to exactly 0; the verifier must
 * model the wrap and accept the resulting in-bounds access.
 */
__naked void after_wrapping_32_bit_addition(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	/* r1 = 0x7fff'ffff */				\
	r1 = 0x7fffffff;				\
	/* r1 = 0xffff'fffe */				\
	r1 += 0x7fffffff;				\
	/* r1 = 0 */					\
	w1 += 2;					\
	/* no-op */					\
	r0 += r1;					\
	/* access at offset 0 */			\
	r0 = *(u8*)(r0 + 0);				\
l0_%=:	/* exit */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
416
SEC("socket")
__description("bounds check after shift with oversized count operand")
__failure __msg("R0 max value is outside of the allowed memory range")
__failure_unpriv
/* A 32-bit shift by 32 has an architecture-dependent result, so the
 * verifier must treat r1 as unknown; even masked to [0, 0xffff] it can
 * exceed the 8-byte value and the access must be rejected.
 */
__naked void shift_with_oversized_count_operand(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = 32;					\
	r1 = 1;						\
	/* r1 = (u32)1 << (u32)32 = ? */		\
	w1 <<= w2;					\
	/* r1 = [0x0000, 0xffff] */			\
	r1 &= 0xffff;					\
	/* computes unknown pointer, potentially OOB */	\
	r0 += r1;					\
	/* potentially OOB access */			\
	r0 = *(u8*)(r0 + 0);				\
l0_%=:	/* exit */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
449
SEC("socket")
__description("bounds check after right shift of maybe-negative number")
__failure __msg("R0 unbounded memory access")
__failure_unpriv
/* r1 - 1 may be -1; logical right shifts of -1 produce huge unsigned
 * values, so r1's bounds stay enormous and the dependent access must be
 * rejected as unbounded.
 */
__naked void shift_of_maybe_negative_number(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	/* r1 = [0x00, 0xff] */				\
	r1 = *(u8*)(r0 + 0);				\
	/* r1 = [-0x01, 0xfe] */			\
	r1 -= 1;					\
	/* r1 = 0 or 0xff'ffff'ffff'ffff */		\
	r1 >>= 8;					\
	/* r1 = 0 or 0xffff'ffff'ffff */		\
	r1 >>= 8;					\
	/* computes unknown pointer, potentially OOB */	\
	r0 += r1;					\
	/* potentially OOB access */			\
	r0 = *(u8*)(r0 + 0);				\
l0_%=:	/* exit */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
484
SEC("socket")
__description("bounds check after 32-bit right shift with 64-bit input")
__failure __msg("math between map_value pointer and 4294967294 is not allowed")
__failure_unpriv
/* The 32-bit shift only sees the low word (0), so w1 >>= 31 yields 0 and
 * w1 -= 2 wraps to 0xffff'fffe — the verifier must catch this known-bad
 * constant in the pointer arithmetic.
 */
__naked void shift_with_64_bit_input(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = 2;						\
	/* r1 = 1<<32 */				\
	r1 <<= 31;					\
	/* r1 = 0 (NOT 2!) */				\
	w1 >>= 31;					\
	/* r1 = 0xffff'fffe (NOT 0!) */			\
	w1 -= 2;					\
	/* error on computing OOB pointer */		\
	r0 += r1;					\
	/* exit */					\
	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
516
SEC("socket")
__description("bounds check map access with off+size signed 32bit overflow. test1")
__failure __msg("map_value pointer and 2147483646")
__failure_unpriv
/* A single immediate add of 0x7ffffffe pushes the map-value pointer far
 * out of range; the verifier flags the constant itself (message above).
 */
__naked void size_signed_32bit_overflow_test1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r0 += 0x7ffffffe;				\
	r0 = *(u64*)(r0 + 0);				\
	goto l1_%=;					\
l1_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
541
SEC("socket")
__description("bounds check map access with off+size signed 32bit overflow. test2")
__failure __msg("pointer offset 1073741822")
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
/* Three adds of 0x1fffffff accumulate an offset past the verifier's
 * pointer-offset limit; rejection happens on the accumulated offset.
 */
__naked void size_signed_32bit_overflow_test2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r0 += 0x1fffffff;				\
	r0 += 0x1fffffff;				\
	r0 += 0x1fffffff;				\
	r0 = *(u64*)(r0 + 0);				\
	goto l1_%=;					\
l1_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
568
SEC("socket")
__description("bounds check map access with off+size signed 32bit overflow. test3")
__failure __msg("pointer offset -1073741822")
__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
/* Mirror of test2 in the negative direction: two subtracts of 0x1fffffff
 * drive the pointer offset past the negative limit.
 */
__naked void size_signed_32bit_overflow_test3(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r0 -= 0x1fffffff;				\
	r0 -= 0x1fffffff;				\
	r0 = *(u64*)(r0 + 2);				\
	goto l1_%=;					\
l1_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
594
SEC("socket")
__description("bounds check map access with off+size signed 32bit overflow. test4")
__failure __msg("map_value pointer and 1000000000000")
__failure_unpriv
/* The offset (10^12) is built by multiplication rather than immediates,
 * checking the verifier tracks constants through ALU ops before flagging
 * the out-of-range pointer math.
 */
__naked void size_signed_32bit_overflow_test4(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = 1000000;					\
	r1 *= 1000000;					\
	r0 += r1;					\
	r0 = *(u64*)(r0 + 2);				\
	goto l1_%=;					\
l1_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
621
SEC("socket")
__description("bounds check mixed 32bit and 64bit arithmetic. test1")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
__xlated_unpriv("goto pc+2")
__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */
__xlated_unpriv("goto pc-1") /* sanitized dead code */
__xlated_unpriv("exit")
#endif
/* Verifies that 64-bit ALU ops keep the 32-bit sub-register bounds alive:
 * after the w1 comparisons, the bad ldx at l0 is provably dead.
 */
__naked void _32bit_and_64bit_arithmetic_test1(void)
{
	asm volatile ("					\
	r0 = 0;						\
	r1 = -1;					\
	r1 <<= 32;					\
	r1 += 1;					\
	/* r1 = 0xffffFFFF00000001 */			\
	if w1 > 1 goto l0_%=;				\
	/* check ALU64 op keeps 32bit bounds */		\
	r1 += 1;					\
	if w1 > 2 goto l0_%=;				\
	goto l1_%=;					\
l0_%=:	/* invalid ldx if bounds are lost above */	\
	r0 = *(u64*)(r0 - 1);				\
l1_%=:	exit;						\
" ::: __clobber_all);
}
650
SEC("socket")
__description("bounds check mixed 32bit and 64bit arithmetic. test2")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
__xlated_unpriv("goto pc+2")
__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */
__xlated_unpriv("goto pc-1") /* sanitized dead code */
__xlated_unpriv("exit")
#endif
/* Complement of test1: a 32-bit ALU op (w1 += 1) must zero-extend and
 * update the 64-bit bounds, making the bad ldx at l0 provably dead.
 */
__naked void _32bit_and_64bit_arithmetic_test2(void)
{
	asm volatile ("					\
	r0 = 0;						\
	r1 = -1;					\
	r1 <<= 32;					\
	r1 += 1;					\
	/* r1 = 0xffffFFFF00000001 */			\
	r2 = 3;						\
	/* r1 = 0x2 */					\
	w1 += 1;					\
	/* check ALU32 op zero extends 64bit bounds */	\
	if r1 > r2 goto l0_%=;				\
	goto l1_%=;					\
l0_%=:	/* invalid ldx if bounds are lost above */	\
	r0 = *(u64*)(r0 - 1);				\
l1_%=:	exit;						\
" ::: __clobber_all);
}
680
SEC("tc")
__description("assigning 32bit bounds to 64bit for wA = 0, wB = wA")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
/* w9 = 0 then w2 = w9: the known-zero 32-bit bound must propagate to the
 * full 64-bit register so the packet-pointer math below verifies.
 */
__naked void for_wa_0_wb_wa(void)
{
	asm volatile ("					\
	r8 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r7 = *(u32*)(r1 + %[__sk_buff_data]);		\
	w9 = 0;						\
	w2 = w9;					\
	r6 = r7;					\
	r6 += r2;					\
	r3 = r6;					\
	r3 += 8;					\
	if r3 > r8 goto l0_%=;				\
	r5 = *(u32*)(r6 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
704
SEC("socket")
__description("bounds check for reg = 0, reg xor 1")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
__xlated_unpriv("if r1 != 0x0 goto pc+2")
__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
__xlated_unpriv("goto pc-1") /* sanitized dead code */
__xlated_unpriv("r0 = 0")
#endif
/* 0 ^ 1 is known to be 1, so the `r1 != 0` branch is always taken and the
 * OOB-looking load at +8 is provably dead code.
 */
__naked void reg_0_reg_xor_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = 0;						\
	r1 ^= 1;					\
	if r1 != 0 goto l1_%=;				\
	r0 = *(u64*)(r0 + 8);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
737
SEC("socket")
__description("bounds check for reg32 = 0, reg32 xor 1")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
__xlated_unpriv("if w1 != 0x0 goto pc+2")
__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
__xlated_unpriv("goto pc-1") /* sanitized dead code */
__xlated_unpriv("r0 = 0")
#endif
/* 32-bit variant of the test above: w1 = 0 ^ 1 is known to be 1, so the
 * load at +8 is dead code.
 */
__naked void reg32_0_reg32_xor_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	w1 = 0;						\
	w1 ^= 1;					\
	if w1 != 0 goto l1_%=;				\
	r0 = *(u64*)(r0 + 8);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
770
SEC("socket")
__description("bounds check for reg = 2, reg xor 3")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
__xlated_unpriv("if r1 > 0x0 goto pc+2")
__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
__xlated_unpriv("goto pc-1") /* sanitized dead code */
__xlated_unpriv("r0 = 0")
#endif
/* 2 ^ 3 is known to be 1, so `r1 > 0` always branches and the load at +8
 * is dead code.
 */
__naked void reg_2_reg_xor_3(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = 2;						\
	r1 ^= 3;					\
	if r1 > 0 goto l1_%=;				\
	r0 = *(u64*)(r0 + 8);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
803
SEC("socket")
__description("bounds check for reg = any, reg xor 3")
__failure __msg("invalid access to map value")
__msg_unpriv("invalid access to map value")
/* r1 is an unknown map value; xor with 3 gives no usable bounds, so the
 * branch cannot prove the +8 load safe and it must be rejected.
 */
__naked void reg_any_reg_xor_3(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = *(u64*)(r0 + 0);				\
	r1 ^= 3;					\
	if r1 != 0 goto l1_%=;				\
	r0 = *(u64*)(r0 + 8);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
830
SEC("socket")
__description("bounds check for reg32 = any, reg32 xor 3")
__failure __msg("invalid access to map value")
__msg_unpriv("invalid access to map value")
/* 32-bit variant: xor of an unknown w1 with 3 gives no usable bounds, so
 * the +8 load must be rejected.
 */
__naked void reg32_any_reg32_xor_3(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = *(u64*)(r0 + 0);				\
	w1 ^= 3;					\
	if w1 != 0 goto l1_%=;				\
	r0 = *(u64*)(r0 + 8);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
857
SEC("socket")
__description("bounds check for reg > 0, reg xor 3")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
__xlated_unpriv("if r1 >= 0x0 goto pc+2")
__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
__xlated_unpriv("goto pc-1") /* sanitized dead code */
__xlated_unpriv("r0 = 0")
#endif
/* After excluding r1 <= 0, xor with 3 keeps r1 non-negative, so the
 * unsigned `r1 >= 0` check always branches and the load at +8 is dead.
 */
__naked void reg_0_reg_xor_3(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = *(u64*)(r0 + 0);				\
	if r1 <= 0 goto l1_%=;				\
	r1 ^= 3;					\
	if r1 >= 0 goto l1_%=;				\
	r0 = *(u64*)(r0 + 8);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
891
SEC("socket")
__description("bounds check for reg32 > 0, reg32 xor 3")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
__xlated_unpriv("if w1 >= 0x0 goto pc+2")
__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
__xlated_unpriv("goto pc-1") /* sanitized dead code */
__xlated_unpriv("r0 = 0")
#endif
/* 32-bit variant of the test above: the unsigned `w1 >= 0` check always
 * branches, making the load at +8 dead code.
 */
__naked void reg32_0_reg32_xor_3(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = *(u64*)(r0 + 0);				\
	if w1 <= 0 goto l1_%=;				\
	w1 ^= 3;					\
	if w1 >= 0 goto l1_%=;				\
	r0 = *(u64*)(r0 + 8);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
925
SEC("socket")
__description("bounds check for non const xor src dst")
__success __log_level(2)
__msg("5: (af) r0 ^= r6 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))")
/* Checks the verifier's tnum/bounds result for xor of two masked unknown
 * scalars: (r0 & 0x1a0) ^ (r6 & 0xaf) must yield var_off (0x0; 0x1af),
 * max 431, as pinned by the __msg log expectation above.
 */
__naked void non_const_xor_src_dst(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r6 &= 0xaf;					\
	r0 &= 0x1a0;					\
	r0 ^= r6;					\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
946
SEC("socket")
__description("bounds check for non const or src dst")
__success __log_level(2)
__msg("5: (4f) r0 |= r6 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))")
/* Same shape as the xor test, but for OR: the bounds of
 * (r0 & 0x1a0) | (r6 & 0xaf) must match the __msg expectation above.
 */
__naked void non_const_or_src_dst(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r6 &= 0xaf;					\
	r0 &= 0x1a0;					\
	r0 |= r6;					\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
967
SEC("socket")
__description("bounds check for non const mul regs")
__success __log_level(2)
__msg("5: (2f) r0 *= r6 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=3825,var_off=(0x0; 0xfff))")
/* Multiplication of two masked unknown scalars: [0, 0x0f] * [0, 0xff]
 * must yield max 3825 with var_off (0x0; 0xfff), as pinned by __msg.
 */
__naked void non_const_mul_regs(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r6 &= 0xff;					\
	r0 &= 0x0f;					\
	r0 *= r6;					\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
988
SEC("socket")
__description("bounds checks after 32-bit truncation. test 1")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
/* Regression test: the `r1 > 0x7fffffff` comparison after a u32 load must
 * not wrongly tighten the max bound (see inline comment); the program is
 * otherwise safe and should verify for privileged users.
 */
__naked void _32_bit_truncation_test_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u32*)(r0 + 0);				\
	/* This used to reduce the max bound to 0x7fffffff */\
	if r1 == 0 goto l1_%=;				\
	if r1 > 0x7fffffff goto l0_%=;			\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
1014
SEC("socket")
__description("bounds checks after 32-bit truncation. test 2")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
/* Mixes a signed 64-bit compare with a signed 32-bit compare on the same
 * u32-loaded value; the verifier must keep the bounds consistent across
 * the truncation and accept the program (privileged).
 */
__naked void _32_bit_truncation_test_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u32*)(r0 + 0);				\
	if r1 s< 1 goto l1_%=;				\
	if w1 s< 0 goto l0_%=;				\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
1039
SEC("xdp")
__description("bound check with JMP_JLT for crossing 64-bit signed boundary")
__success __retval(0)
/* r1's unsigned range straddles the 64-bit signed boundary (see inline
 * comment); the unsigned JLT loop bound must still let the verifier prove
 * the loop terminates and accept the program.
 */
__naked void crossing_64_bit_signed_boundary_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 1;					\
	if r1 > r3 goto l0_%=;				\
	r1 = *(u8*)(r2 + 0);				\
	r0 = 0x7fffffffffffff10 ll;			\
	r1 += r0;					\
	r0 = 0x8000000000000000 ll;			\
l1_%=:	r0 += 1;					\
	/* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */\
	if r0 < r1 goto l1_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}
1065
SEC("xdp")
__description("bound check with JMP_JSLT for crossing 64-bit signed boundary")
__success __retval(0)
/* Signed variant: r1's signed range covers [S64_MIN, S64_MAX] (see inline
 * comment), with an extra signed upper bound in r2 capping the loop; the
 * verifier must still converge and accept it.
 */
__naked void crossing_64_bit_signed_boundary_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 1;					\
	if r1 > r3 goto l0_%=;				\
	r1 = *(u8*)(r2 + 0);				\
	r0 = 0x7fffffffffffff10 ll;			\
	r1 += r0;					\
	r2 = 0x8000000000000fff ll;			\
	r0 = 0x8000000000000000 ll;			\
l1_%=:	r0 += 1;					\
	if r0 s> r2 goto l0_%=;				\
	/* r1 signed range is [S64_MIN, S64_MAX] */	\
	if r0 s< r1 goto l1_%=;				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}
1095
SEC("xdp")
__description("bound check for loop upper bound greater than U32_MAX")
__success __retval(0)
/* Loop counter and bound both live above U32_MAX (base 0x1'0000'0000);
 * the 64-bit unsigned compare must be tracked exactly so the loop is
 * proven bounded.
 */
__naked void bound_greater_than_u32_max(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 1;					\
	if r1 > r3 goto l0_%=;				\
	r1 = *(u8*)(r2 + 0);				\
	r0 = 0x100000000 ll;				\
	r1 += r0;					\
	r0 = 0x100000000 ll;				\
l1_%=:	r0 += 1;					\
	if r0 < r1 goto l1_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}
1120
SEC("xdp")
__description("bound check with JMP32_JLT for crossing 32-bit signed boundary")
__success __retval(0)
/* 32-bit analogue of the 64-bit JLT test: w1's unsigned range crosses the
 * 32-bit signed boundary (see inline comment) and the JMP32 loop must
 * still be proven bounded.
 */
__naked void crossing_32_bit_signed_boundary_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 1;					\
	if r1 > r3 goto l0_%=;				\
	r1 = *(u8*)(r2 + 0);				\
	w0 = 0x7fffff10;				\
	w1 += w0;					\
	w0 = 0x80000000;				\
l1_%=:	w0 += 1;					\
	/* r1 unsigned range is [0, 0x8000000f] */	\
	if w0 < w1 goto l1_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}
1146
SEC("xdp")
__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary")
__success __retval(0)
/* 32-bit signed analogue: w1's signed range covers [S32_MIN, S32_MAX]
 * (see inline comment), with w2 providing the signed loop cap; the
 * verifier must converge and accept it.
 */
__naked void crossing_32_bit_signed_boundary_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 1;					\
	if r1 > r3 goto l0_%=;				\
	r1 = *(u8*)(r2 + 0);				\
	w0 = 0x7fffff10;				\
	w1 += w0;					\
	w2 = 0x80000fff;				\
	w0 = 0x80000000;				\
l1_%=:	w0 += 1;					\
	if w0 s> w2 goto l0_%=;				\
	/* r1 signed range is [S32_MIN, S32_MAX] */	\
	if w0 s< w1 goto l1_%=;				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}
1176
/* JMP_NE must mark the fall-through edge: r4 starts in [0, 7] and the
 * 'r4 != 0' branch excludes zero, which makes r4 a valid non-zero
 * ARG_CONST_SIZE argument for bpf_skb_store_bytes().
 */
SEC("tc")
__description("bounds check with JMP_NE for reg edge")
__success __retval(0)
__naked void reg_not_equal_const(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	call %[bpf_get_prandom_u32];			\
	r4 = r0;					\
	r4 &= 7;					\
	if r4 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r1 = r6;					\
	r2 = 0;						\
	r3 = r10;					\
	r3 += -8;					\
	r5 = 0;						\
	/* The 4th argument of bpf_skb_store_bytes is defined as \
	 * ARG_CONST_SIZE, so 0 is not allowed. The 'r4 != 0' \
	 * is providing us this exclusion of zero from initial \
	 * [0, 7] range.				\
	 */						\
	call %[bpf_skb_store_bytes];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
1210
/* JMP_EQ variant of the test above: when r4 == 0 we skip the helper
 * call entirely, so the surviving path has r4 in [1, 7].
 */
SEC("tc")
__description("bounds check with JMP_EQ for reg edge")
__success __retval(0)
__naked void reg_equal_const(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	call %[bpf_get_prandom_u32];			\
	r4 = r0;					\
	r4 &= 7;					\
	if r4 == 0 goto l0_%=;				\
	r1 = r6;					\
	r2 = 0;						\
	r3 = r10;					\
	r3 += -8;					\
	r5 = 0;						\
	/* Just the same as what we do in reg_not_equal_const() */ \
	call %[bpf_skb_store_bytes];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
1238
/* Multiply two mixed-sign scalars: r6 in [-1e9, -1e9+15] and r7 in
 * [-2e9, -2e9+15]; the expected product bounds are pinned by __msg.
 * NOTE(review): the bpf_skb_store_bytes operand is not referenced by
 * the asm body — presumably copy-paste from the tests above.
 */
SEC("tc")
__description("multiply mixed sign bounds. test 1")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))")
__naked void mult_mixed0_sign(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0xf;"
	"r6 -= 1000000000;"
	"r7 &= 0xf;"
	"r7 -= 2000000000;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
1261
/* Multiply two small mixed-sign scalars: r6 in [-10, 5], r7 in
 * [-20, -5]; the product range [-100, 200] is pinned by __msg.
 */
SEC("tc")
__description("multiply mixed sign bounds. test 2")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6=scalar(smin=smin32=-100,smax=smax32=200)")
__naked void mult_mixed1_sign(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0xf;"
	"r6 -= 0xa;"
	"r7 &= 0xf;"
	"r7 -= 0x14;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
1284
/* Multiply two negative scalars (both shifted down by r8 = 0x7fff);
 * the positive product bounds and tnum are pinned by __msg.
 */
SEC("tc")
__description("multiply negative bounds")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))")
__naked void mult_sign_bounds(void)
{
	asm volatile (
	"r8 = 0x7fff;"
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0xa;"
	"r6 -= r8;"
	"r7 &= 0xf;"
	"r7 -= r8;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
1308
/* Multiply where the result stays on one side of the signed boundary:
 * r6 in [0, 11] times a large constant never crosses sign, so tight
 * bounds survive (pinned by __msg).
 */
SEC("tc")
__description("multiply bounds that don't cross signed boundary")
__success __log_level(2)
__msg("r8 *= r6 {{.*}}; R6=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))")
__naked void mult_no_sign_crossing(void)
{
	asm volatile (
	"r6 = 0xb;"
	"r8 = 0xb3c3f8c99262687 ll;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= r7;"
	"r8 *= r6;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
1328
/* Unsigned multiplication that can overflow u64: the verifier must
 * give up and mark the result as a fully unknown scalar.
 */
SEC("tc")
__description("multiplication overflow, result in unbounded reg. test 1")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6=scalar()")
__naked void mult_unsign_ovf(void)
{
	asm volatile (
	"r8 = 0x7ffffffffff ll;"
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0x7fffffff;"
	"r7 &= r8;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
1350
/* Signed multiplication that can overflow s64 (large negative r6 times
 * r7 up to 0x7fffffff): result must become an unknown scalar.
 */
SEC("tc")
__description("multiplication overflow, result in unbounded reg. test 2")
__success __log_level(2)
__msg("r6 *= r7 {{.*}}; R6=scalar()")
__naked void mult_sign_ovf(void)
{
	asm volatile (
	"r8 = 0x7ffffffff ll;"
	"call %[bpf_get_prandom_u32];"
	"r6 = r0;"
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r6 &= 0xa;"
	"r6 -= r8;"
	"r7 &= 0x7fffffff;"
	"r6 *= r7;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_skb_store_bytes)
	: __clobber_all);
}
1373
/* r3 has its top bits forced on (>= 0xa000000000000000), so doubling it
 * always wraps; since every outcome overflows, bounds stay usable
 * (pinned by __msg at instruction 5).
 */
SEC("socket")
__description("64-bit addition, all outcomes overflow")
__success __log_level(2)
__msg("5: (0f) r3 += r3 {{.*}} R3=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
__retval(0)
__naked void add64_full_overflow(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r4 = r0;"
	"r3 = 0xa000000000000000 ll;"
	"r3 |= r4;"
	"r3 += r3;"
	"r0 = 0;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1393
/* r3 >= 2 but otherwise unbounded, so r3 += r3 may or may not wrap;
 * partial overflow forces the result to an unknown scalar.
 */
SEC("socket")
__description("64-bit addition, partial overflow, result in unbounded reg")
__success __log_level(2)
__msg("4: (0f) r3 += r3 {{.*}} R3=scalar()")
__retval(0)
__naked void add64_partial_overflow(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r4 = r0;"
	"r3 = 2;"
	"r3 |= r4;"
	"r3 += r3;"
	"r0 = 0;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1413
/* 32-bit analogue of add64_full_overflow: w3 >= 0xa0000000 so w3 += w3
 * always wraps in u32; resulting bounds are pinned by __msg.
 */
SEC("socket")
__description("32-bit addition overflow, all outcomes overflow")
__success __log_level(2)
__msg("4: (0c) w3 += w3 {{.*}} R3=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))")
__retval(0)
__naked void add32_full_overflow(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"w4 = w0;"
	"w3 = 0xa0000000;"
	"w3 |= w4;"
	"w3 += w3;"
	"r0 = 0;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1433
/* 32-bit analogue of add64_partial_overflow: w3 >= 2 may or may not
 * wrap when doubled, so u32 bounds collapse to the full range.
 */
SEC("socket")
__description("32-bit addition, partial overflow, result in unbounded u32 bounds")
__success __log_level(2)
__msg("4: (0c) w3 += w3 {{.*}} R3=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
__retval(0)
__naked void add32_partial_overflow(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"w4 = w0;"
	"w3 = 2;"
	"w3 |= w4;"
	"w3 += w3;"
	"r0 = 0;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1453
/* r1 has the sign bit forced on, so 0 - r1 always underflows; since
 * every outcome underflows, bounds remain usable (pinned by __msg).
 */
SEC("socket")
__description("64-bit subtraction, all outcomes underflow")
__success __log_level(2)
__msg("6: (1f) r3 -= r1 {{.*}} R3=scalar(umin=1,umax=0x8000000000000000)")
__retval(0)
__naked void sub64_full_overflow(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r1 = r0;"
	"r2 = 0x8000000000000000 ll;"
	"r1 |= r2;"
	"r3 = 0;"
	"r3 -= r1;"
	"r0 = 0;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1474
/* Unknown r3 minus 1 may or may not underflow; partial overflow leaves
 * the result without usable bounds (pinned by __msg).
 */
SEC("socket")
__description("64-bit subtraction, partial overflow, result in unbounded reg")
__success __log_level(2)
__msg("3: (1f) r3 -= r2 {{.*}} R3=scalar(id=1-1)")
__retval(0)
__naked void sub64_partial_overflow(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r3 = r0;"
	"r2 = 1;"
	"r3 -= r2;"
	"r0 = 0;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1493
/* 32-bit analogue of sub64_full_overflow: w1 has bit 31 forced on, so
 * 0 - w1 always underflows in u32; bounds are pinned by __msg.
 */
SEC("socket")
__description("32-bit subtraction overflow, all outcomes underflow")
__success __log_level(2)
__msg("5: (1c) w3 -= w1 {{.*}} R3=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))")
__retval(0)
__naked void sub32_full_overflow(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"w1 = w0;"
	"w2 = 0x80000000;"
	"w1 |= w2;"
	"w3 = 0;"
	"w3 -= w1;"
	"r0 = 0;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1514
/* 32-bit analogue of sub64_partial_overflow: unknown w3 minus 1 may or
 * may not underflow, so u32 bounds collapse to the full range.
 */
SEC("socket")
__description("32-bit subtraction, partial overflow, result in unbounded u32 bounds")
__success __log_level(2)
__msg("3: (1c) w3 -= w2 {{.*}} R3=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
__retval(0)
__naked void sub32_partial_overflow(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"w3 = w0;"
	"w2 = 1;"
	"w3 -= w2;"
	"r0 = 0;"
	"exit"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1533
/* After 'r0 == 0' is taken false, r0 != 0; the jset on the low 32 bits
 * then has a dead branch ('goto +0' is a no-op target). The verifier
 * must prune it without tripping a bounds-invariant violation.
 */
SEC("socket")
__description("dead branch on jset, does not result in invariants violation error")
__success __log_level(2)
__retval(0)
__naked void jset_range_analysis(void)
{
	asm volatile ("					\
	call %[bpf_get_netns_cookie];			\
	if r0 == 0 goto l0_%=;				\
	if r0 & 0xffffffff goto +0;			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_netns_cookie)
	: __clobber_all);
}
1550
/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges
 * overlap on the negative side. At instruction 7, the ranges look as follows:
 *
 * 0                    umin=0xfffffcf1                           umax=0xff..ff6e  U64_MAX
 * |                [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]  |
 * |----------------------------|------------------------------|
 * |xxxxxxxxxx]                                                 [xxxxxxxxxxxx|
 * 0    smax=0xeffffeee                                       smin=-655      -1
 *
 * We should therefore deduce the following new bounds:
 *
 * 0                                                 u64=[0xff..ffd71;0xff..ff6e]  U64_MAX
 * |                                                                        [xxx]  |
 * |----------------------------|------------------------------|
 * |                                                            [xxx]  |
 * 0                                                     s64=[-655;-146]          -1
 *
 * Without the deduction cross sign boundary, we end up with an invariant
 * violation error.
 */
SEC("socket")
__description("bounds deduction cross sign boundary, negative overlap")
__success __log_level(2)
__msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))")
__retval(0)
__naked void bounds_deduct_negative_overlap(void)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	w3 = w0;					\
	w6 = (s8)w0;					\
	r0 = (s8)r0;					\
	if w6 >= 0xf0000000 goto l0_%=;			\
	r0 += r6;					\
	r6 += 400;					\
	r0 -= r6;					\
	if r3 < r0 goto l0_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1594
/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges
 * overlap on the positive side. At instruction 3, the ranges look as follows:
 *
 * 0  umin=0                                 umax=0xffffffffffffff00       U64_MAX
 * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]       |
 * |----------------------------|------------------------------|
 * |xxxxxxxx]                                                   [xxxxxxxx|
 * 0   smax=127                                               smin=-128  -1
 *
 * We should therefore deduce the following new bounds:
 *
 * 0 u64=[0;127]                                                          U64_MAX
 * [xxxxxxxx]                                                             |
 * |----------------------------|------------------------------|
 * [xxxxxxxx]                                                             |
 * 0 s64=[0;127]                                                         -1
 *
 * Without the deduction cross sign boundary, the program is rejected due to
 * the frame pointer write.
 */
SEC("socket")
__description("bounds deduction cross sign boundary, positive overlap")
__success __log_level(2)
__msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))")
__retval(0)
__naked void bounds_deduct_positive_overlap(void)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	r0 = (s8)r0;					\
	r1 = 0xffffffffffffff00;			\
	if r0 > r1 goto l0_%=;				\
	if r0 < 128 goto l0_%=;				\
	r10 = 0;					\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1635
/* This test is the same as above, but the s64 and u64 ranges overlap in two
 * places. At instruction 3, the ranges look as follows:
 *
 * 0  umin=0                                 umax=0xffffffffffffff80       U64_MAX
 * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]   |
 * |----------------------------|------------------------------|
 * |xxxxxxxx]                                                   [xxxxxxxx|
 * 0   smax=127                                               smin=-128  -1
 *
 * 0xffffffffffffff80 = (u64)-128. We therefore can't deduce anything new and
 * the program should fail due to the frame pointer write.
 */
SEC("socket")
__description("bounds deduction cross sign boundary, two overlaps")
__failure
__msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)")
__msg("frame pointer is read only")
__naked void bounds_deduct_two_overlaps(void)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	r0 = (s8)r0;					\
	r1 = 0xffffffffffffff80;			\
	if r0 > r1 goto l0_%=;				\
	if r0 < 128 goto l0_%=;				\
	r10 = 0;					\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1668
/* r0 is a multiple of 2^30 (low 30 bits cleared) and r1 = r0 + 1024;
 * their tnums can never agree, so 'r1 != r0' is always taken and the
 * frame-pointer write that would otherwise reject the program is dead.
 */
SEC("socket")
__description("dead jne branch due to disagreeing tnums")
__success __log_level(2)
__naked void jne_disagreeing_tnums(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	w0 = w0;					\
	r0 >>= 30;					\
	r0 <<= 30;					\
	r1 = r0;					\
	r1 += 1024;					\
	if r1 != r0 goto +1;				\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1688
/* JEQ mirror of the test above: r1 = r0 + 1024 can never equal r0 given
 * their tnums, so the equality branch (and its r10 write) is dead.
 */
SEC("socket")
__description("dead jeq branch due to disagreeing tnums")
__success __log_level(2)
__naked void jeq_disagreeing_tnums(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	w0 = w0;					\
	r0 >>= 30;					\
	r0 <<= 30;					\
	r1 = r0;					\
	r1 += 1024;					\
	if r1 == r0 goto +1;				\
	exit;						\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1709
/* Every reflexive comparison (r0 op r0) has a statically known outcome:
 * ==, >=, s>=, <=, s<= are always taken; !=, >, s>, <, s< never are.
 * __not_msg checks the 'r0 = 1' target is never explored.
 */
SEC("socket")
__description("conditional jump on same register, branch taken")
__not_msg("20: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
__retval(0)
__naked void condition_jump_on_same_register(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	w8 = 0x80000000;				\
	r0 &= r8;					\
	if r0 == r0 goto +1;				\
	goto l1_%=;					\
	if r0 >= r0 goto +1;				\
	goto l1_%=;					\
	if r0 s>= r0 goto +1;				\
	goto l1_%=;					\
	if r0 <= r0 goto +1;				\
	goto l1_%=;					\
	if r0 s<= r0 goto +1;				\
	goto l1_%=;					\
	if r0 != r0 goto l1_%=;				\
	if r0 > r0 goto l1_%=;				\
	if r0 s> r0 goto l1_%=;				\
	if r0 < r0 goto l1_%=;				\
	if r0 s< r0 goto l1_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1744
/* jset of a constant register against itself: r0 & r0 is never taken
 * for r0 == 0 and always taken for r0 == 1, so 'r0 = 1' at l1 via the
 * fall-through path is provably unreachable (checked by __not_msg).
 */
SEC("socket")
__description("jset on same register, constant value branch taken")
__not_msg("7: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
__retval(0)
__naked void jset_on_same_register_1(void *ctx)
{
	asm volatile("					\
	r0 = 0;						\
	if r0 & r0 goto l1_%=;				\
	r0 = 1;						\
	if r0 & r0 goto +1;				\
	goto l1_%=;					\
l0_%=:	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1766
/* jset of a scalar against itself with ranges that exclude zero
 * ([1;2] and [-2;-1]): the branch must be known always-taken, so the
 * fall-through 'r0 = 1' path is never explored (__not_msg).
 */
SEC("socket")
__description("jset on same register, scalar value branch taken")
__not_msg("12: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
__retval(0)
__naked void jset_on_same_register_2(void *ctx)
{
	asm volatile("					\
	/* range [1;2] */				\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x1;					\
	r0 += 1;					\
	if r0 & r0 goto +1;				\
	goto l1_%=;					\
	/* range [-2;-1] */				\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x1;					\
	r0 -= 2;					\
	if r0 & r0 goto +1;				\
	goto l1_%=;					\
l0_%=:	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1795
/* jset with r0 in [0;1]: zero is possible, so both branch outcomes
 * must be explored (both 'r0 = 0' and 'r0 = 1' appear in the log).
 */
SEC("socket")
__description("jset on same register, scalar value unknown branch 1")
__msg("3: (b7) r0 = 0 {{.*}} R0=0")
__msg("5: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
__naked void jset_on_same_register_3(void *ctx)
{
	asm volatile("					\
	/* range [0;1] */				\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x1;					\
	if r0 & r0 goto l1_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1816
/* jset with r0 in [-1;0]: zero is possible, so both branch outcomes
 * must be explored.
 */
SEC("socket")
__description("jset on same register, scalar value unknown branch 2")
__msg("4: (b7) r0 = 0 {{.*}} R0=0")
__msg("6: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
__naked void jset_on_same_register_4(void *ctx)
{
	asm volatile("					\
	/* range [-1;0] */				\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x1;					\
	r0 -= 1;					\
	if r0 & r0 goto l1_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1838
/* jset with r0 in [-1;1]: the range straddles zero, so both branch
 * outcomes must be explored.
 */
SEC("socket")
__description("jset on same register, scalar value unknown branch 3")
__msg("4: (b7) r0 = 0 {{.*}} R0=0")
__msg("6: (b7) r0 = 1 {{.*}} R0=1")
__success __log_level(2)
__naked void jset_on_same_register_5(void *ctx)
{
	asm volatile("					\
	/* range [-1;1] */				\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x2;					\
	r0 -= 1;					\
	if r0 & r0 goto l1_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1860
/* This test covers the bounds deduction when the u64 range and the tnum
 * overlap only at umax. After instruction 3, the ranges look as follows:
 *
 * 0            umin=0xe01                umax=0xf00                     U64_MAX
 * |               [xxxxxxxxxxxxxx]                                      |
 * |----------------------------|------------------------------|
 * |              x             x                                        | tnum values
 *
 * The verifier can therefore deduce that the R0=0xf0=240.
 */
SEC("socket")
__description("bounds refinement with single-value tnum on umax")
__msg("3: (15) if r0 == 0xe0 {{.*}} R0=240")
__success __log_level(2)
__naked void bounds_refinement_tnum_umax(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	r0 |= 0xe0;					\
	r0 &= 0xf0;					\
	if r0 == 0xe0 goto +2;				\
	if r0 == 0xf0 goto +1;				\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1889
/* This test covers the bounds deduction when the u64 range and the tnum
 * overlap only at umin. After instruction 3, the ranges look as follows:
 *
 * 0           umin=0xe00                 umax=0xeff                     U64_MAX
 * |              [xxxxxxxxxxxxxx]                                       |
 * |----------------------------|------------------------------|
 * |              x             x                                        | tnum values
 *
 * The verifier can therefore deduce that the R0=0xe0=224.
 */
SEC("socket")
__description("bounds refinement with single-value tnum on umin")
__msg("3: (15) if r0 == 0xf0 {{.*}} R0=224")
__success __log_level(2)
__naked void bounds_refinement_tnum_umin(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	r0 |= 0xe0;					\
	r0 &= 0xf0;					\
	if r0 == 0xf0 goto +2;				\
	if r0 == 0xe0 goto +1;				\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1918
/* This test covers the bounds deduction when the only possible tnum value is
 * in the middle of the u64 range. After instruction 3, the ranges look as
 * follows:
 *
 * 0          umin=0x7cf              umax=0x7df                         U64_MAX
 * |             [xxxxxxxxxxxx]                                          |
 * |----------------------------|------------------------------|
 * |        x      x      x      x      x                                | tnum values
 * |               |             +--- 0x7e0
 * |               +--- 0x7d0
 *
 * Since the lower four bits are zero, the tnum and the u64 range only overlap
 * in R0=0x7d0=2000. Instruction 5 is therefore dead code.
 */
SEC("socket")
__description("bounds refinement with single-value tnum in middle of range")
__msg("3: (a5) if r0 < 0x7cf {{.*}} R0=2000")
__success __log_level(2)
__naked void bounds_refinement_tnum_middle(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	if r0 & 0x0f goto +4;				\
	if r0 > 0x7df goto +3;				\
	if r0 < 0x7cf goto +2;				\
	if r0 == 0x7d0 goto +1;				\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1951
/* This test cover the negative case for the tnum/u64 overlap. Since
 * they contain the same two values (i.e., {0, 1}), we can't deduce
 * anything more, and the reachable r10 write makes the program fail.
 */
SEC("socket")
__description("bounds refinement: several overlaps between tnum and u64")
__msg("2: (25) if r0 > 0x1 {{.*}} R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=1,var_off=(0x0; 0x1))")
__failure __log_level(2)
__naked void bounds_refinement_several_overlaps(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	if r0 < 0 goto +3;				\
	if r0 > 1 goto +2;				\
	if r0 == 1 goto +1;				\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1973
/* This test cover the negative case for the tnum/u64 overlap. Since
 * they overlap in the two values contained by the u64 range (i.e.,
 * {0xf, 0x10}), we can't deduce anything more, and the reachable r10
 * write makes the program fail.
 */
SEC("socket")
__description("bounds refinement: multiple overlaps between tnum and u64")
__msg("2: (25) if r0 > 0x10 {{.*}} R0=scalar(smin=umin=smin32=umin32=15,smax=umax=smax32=umax32=16,var_off=(0x0; 0x1f))")
__failure __log_level(2)
__naked void bounds_refinement_multiple_overlaps(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	if r0 < 0xf goto +3;				\
	if r0 > 0x10 goto +2;				\
	if r0 == 0x10 goto +1;				\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
1995
/* Intersecting 32-bit unsigned and signed ranges: u32 >= 3 combined
 * with s32 <= 1 leaves only negative values, so the final 's< 0' jump
 * is predicted taken and the r10 write is dead (program must verify).
 */
SEC("socket")
__success
__naked void signed_unsigned_intersection32_case1(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0xffffffff;				\
	if w0 < 0x3 goto 1f;	/* on fall-through u32 range [3..U32_MAX] */ \
	if w0 s> 0x1 goto 1f;	/* on fall-through s32 range [S32_MIN..1] */ \
	if w0 s< 0x0 goto 1f;	/* range can be narrowed to [S32_MIN..-1] */ \
	r10 = 0;		/* thus predicting the jump. */ \
1:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
2012
/* Intersecting 32-bit ranges, positive side: u32 <= S32_MIN+3 with
 * s32 in [-3..5] narrows to [0..5], so the final '<= 5' jump is
 * predicted taken and the r10 write is dead (program must verify).
 */
SEC("socket")
__success
__naked void signed_unsigned_intersection32_case2(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0xffffffff;				\
	if w0 > 0x80000003 goto 1f;	/* on fall-through u32 range [0..S32_MIN+3] */ \
	if w0 s< -3 goto 1f;		/* on fall-through s32 range [-3..S32_MAX] */ \
	if w0 s> 5 goto 1f;		/* on fall-through s32 range [-3..5] */ \
	if w0 <= 5 goto 1f;		/* range can be narrowed to [0..5] */ \
	r10 = 0;			/* thus predicting the jump */ \
1:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
2030
/*
 * After instruction 3, the u64 and s64 ranges look as follows:
 * 0  umin=2                        umax=0xff..ff00..03                  U64_MAX
 * |  [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]       |
 * |----------------------------|------------------------------|
 * |xx]                                  [xxxxxxxxxxxxxxxxxxxxxxxxxxxx|
 * 0  smax=2                          smin=0x800..02                    -1
 *
 * The two ranges can't be refined because they overlap in two places. Once we
 * add an upper-bound to u64 at instruction 4, the refinement can happen. This
 * test validates that this refinement does happen and is not overwritten by
 * the less-precise 32bits ranges.
 */
SEC("socket")
__description("bounds refinement: 64bits ranges not overwritten by 32bits ranges")
__msg("3: (65) if r0 s> 0x2 {{.*}} R0=scalar(smin=0x8000000000000002,smax=2,umin=smin32=umin32=2,umax=0xffffffff00000003,smax32=umax32=3")
__msg("4: (25) if r0 > 0x13 {{.*}} R0=2")
__success __log_level(2)
__naked void refinement_32bounds_not_overwriting_64bounds(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	if w0 < 2 goto +5;				\
	if w0 > 3 goto +4;				\
	if r0 s> 2 goto +3;				\
	if r0 > 19 goto +2;				\
	if r0 == 2 goto +1;				\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
2064
/* After 'r6 s>>= 63' r6 is -1 or 0; OR-ing with 8 yields -1 or 8.
 * The 's< 0' check only eliminates -1, leaving r6 = 8, which is out
 * of bounds for a value_size=8 map value — the verifier must reject.
 */
SEC("socket")
__description("maybe_fork_scalars: OR with constant rejects OOB")
__failure __msg("invalid access to map value")
__naked void or_scalar_fork_rejects_oob(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r9 = r0;					\
	r6 = *(u64*)(r9 + 0);				\
	r6 s>>= 63;					\
	r6 |= 8;					\
	/* r6 is -1 (current) or 8 (pushed) */		\
	if r6 s< 0 goto l0_%=;				\
	/* pushed path: r6 = 8, OOB for value_size=8 */	\
	r9 += r6;					\
	r0 = *(u8*)(r9 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
2094
/* AND variant: r6 is -1 or 0 after the arithmetic shift; '&= 4' maps
 * those to 4 or 0, both valid offsets into a value_size=8 map value,
 * so the access must be accepted.
 */
SEC("socket")
__description("maybe_fork_scalars: AND with constant still works")
__success __retval(0)
__naked void and_scalar_fork_still_works(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r9 = r0;					\
	r6 = *(u64*)(r9 + 0);				\
	r6 s>>= 63;					\
	r6 &= 4;					\
	/*						\
	 * r6 is 0 (pushed, 0&4==0) or 4 (current)	\
	 * both within value_size=8			\
	 */						\
	if r6 s< 0 goto l0_%=;				\
	r9 += r6;					\
	r0 = *(u8*)(r9 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
2126
/* In-bounds OR variant: r6 becomes -1 or 4; the 's< 0' check removes
 * -1, leaving r6 = 4, which is within value_size=8, so the access
 * must be accepted.
 */
SEC("socket")
__description("maybe_fork_scalars: OR with constant allows in-bounds")
__success __retval(0)
__naked void or_scalar_fork_allows_inbounds(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r9 = r0;					\
	r6 = *(u64*)(r9 + 0);				\
	r6 s>>= 63;					\
	r6 |= 4;					\
	/*						\
	 * r6 is -1 (current) or 4 (pushed)		\
	 * pushed path: r6 = 4, within value_size=8	\
	 */						\
	if r6 s< 0 goto l0_%=;				\
	r9 += r6;					\
	r0 = *(u8*)(r9 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
2158
/*
 * Last jump can be detected as always taken because the intersection of R5 and
 * R7 32bit tnums produces a constant that isn't within R7's s32 bounds.
 */
SEC("socket")
__description("dead branch: tnums give impossible constant if equal")
__success
__naked void tnums_equal_impossible_constant(void *ctx)
{
	asm volatile("					\
	call %[bpf_get_prandom_u32];			\
	r5 = r0;					\
	/* Set r5's var_off32 to (0; 0xfffffffc) */	\
	r5 &= 0xfffffffffffffffc;			\
	r7 = r0;					\
	/* Set r7's var_off32 to (0x0; 0x1) */		\
	r7 &= 0x1;					\
	/* Now, s32=[-43; -42], var_off32=(0xffffffd4; 0x3) */ \
	r7 += -43;					\
	/* On fallthrough, var_off32=-44, not in s32 */	\
	if w5 != w7 goto +1;				\
	r10 = 0;					\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
2186
/* License declaration required for loading this BPF object. */
char _license[] SEC("license") = "GPL";
2188