// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */

#include <linux/if_ether.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

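/* Most tests below share one pattern: load skb->data and skb->data_end
 * from the context, advance a copy of the data pointer by the access
 * size, and compare it against data_end before dereferencing. The
 * verifier grants packet access only on the branch where that
 * comparison proves the access stays in bounds.
 */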
SEC("tc")
__description("pkt_end - pkt_start is allowed")
__success __retval(TEST_DATA_LEN)
__naked void end_pkt_start_is_allowed(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r0 -= r2;					\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test1")
__success __retval(0)
__naked void direct_packet_access_test1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

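/* test2 adds a variable offset derived from skb->len to the packet
 * pointer: the <<= 49 / >>= 49 pair masks the length down to its low
 * 15 bits, so the verifier sees a bounded scalar, and the sum is
 * re-checked against data_end before the final load.
 */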
SEC("tc")
__description("direct packet access: test2")
__success __retval(0)
__naked void direct_packet_access_test2(void)
{
	asm volatile ("					\
	r0 = 1;						\
	r4 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r5 = r3;					\
	r5 += 14;					\
	if r5 > r4 goto l0_%=;				\
	r0 = *(u8*)(r3 + 7);				\
	r4 = *(u8*)(r3 + 12);				\
	r4 *= 14;					\
	r3 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 += r4;					\
	r2 = *(u32*)(r1 + %[__sk_buff_len]);		\
	r2 <<= 49;					\
	r2 >>= 49;					\
	r3 += r2;					\
	r2 = r3;					\
	r2 += 8;					\
	r1 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	if r2 > r1 goto l1_%=;				\
	r1 = *(u8*)(r3 + 4);				\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
	: __clobber_all);
}

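/* test3 runs as a socket filter, where direct reads of skb->data
 * through the context are not permitted; the load at context offset 76
 * (__sk_buff.data) is rejected as an invalid context access for
 * privileged and unprivileged loaders alike.
 */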
SEC("socket")
__description("direct packet access: test3")
__failure __msg("invalid bpf_context access off=76")
__failure_unpriv
__naked void direct_packet_access_test3(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test4 (write)")
__success __retval(0)
__naked void direct_packet_access_test4_write(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

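/* Tests 5-7 flip the comparison around ('pkt_end >= reg'): only the
 * branch taken when data_end >= data + 8 may touch the packet, and
 * any load on the unproven fall-through path must be rejected.
 */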
SEC("tc")
__description("direct packet access: test5 (pkt_end >= reg, good access)")
__success __retval(0)
__naked void pkt_end_reg_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test6 (pkt_end >= reg, bad access)")
__failure __msg("invalid access to packet")
__naked void pkt_end_reg_bad_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test7 (pkt_end >= reg, both accesses)")
__failure __msg("invalid access to packet")
__naked void pkt_end_reg_both_accesses(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

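/* Tests 8 and 9 chain two opposite comparisons ('pkt_end >= reg'
 * followed by 'reg > pkt_end') so that the verifier retains usable
 * range information on each path that loads from the packet, in both
 * branch orderings.
 */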
SEC("tc")
__description("direct packet access: test8 (double test, variant 1)")
__success __retval(0)
__naked void test8_double_test_variant_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	if r0 > r3 goto l1_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test9 (double test, variant 2)")
__success __retval(0)
__naked void test9_double_test_variant_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = 1;						\
	exit;						\
l0_%=:	if r0 > r3 goto l1_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test10 (write invalid)")
__failure __msg("invalid access to packet")
__naked void packet_access_test10_write_invalid(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	*(u8*)(r2 + 0) = r2;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

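/* Tests 11-14 run the offset through scalar ALU ops (shift, and,
 * branch-selected constants) before adding it to the packet pointer.
 * Tests 11-13 only form the pointer; test14 also dereferences it
 * (12 >> 4 == 0, so the load hits the already-verified first byte).
 */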
SEC("tc")
__description("direct packet access: test11 (shift, good access)")
__success __retval(1)
__naked void access_test11_shift_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = 144;					\
	r5 = r3;					\
	r5 += 23;					\
	r5 >>= 3;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test12 (and, good access)")
__success __retval(1)
__naked void access_test12_and_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = 144;					\
	r5 = r3;					\
	r5 += 23;					\
	r5 &= 15;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test13 (branches, good access)")
__success __retval(1)
__naked void access_test13_branches_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	r4 = 1;						\
	if r3 > r4 goto l1_%=;				\
	r3 = 14;					\
	goto l2_%=;					\
l1_%=:	r3 = 24;					\
l2_%=:	r5 = r3;					\
	r5 += 23;					\
	r5 &= 15;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)")
__success __retval(1)
__naked void _0_const_imm_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r5 = 12;					\
	r5 >>= 4;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = *(u8*)(r6 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

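/* test15: an atomic add to the stack slot holding a spilled packet
 * pointer destroys the spill; the reload comes back as a scalar, so
 * the store through r2 must be rejected.
 */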
SEC("tc")
__description("direct packet access: test15 (spill with xadd)")
__failure __msg("R2 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void access_test15_spill_with_xadd(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r5 = 4096;					\
	r4 = r10;					\
	r4 += -8;					\
	*(u64*)(r4 + 0) = r2;				\
	lock *(u64 *)(r4 + 0) += r5;			\
	r2 = *(u64*)(r4 + 0);				\
	*(u32*)(r2 + 0) = r5;				\
	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

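/* test16: pointer arithmetic on pkt_end itself is prohibited. */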
SEC("tc")
__description("direct packet access: test16 (arith on data_end)")
__failure __msg("R3 pointer arithmetic on pkt_end")
__naked void test16_arith_on_data_end(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	r3 += 16;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

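/* test17: two paths reach the range check with fixed offsets that
 * differ by one byte (r0 += 1 on the mark > 1 path). Under strict
 * alignment, state pruning must not hide the misaligned 4-byte store
 * on the bumped path.
 */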
SEC("tc")
__description("direct packet access: test17 (pruning, alignment)")
__failure __msg("misaligned packet access off 2+0+15+-4 size 4")
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void packet_access_test17_pruning_alignment(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r7 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	r0 = r2;					\
	r0 += 14;					\
	if r7 > 1 goto l0_%=;				\
l2_%=:	if r0 > r3 goto l1_%=;				\
	*(u32*)(r0 - 4) = r0;				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	r0 += 1;					\
	goto l2_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

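/* Tests 18-24 use the 'scalar += pkt_ptr' form, with the packet
 * pointer as the source rather than the destination of the add. The
 * verifier must track these sums like 'pkt_ptr += scalar', including
 * when the scalar is a bounded unknown loaded back from the stack.
 */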
SEC("tc")
__description("direct packet access: test18 (imm += pkt_ptr, 1)")
__success __retval(0)
__naked void test18_imm_pkt_ptr_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 8;						\
	r0 += r2;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test19 (imm += pkt_ptr, 2)")
__success __retval(0)
__naked void test19_imm_pkt_ptr_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r4 = 4;						\
	r4 += r2;					\
	*(u8*)(r4 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test20 (x += pkt_ptr, 1)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test20_x_pkt_ptr_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0x7fff;					\
	r4 = r0;					\
	r4 += r2;					\
	r5 = r4;					\
	r4 += %[__imm_0];				\
	if r4 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test21 (x += pkt_ptr, 2)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test21_x_pkt_ptr_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r4 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r4;				\
	r4 = *(u64*)(r10 - 8);				\
	r4 &= 0x7fff;					\
	r4 += r2;					\
	r5 = r4;					\
	r4 += %[__imm_0];				\
	if r4 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test22 (x += pkt_ptr, 3)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test22_x_pkt_ptr_3(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	*(u64*)(r10 - 8) = r2;				\
	*(u64*)(r10 - 16) = r3;				\
	r3 = *(u64*)(r10 - 16);				\
	if r0 > r3 goto l0_%=;				\
	r2 = *(u64*)(r10 - 8);				\
	r4 = 0xffffffff;				\
	lock *(u64 *)(r10 - 8) += r4;			\
	r4 = *(u64*)(r10 - 8);				\
	r4 >>= 49;					\
	r4 += r2;					\
	r0 = r4;					\
	r0 += 2;					\
	if r0 > r3 goto l0_%=;				\
	r2 = 1;						\
	*(u16*)(r4 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test23 (x += pkt_ptr, 4)")
__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void test23_x_pkt_ptr_4(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0xffff;					\
	r4 = r0;					\
	r0 = 31;					\
	r0 += r4;					\
	r0 += r2;					\
	r5 = r0;					\
	r0 += %[__imm_0];				\
	if r0 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r0;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0xffff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test24 (x += pkt_ptr, 5)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test24_x_pkt_ptr_5(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0xff;					\
	r4 = r0;					\
	r0 = 64;					\
	r0 += r4;					\
	r0 += r2;					\
	r5 = r0;					\
	r0 += %[__imm_0];				\
	if r0 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r0;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

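/* Tests 25-28 check that range marking also works for the < and <=
 * comparisons in both branch directions: only the branch on which the
 * bound was proven may load from the packet.
 */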
SEC("tc")
__description("direct packet access: test25 (marking on <, good access)")
__success __retval(0)
__naked void test25_marking_on_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 < r3 goto l0_%=;				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test26 (marking on <, bad access)")
__failure __msg("invalid access to packet")
__naked void test26_marking_on_bad_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 < r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test27 (marking on <=, good access)")
__success __retval(1)
__naked void test27_marking_on_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 <= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("direct packet access: test28 (marking on <=, bad access)")
__failure __msg("invalid access to packet")
__naked void test28_marking_on_bad_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 <= r0 goto l0_%=;				\
l1_%=:	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

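/* test29: the bounds check lives in a subprogram. The 'r3 > r2'
 * comparison against pkt_end in the callee must also mark the
 * caller's copy of the packet pointer (r6, same id), and the caller
 * loads only when the callee returned 1.
 */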
SEC("tc")
__description("direct packet access: test29 (reg > pkt_end in subprog)")
__success __retval(0)
__naked void reg_pkt_end_in_subprog(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r2 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r3 = r6;					\
	r3 += 8;					\
	call reg_pkt_end_in_subprog__1;			\
	if r0 == 0 goto l0_%=;				\
	r0 = *(u8*)(r6 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void reg_pkt_end_in_subprog__1(void)
{
	asm volatile ("					\
	r0 = 0;						\
	if r3 > r2 goto l0_%=;				\
	r0 = 1;						\
l0_%=:	exit;						\
"	::: __clobber_all);
}

SEC("tc")
__description("direct packet access: test30 (check_id() in regsafe(), bad access)")
__failure __msg("invalid access to packet, off=0 size=1, R2")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void id_in_regsafe_bad_access(void)
{
	asm volatile ("					\
	/* r9 = ctx */					\
	r9 = r1;					\
	/* r7 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r7 = r0;					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* r2 = ctx->data				\
	 * r3 = ctx->data				\
	 * r4 = ctx->data_end				\
	 */						\
	r2 = *(u32*)(r9 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r9 + %[__sk_buff_data]);		\
	r4 = *(u32*)(r9 + %[__sk_buff_data_end]);	\
	/* if r6 > 100 goto exit			\
	 * if r7 > 100 goto exit			\
	 */						\
	if r6 > 100 goto l0_%=;				\
	if r7 > 100 goto l0_%=;				\
	/* r2 += r6              ; this forces assignment of ID to r2\
	 * r2 += 1               ; get some fixed off for r2\
	 * r3 += r7              ; this forces assignment of ID to r3\
	 * r3 += 1               ; get some fixed off for r3\
	 */						\
	r2 += r6;					\
	r2 += 1;					\
	r3 += r7;					\
	r3 += 1;					\
	/* if r6 != r7 goto +1   ; no new information about the state is derived from\
	 *                       ; this check, thus produced verifier states differ\
	 *                       ; only in 'insn_idx'	\
	 * r2 = r3               ; optionally share ID between r2 and r3\
	 */						\
	if r6 != r7 goto l1_%=;				\
	r2 = r3;					\
l1_%=:	/* if r3 > ctx->data_end goto exit */		\
	if r3 > r4 goto l0_%=;				\
	/* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2,\
	 *                       ; this is not always safe\
	 */						\
	r5 = *(u8*)(r2 - 1);				\
l0_%=:	/* exit(0) */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

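/* Tests 31-36: __linear_size() limits how much of the test packet is
 * linear, so data_end - data only spans the skb head. When the probed
 * offset lies beyond the linear area the runtime check fails and the
 * program returns 1; with a large enough linear area the load goes
 * through and it returns 0. The smaller cgroup_skb offset presumably
 * reflects that program type seeing the packet from the network
 * header rather than the MAC header.
 */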
#define access_test_non_linear(name, type, desc, retval, linear_sz, off)			\
	SEC(type)										\
	__description("direct packet access: " #name " (non-linear, " type ", " desc ")")	\
	__success __retval(retval)								\
	__linear_size(linear_sz)								\
	__naked void access_non_linear_##name(void)						\
	{											\
		asm volatile ("									\
		r2 = *(u32*)(r1 + %[skb_data]);							\
		r3 = *(u32*)(r1 + %[skb_data_end]);						\
		r0 = r2;									\
		r0 += %[offset];								\
		if r0 > r3 goto l0_%=;								\
		r0 = *(u8*)(r0 - 1);								\
		r0 = 0;										\
		exit;										\
	l0_%=:	r0 = 1;										\
		exit;										\
	"	:										\
		: __imm_const(skb_data, offsetof(struct __sk_buff, data)),			\
		  __imm_const(skb_data_end, offsetof(struct __sk_buff, data_end)),		\
		  __imm_const(offset, off)							\
		: __clobber_all);								\
	}

access_test_non_linear(test31, "tc", "too short eth", 1, ETH_HLEN, 22);
access_test_non_linear(test32, "tc", "too short 1", 1, 1, 22);
access_test_non_linear(test33, "tc", "long enough", 0, 22, 22);
access_test_non_linear(test34, "cgroup_skb/ingress", "too short eth", 1, ETH_HLEN, 8);
access_test_non_linear(test35, "cgroup_skb/ingress", "too short 1", 1, 1, 8);
access_test_non_linear(test36, "cgroup_skb/ingress", "long enough", 0, 22, 8);

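/* test37: bpf_skb_pull_data() pulls the first 22 bytes into the
 * linear area, so the same check that fails in test31 now passes at
 * runtime and the byte at data + 21 can be read.
 */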
SEC("tc")
__description("direct packet access: test37 (non-linear, linearized)")
__success __retval(0)
__linear_size(ETH_HLEN)
__naked void access_non_linear_linearized(void)
{
	asm volatile ("				\
	r6 = r1;				\
	r2 = 22;				\
	call %[bpf_skb_pull_data];		\
	r2 = *(u32*)(r6 + %[skb_data]);		\
	r3 = *(u32*)(r6 + %[skb_data_end]);	\
	r0 = r2;				\
	r0 += 22;				\
	if r0 > r3 goto l0_%=;			\
	r0 = *(u8*)(r0 - 1);			\
	exit;					\
l0_%=:	r0 = 1;					\
	exit;					\
"	:
	: __imm(bpf_skb_pull_data),
	  __imm_const(skb_data, offsetof(struct __sk_buff, data)),
	  __imm_const(skb_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";