// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_packet_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

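/* Bounds-check 8 bytes of packet data against data_end, then pass the
 * packet pointer as both key and value to bpf_map_update_elem(); the
 * verified range makes it a valid helper memory argument.
 */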
SEC("xdp")
__description("helper access to packet: test1, valid packet_ptr range")
__success __retval(0)
__naked void test1_valid_packet_ptr_range(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 8;					\
	if r1 > r3 goto l0_%=;				\
	r1 = %[map_hash_8b] ll;				\
	r3 = r2;					\
	r4 = 0;						\
	call %[bpf_map_update_elem];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

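/* Pass the packet pointer to bpf_map_lookup_elem() without any bounds
 * check against data_end; the verifier must reject the unchecked access.
 */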
SEC("xdp")
__description("helper access to packet: test2, unchecked packet_ptr")
__failure __msg("invalid access to packet")
__naked void packet_test2_unchecked_packet_ptr(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
	: __clobber_all);
}

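/* Add a variable offset (a byte read from the packet) to the packet
 * pointer, re-check the result against data_end, and only then use it as
 * a map key; the second bounds check keeps the access valid.
 */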
SEC("xdp")
__description("helper access to packet: test3, variable add")
__success __retval(0)
__naked void to_packet_test3_variable_add(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r4 = r2;					\
	r4 += 8;					\
	if r4 > r3 goto l0_%=;				\
	r5 = *(u8*)(r2 + 0);				\
	r4 = r2;					\
	r4 += r5;					\
	r5 = r4;					\
	r5 += 8;					\
	if r5 > r3 goto l0_%=;				\
	r1 = %[map_hash_8b] ll;				\
	r2 = r4;					\
	call %[bpf_map_lookup_elem];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

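/* The helper is only reached on the branch where the bounds check failed,
 * so the packet pointer has no verified range and the access is rejected.
 */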
SEC("xdp")
__description("helper access to packet: test4, packet_ptr with bad range")
__failure __msg("invalid access to packet")
__naked void packet_ptr_with_bad_range_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r4 = r2;					\
	r4 += 4;					\
	if r4 > r3 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

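/* Only 7 bytes past the offset packet pointer are verified, which is too
 * short for the 8-byte map key, so the access is rejected.
 */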
SEC("xdp")
__description("helper access to packet: test5, packet_ptr with too short range")
__failure __msg("invalid access to packet")
__naked void ptr_with_too_short_range_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r2 += 1;					\
	r4 = r2;					\
	r4 += 7;					\
	if r4 > r3 goto l0_%=;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

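/* TC (cls) variant of test1: verified packet range used as key and value
 * for bpf_map_update_elem() via __sk_buff data/data_end.
 */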
SEC("tc")
__description("helper access to packet: test6, cls valid packet_ptr range")
__success __retval(0)
__naked void cls_valid_packet_ptr_range(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r1 = r2;					\
	r1 += 8;					\
	if r1 > r3 goto l0_%=;				\
	r1 = %[map_hash_8b] ll;				\
	r3 = r2;					\
	r4 = 0;						\
	call %[bpf_map_update_elem];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

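/* TC variant of test2: unchecked packet pointer passed to a helper. */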
SEC("tc")
__description("helper access to packet: test7, cls unchecked packet_ptr")
__failure __msg("invalid access to packet")
__naked void test7_cls_unchecked_packet_ptr(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

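/* TC variant of test3: variable offset added and re-checked before use. */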
SEC("tc")
__description("helper access to packet: test8, cls variable add")
__success __retval(0)
__naked void packet_test8_cls_variable_add(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = r2;					\
	r4 += 8;					\
	if r4 > r3 goto l0_%=;				\
	r5 = *(u8*)(r2 + 0);				\
	r4 = r2;					\
	r4 += r5;					\
	r5 = r4;					\
	r5 += 8;					\
	if r5 > r3 goto l0_%=;				\
	r1 = %[map_hash_8b] ll;				\
	r2 = r4;					\
	call %[bpf_map_lookup_elem];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

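/* TC variant of test4: helper called on the failed-check branch. */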
SEC("tc")
__description("helper access to packet: test9, cls packet_ptr with bad range")
__failure __msg("invalid access to packet")
__naked void packet_ptr_with_bad_range_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = r2;					\
	r4 += 4;					\
	if r4 > r3 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

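/* TC variant of test5: verified packet range is too short for the key. */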
SEC("tc")
__description("helper access to packet: test10, cls packet_ptr with too short range")
__failure __msg("invalid access to packet")
__naked void ptr_with_too_short_range_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r2 += 1;					\
	r4 = r2;					\
	r4 += 7;					\
	if r4 > r3 goto l0_%=;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

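/* bpf_skb_store_bytes() must not take a packet pointer as its source
 * buffer even after a bounds check, since the helper may move packet
 * data; the verifier reports "helper access to the packet is not allowed".
 */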
SEC("tc")
__description("helper access to packet: test11, cls unsuitable helper 1")
__failure __msg("helper access to the packet")
__naked void test11_cls_unsuitable_helper_1(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r3 = r6;					\
	r3 += 7;					\
	if r3 > r7 goto l0_%=;				\
	r2 = 0;						\
	r4 = 42;					\
	r5 = 0;						\
	call %[bpf_skb_store_bytes];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_skb_store_bytes),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

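/* bpf_skb_load_bytes() likewise rejects a packet pointer as its
 * destination buffer, despite the preceding bounds check.
 */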
SEC("tc")
__description("helper access to packet: test12, cls unsuitable helper 2")
__failure __msg("helper access to the packet")
__naked void test12_cls_unsuitable_helper_2(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r3 = r6;					\
	r6 += 8;					\
	if r6 > r7 goto l0_%=;				\
	r2 = 0;						\
	r4 = 4;						\
	call %[bpf_skb_load_bytes];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

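/* bpf_csum_diff() may read packet data directly: a 4-byte read from a
 * pointer with a 7-byte verified range is accepted.
 */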
SEC("tc")
__description("helper access to packet: test13, cls helper ok")
__success __retval(0)
__naked void packet_test13_cls_helper_ok(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r1 = r6;					\
	r2 = 4;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

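/* Subtracting 4 keeps the 4-byte csum_diff read inside the verified
 * packet range, so the program is accepted.
 */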
SEC("tc")
__description("helper access to packet: test14, cls helper ok sub")
__success __retval(0)
__naked void test14_cls_helper_ok_sub(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r1 -= 4;					\
	r2 = 4;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

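/* Subtracting 12 moves the pointer before the verified packet range, so
 * the helper access is rejected.
 */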
SEC("tc")
__description("helper access to packet: test15, cls helper fail sub")
__failure __msg("invalid access to packet")
__naked void test15_cls_helper_fail_sub(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r1 -= 12;					\
	r2 = 4;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

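/* Requesting 8 bytes when only 7 bytes of packet are verified exceeds the
 * checked range, so the access is rejected.
 */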
SEC("tc")
__description("helper access to packet: test16, cls helper fail range 1")
__failure __msg("invalid access to packet")
__naked void cls_helper_fail_range_1(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r1 = r6;					\
	r2 = 8;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

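/* A negative size argument (-9) for csum_diff is rejected with
 * "R2 min value is negative".
 */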
SEC("tc")
__description("helper access to packet: test17, cls helper fail range 2")
__failure __msg("R2 min value is negative")
__naked void cls_helper_fail_range_2(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r1 = r6;					\
	r2 = -9;					\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

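/* Same as test17 with size ~0 (i.e. -1): the size register may be
 * negative, so the verifier rejects it.
 */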
SEC("tc")
__description("helper access to packet: test18, cls helper fail range 3")
__failure __msg("R2 min value is negative")
__naked void cls_helper_fail_range_3(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r1 = r6;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__imm_0, ~0),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

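/* A zero-length range is accepted: csum_diff permits a zero-sized
 * buffer argument.
 */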
SEC("tc")
__description("helper access to packet: test19, cls helper range zero")
__success __retval(0)
__naked void test19_cls_helper_range_zero(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r1 = r6;					\
	r2 = 0;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

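/* Passing the data_end pointer itself (pkt_end) as the helper's memory
 * argument is rejected as a register type mismatch.
 */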
SEC("tc")
__description("helper access to packet: test20, pkt end as input")
__failure __msg("R1 type=pkt_end expected=fp")
__naked void test20_pkt_end_as_input(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r1 = r7;					\
	r2 = 4;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

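/* The range check was done against r1 (the end of the checked range),
 * and that same r1 is then passed as the start of a 4-byte read, which
 * lies beyond the verified bytes; the access is rejected.
 */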
SEC("tc")
__description("helper access to packet: test21, wrong reg")
__failure __msg("invalid access to packet")
__naked void to_packet_test21_wrong_reg(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r6 += 1;					\
	r1 = r6;					\
	r1 += 7;					\
	if r1 > r7 goto l0_%=;				\
	r2 = 4;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";