// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/sock.c */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} map_reuseport_array SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockhash SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_xskmap SEC(".maps");

struct val {
	int cnt;
	struct bpf_spin_lock l;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(max_entries, 0);
	__type(key, int);
	__type(value, struct val);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} sk_storage_map SEC(".maps");
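
/* sk_storage maps must be created with BPF_F_NO_PREALLOC and with
 * max_entries == 0: entries hang off individual sockets instead of a
 * preallocated table, and the value type may embed a bpf_spin_lock.
 */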

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

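/* Each __naked test below is hand-written BPF assembly. The
 * __description/__success/__failure/__msg attributes from bpf_misc.h
 * tell the test runner how the verifier is expected to react, and the
 * __imm()/__imm_const() operands patch helper addresses and structure
 * offsets into the assembly template.
 */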
SEC("cgroup/skb")
__description("skb->sk: no NULL check")
__failure __msg("invalid mem access 'sock_common_or_null'")
__failure_unpriv
__naked void skb_sk_no_null_check(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	r0 = *(u32*)(r1 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
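
/* skb->sk is typed PTR_TO_SOCK_COMMON_OR_NULL, so it must be compared
 * against NULL before it can be dereferenced; the test above skips the
 * check on purpose and must be rejected.
 */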

SEC("cgroup/skb")
__description("skb->sk: sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u32*)(r1 + %[bpf_sock_family]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("skb->sk: sk->type [fullsock field]")
__failure __msg("invalid sock_common access")
__failure_unpriv
__naked void sk_sk_type_fullsock_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u32*)(r1 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
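
/* family lives in the sock_common part of the socket and is readable
 * through a sock_common pointer, but type and friends are full-socket
 * fields: reading them before bpf_sk_fullsock() yields "invalid
 * sock_common access".
 */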

SEC("cgroup/skb")
__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	call %[bpf_sk_fullsock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_fullsock_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_family]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->state [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_state_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_state]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
__success __success_unpriv __retval(0)
__naked void port_word_load_backward_compatibility(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_half_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_byte_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r2 = *(u8*)(r0 + %[bpf_sock_dst_port]);		\
	r2 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_byte_load_invalid(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
	: __clobber_all);
}
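
/* dst_port is a 2-byte field followed by 2 bytes of zero padding. Half
 * and byte loads of the port itself are valid, a 4-byte load is still
 * accepted for backward compatibility, but any load that touches only
 * the padding is rejected.
 */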

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
__success __success_unpriv __retval(0)
__naked void dst_ip6_load_2nd_byte(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_protocol_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_protocol]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): beyond last field")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
	: __clobber_all);
}
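
/* rx_queue_mapping is the last field of struct bpf_sock, so a load
 * starting at offsetofend() of it falls outside the structure and is
 * an invalid sock access.
 */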

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	call %[bpf_tcp_sock];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'tcp_sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_bytes_acked(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): beyond last field")
__failure __msg("invalid tcp_sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l2_%=;				\
	exit;						\
l2_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}
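
/* bpf_tcp_sock() takes a sock_common and returns tcp_sock_or_null, so
 * its result needs its own NULL check even when the argument was
 * already checked (or was already a full socket).
 */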

SEC("tc")
__description("bpf_sk_release(skb->sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_release),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_fullsock_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_tcp_sock_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
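
/* bpf_sk_release() may only be passed sockets that carry an acquired
 * reference (e.g. from bpf_sk_lookup_tcp() or a sockmap lookup);
 * skb->sk and pointers derived from it are not reference-counted by
 * the program, so releasing them is rejected.
 */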

SEC("tc")
__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
__success __retval(0)
__naked void sk_null_0_value_null(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 0;						\
	r3 = 0;						\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
__failure __msg("R3 type=scalar expected=fp")
__naked void sk_1_1_value_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 1;						\
	r3 = 1;						\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
__success __retval(0)
__naked void stack_value_1_stack_value(void)
{
	asm volatile ("					\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 1;						\
	r3 = r10;					\
	r3 += -8;					\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
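
/* With the BPF_SK_STORAGE_GET_F_CREATE flag (the 1 in R4), the
 * optional initial-value argument in R3 must be NULL or point to
 * readable memory such as the stack; a plain scalar is rejected.
 */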

SEC("tc")
__description("bpf_map_lookup_elem(smap, &key)")
__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
__naked void map_lookup_elem_smap_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(sk_storage_map)
	: __clobber_all);
}

SEC("xdp")
__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
__success __retval(0)
__naked void xskmap_key_xs_queue_id(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_xskmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_xskmap),
	  __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockhash] ll;			\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockhash)
	: __clobber_all);
}
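
/* Unlike most map types, a sockmap/sockhash lookup acquires a
 * reference on the returned socket. Dropping it on the floor, as the
 * two tests above do, leaves an unreleased reference at program exit.
 */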

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = r0;					\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockmap),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockhash] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = r0;					\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockhash),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_reuseport_array] ll;			\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_reuseport_array)
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_sockmap] ll;				\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
__success
__naked void reuseport_ctx_sockhash_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_sockhash] ll;			\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockhash)
	: __clobber_all);
}
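
/* bpf_sk_select_reuseport() accepts a REUSEPORT_SOCKARRAY, SOCKMAP or
 * SOCKHASH map as the source of the selected socket.
 */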

SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r6 = r1;					\
	call %[bpf_skc_to_tcp_sock];			\
	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_skc_to_tcp_request_sock];		\
	r8 = r0;					\
	if r8 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r7 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skc_to_tcp_request_sock),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
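
/* Each bpf_skc_to_*() result is or-null and must be checked
 * individually: the test checks r8 (the request socket) but then
 * dereferences r7 (the tcp socket), which is still possibly NULL.
 */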

SEC("cgroup/post_bind4")
__description("sk->src_ip6[0] [half load]")
__failure __msg("invalid bpf_context access off=28 size=2")
__naked void post_bind4_read_src_ip6(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]);	\
	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0]))
	: __clobber_all);
}

SEC("cgroup/post_bind4")
__description("sk->mark [load mark]")
__failure __msg("invalid bpf_context access off=16 size=2")
__naked void post_bind4_read_mark(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r7 = *(u16*)(r6 + %[bpf_sock_mark]);		\
	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark))
	: __clobber_all);
}

SEC("cgroup/post_bind6")
__description("sk->src_ip4 [load src_ip4]")
__failure __msg("invalid bpf_context access off=24 size=2")
__naked void post_bind6_read_src_ip4(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]);		\
	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4))
	: __clobber_all);
}

SEC("cgroup/sock_create")
__description("sk->src_port [half load]")
__failure __msg("invalid bpf_context access off=44 size=2")
__naked void sock_create_read_src_port(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r7 = *(u16*)(r6 + %[bpf_sock_src_port]);	\
	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port))
	: __clobber_all);
}
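
/* For cgroup sock programs the readable bpf_sock fields (and the load
 * sizes allowed on them) depend on the attach type; the four rejected
 * loads above are all reported as invalid bpf_context accesses.
 */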

__noinline
long skb_pull_data2(struct __sk_buff *sk, __u32 len)
{
	return bpf_skb_pull_data(sk, len);
}

__noinline
long skb_pull_data1(struct __sk_buff *sk, __u32 len)
{
	return skb_pull_data2(sk, len);
}

/* global function calls bpf_skb_pull_data(), which invalidates packet
 * pointers established before the global function call.
 */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk)
{
	int *p = (void *)(long)sk->data;

	if ((void *)(p + 1) > (void *)(long)sk->data_end)
		return TCX_DROP;
	skb_pull_data1(sk, 0);
	*p = 42; /* this is unsafe */
	return TCX_PASS;
}
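
/* Illustrative sketch (not part of the converted test matrix): the safe
 * counterpart of the program above. After any call that may move packet
 * data, reload data/data_end and re-check the bounds before touching
 * the packet again; this version is expected to pass the verifier.
 */
SEC("tc")
__success
int revalidate_pkt_pointers_after_global_func(struct __sk_buff *sk)
{
	int *p;

	skb_pull_data1(sk, 0);
	/* Re-derive the packet pointers the call invalidated. */
	p = (void *)(long)sk->data;
	if ((void *)(p + 1) > (void *)(long)sk->data_end)
		return TCX_DROP;
	*p = 42; /* safe: bounds were re-checked after the call */
	return TCX_PASS;
}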

__noinline
long xdp_pull_data2(struct xdp_md *x, __u32 len)
{
	return bpf_xdp_pull_data(x, len);
}

__noinline
long xdp_pull_data1(struct xdp_md *x, __u32 len)
{
	return xdp_pull_data2(x, len);
}

/* global function calls bpf_xdp_pull_data(), which invalidates packet
 * pointers established before the global function call.
 */
SEC("xdp")
__failure __msg("invalid mem access")
int invalidate_xdp_pkt_pointers_from_global_func(struct xdp_md *x)
{
	int *p = (void *)(long)x->data;

	if ((void *)(p + 1) > (void *)(long)x->data_end)
		return XDP_DROP;
	xdp_pull_data1(x, 0);
	*p = 42; /* this is unsafe */
	return XDP_PASS;
}

/* XDP packet-changing kfunc calls invalidate packet pointers */
SEC("xdp")
__failure __msg("invalid mem access")
int invalidate_xdp_pkt_pointers(struct xdp_md *x)
{
	int *p = (void *)(long)x->data;

	if ((void *)(p + 1) > (void *)(long)x->data_end)
		return XDP_DROP;
	bpf_xdp_pull_data(x, 0);
	*p = 42; /* this is unsafe */
	return XDP_PASS;
}

__noinline
int tail_call(struct __sk_buff *sk)
{
	bpf_tail_call_static(sk, &jmp_table, 0);
	return 0;
}

/* Tail calls invalidate packet pointers: the callee may change the
 * packet, and control can return here when the jump table slot is
 * empty.
 */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
{
	int *p = (void *)(long)sk->data;

	if ((void *)(p + 1) > (void *)(long)sk->data_end)
		return TCX_DROP;
	tail_call(sk);
	*p = 42; /* this is unsafe */
	return TCX_PASS;
}

char _license[] SEC("license") = "GPL";