1 // SPDX-License-Identifier: GPL-2.0 2 /* Converted from tools/testing/selftests/bpf/verifier/sock.c */ 3 4 #include <linux/bpf.h> 5 #include <bpf/bpf_helpers.h> 6 #include "bpf_misc.h" 7 8 #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) 9 #define offsetofend(TYPE, MEMBER) \ 10 (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) 11 12 struct { 13 __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); 14 __uint(max_entries, 1); 15 __type(key, __u32); 16 __type(value, __u64); 17 } map_reuseport_array SEC(".maps"); 18 19 struct { 20 __uint(type, BPF_MAP_TYPE_SOCKHASH); 21 __uint(max_entries, 1); 22 __type(key, int); 23 __type(value, int); 24 } map_sockhash SEC(".maps"); 25 26 struct { 27 __uint(type, BPF_MAP_TYPE_SOCKMAP); 28 __uint(max_entries, 1); 29 __type(key, int); 30 __type(value, int); 31 } map_sockmap SEC(".maps"); 32 33 struct { 34 __uint(type, BPF_MAP_TYPE_XSKMAP); 35 __uint(max_entries, 1); 36 __type(key, int); 37 __type(value, int); 38 } map_xskmap SEC(".maps"); 39 40 struct val { 41 int cnt; 42 struct bpf_spin_lock l; 43 }; 44 45 struct { 46 __uint(type, BPF_MAP_TYPE_SK_STORAGE); 47 __uint(max_entries, 0); 48 __type(key, int); 49 __type(value, struct val); 50 __uint(map_flags, BPF_F_NO_PREALLOC); 51 } sk_storage_map SEC(".maps"); 52 53 struct { 54 __uint(type, BPF_MAP_TYPE_PROG_ARRAY); 55 __uint(max_entries, 1); 56 __uint(key_size, sizeof(__u32)); 57 __uint(value_size, sizeof(__u32)); 58 } jmp_table SEC(".maps"); 59 60 SEC("cgroup/skb") 61 __description("skb->sk: no NULL check") 62 __failure __msg("invalid mem access 'sock_common_or_null'") 63 __failure_unpriv 64 __naked void skb_sk_no_null_check(void) 65 { 66 asm volatile (" \ 67 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 68 r0 = *(u32*)(r1 + 0); \ 69 r0 = 0; \ 70 exit; \ 71 " : 72 : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 73 : __clobber_all); 74 } 75 76 SEC("cgroup/skb") 77 __description("skb->sk: sk->family [non fullsock field]") 78 __success __success_unpriv __retval(0) 79 __naked void sk_family_non_fullsock_field_1(void) 80 { 81 asm volatile (" \ 82 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 83 if r1 != 0 goto l0_%=; \ 84 r0 = 0; \ 85 exit; \ 86 l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \ 87 r0 = 0; \ 88 exit; \ 89 " : 90 : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 91 __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) 92 : __clobber_all); 93 } 94 95 SEC("cgroup/skb") 96 __description("skb->sk: sk->type [fullsock field]") 97 __failure __msg("invalid sock_common access") 98 __failure_unpriv 99 __naked void sk_sk_type_fullsock_field_1(void) 100 { 101 asm volatile (" \ 102 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 103 if r1 != 0 goto l0_%=; \ 104 r0 = 0; \ 105 exit; \ 106 l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \ 107 r0 = 0; \ 108 exit; \ 109 " : 110 : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 111 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 112 : __clobber_all); 113 } 114 115 SEC("cgroup/skb") 116 __description("bpf_sk_fullsock(skb->sk): no !skb->sk check") 117 __failure __msg("type=sock_common_or_null expected=sock_common") 118 __failure_unpriv 119 __naked void sk_no_skb_sk_check_1(void) 120 { 121 asm volatile (" \ 122 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 123 call %[bpf_sk_fullsock]; \ 124 r0 = 0; \ 125 exit; \ 126 " : 127 : __imm(bpf_sk_fullsock), 128 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 129 : __clobber_all); 130 } 131 132 SEC("cgroup/skb") 133 __description("sk_fullsock(skb->sk): no NULL check on ret") 134 __failure __msg("invalid mem access 'sock_or_null'") 135 __failure_unpriv 136 __naked void no_null_check_on_ret_1(void) 137 { 138 asm volatile (" \ 139 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 140 if r1 != 0 goto l0_%=; \ 141 r0 = 0; \ 142 exit; \ 143 l0_%=: call %[bpf_sk_fullsock]; \ 144 r0 = *(u32*)(r0 + %[bpf_sock_type]); \ 145 r0 = 0; \ 146 exit; \ 147 " : 148 : __imm(bpf_sk_fullsock), 149 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 150 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 151 : __clobber_all); 152 } 153 154 SEC("cgroup/skb") 155 __description("sk_fullsock(skb->sk): sk->type [fullsock field]") 156 __success __success_unpriv __retval(0) 157 __naked void sk_sk_type_fullsock_field_2(void) 158 { 159 asm volatile (" \ 160 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 161 if r1 != 0 goto l0_%=; \ 162 r0 = 0; \ 163 exit; \ 164 l0_%=: call %[bpf_sk_fullsock]; \ 165 if r0 != 0 goto l1_%=; \ 166 r0 = 0; \ 167 exit; \ 168 l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \ 169 r0 = 0; \ 170 exit; \ 171 " : 172 : __imm(bpf_sk_fullsock), 173 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 174 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 175 : __clobber_all); 176 } 177 178 SEC("cgroup/skb") 179 __description("sk_fullsock(skb->sk): sk->family [non fullsock field]") 180 __success __success_unpriv __retval(0) 181 __naked void sk_family_non_fullsock_field_2(void) 182 { 183 asm volatile (" \ 184 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 185 if r1 != 0 goto l0_%=; \ 186 r0 = 0; \ 187 exit; \ 188 l0_%=: call %[bpf_sk_fullsock]; \ 189 if r0 != 0 goto l1_%=; \ 190 exit; \ 191 l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \ 192 r0 = 0; \ 193 exit; \ 194 " : 195 : __imm(bpf_sk_fullsock), 196 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 197 __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) 198 : __clobber_all); 199 } 200 201 SEC("cgroup/skb") 202 __description("sk_fullsock(skb->sk): sk->state [narrow load]") 203 __success __success_unpriv __retval(0) 204 __naked void sk_sk_state_narrow_load(void) 205 { 206 asm volatile (" \ 207 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 208 if r1 != 0 goto l0_%=; \ 209 r0 = 0; \ 210 exit; \ 211 l0_%=: call %[bpf_sk_fullsock]; \ 212 if r0 != 0 goto l1_%=; \ 213 r0 = 0; \ 214 exit; \ 215 l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \ 216 r0 = 0; \ 217 exit; \ 218 " : 219 : __imm(bpf_sk_fullsock), 220 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 221 __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state)) 222 : __clobber_all); 223 } 224 225 SEC("cgroup/skb") 226 __description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)") 227 __success __success_unpriv __retval(0) 228 __naked void port_word_load_backward_compatibility(void) 229 { 230 asm volatile (" \ 231 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 232 if r1 != 0 goto l0_%=; \ 233 r0 = 0; \ 234 exit; \ 235 l0_%=: call %[bpf_sk_fullsock]; \ 236 if r0 != 0 goto l1_%=; \ 237 r0 = 0; \ 238 exit; \ 239 l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \ 240 r0 = 0; \ 241 exit; \ 242 " : 243 : __imm(bpf_sk_fullsock), 244 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 245 __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) 246 : __clobber_all); 247 } 248 249 SEC("cgroup/skb") 250 __description("sk_fullsock(skb->sk): sk->dst_port [half load]") 251 __success __success_unpriv __retval(0) 252 __naked void sk_dst_port_half_load(void) 253 { 254 asm volatile (" \ 255 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 256 if r1 != 0 goto l0_%=; \ 257 r0 = 0; \ 258 exit; \ 259 l0_%=: call %[bpf_sk_fullsock]; \ 260 if r0 != 0 goto l1_%=; \ 261 r0 = 0; \ 262 exit; \ 263 l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \ 264 r0 = 0; \ 265 exit; \ 266 " : 267 : __imm(bpf_sk_fullsock), 268 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 269 __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) 270 : __clobber_all); 271 } 272 273 SEC("cgroup/skb") 274 __description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)") 275 __failure __msg("invalid sock access") 276 __failure_unpriv 277 __naked void dst_port_half_load_invalid_1(void) 278 { 279 asm volatile (" \ 280 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 281 if r1 != 0 goto l0_%=; \ 282 r0 = 0; \ 283 exit; \ 284 l0_%=: call %[bpf_sk_fullsock]; \ 285 if r0 != 0 goto l1_%=; \ 286 r0 = 0; \ 287 exit; \ 288 l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \ 289 r0 = 0; \ 290 exit; \ 291 " : 292 : __imm(bpf_sk_fullsock), 293 __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), 294 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 295 : __clobber_all); 296 } 297 298 SEC("cgroup/skb") 299 __description("sk_fullsock(skb->sk): sk->dst_port [byte load]") 300 __success __success_unpriv __retval(0) 301 __naked void sk_dst_port_byte_load(void) 302 { 303 asm volatile (" \ 304 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 305 if r1 != 0 goto l0_%=; \ 306 r0 = 0; \ 307 exit; \ 308 l0_%=: call %[bpf_sk_fullsock]; \ 309 if r0 != 0 goto l1_%=; \ 310 r0 = 0; \ 311 exit; \ 312 l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \ 313 r2 = *(u8*)(r0 + %[__imm_0]); \ 314 r0 = 0; \ 315 exit; \ 316 " : 317 : __imm(bpf_sk_fullsock), 318 __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1), 319 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 320 __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) 321 : __clobber_all); 322 } 323 324 SEC("cgroup/skb") 325 __description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)") 326 __failure __msg("invalid sock access") 327 __failure_unpriv 328 __naked void dst_port_byte_load_invalid(void) 329 { 330 asm volatile (" \ 331 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 332 if r1 != 0 goto l0_%=; \ 333 r0 = 0; \ 334 exit; \ 335 l0_%=: call %[bpf_sk_fullsock]; \ 336 if r0 != 0 goto l1_%=; \ 337 r0 = 0; \ 338 exit; \ 339 l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ 340 r0 = 0; \ 341 exit; \ 342 " : 343 : __imm(bpf_sk_fullsock), 344 __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), 345 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 346 : __clobber_all); 347 } 348 349 SEC("cgroup/skb") 350 __description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)") 351 __failure __msg("invalid sock access") 352 __failure_unpriv 353 __naked void dst_port_half_load_invalid_2(void) 354 { 355 asm volatile (" \ 356 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 357 if r1 != 0 goto l0_%=; \ 358 r0 = 0; \ 359 exit; \ 360 l0_%=: call %[bpf_sk_fullsock]; \ 361 if r0 != 0 goto l1_%=; \ 362 r0 = 0; \ 363 exit; \ 364 l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \ 365 r0 = 0; \ 366 exit; \ 367 " : 368 : __imm(bpf_sk_fullsock), 369 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 370 __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port)) 371 : __clobber_all); 372 } 373 374 SEC("cgroup/skb") 375 __description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]") 376 __success __success_unpriv __retval(0) 377 __naked void dst_ip6_load_2nd_byte(void) 378 { 379 asm volatile (" \ 380 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 381 if r1 != 0 goto l0_%=; \ 382 r0 = 0; \ 383 exit; \ 384 l0_%=: call %[bpf_sk_fullsock]; \ 385 if r0 != 0 goto l1_%=; \ 386 r0 = 0; \ 387 exit; \ 388 l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ 389 r0 = 0; \ 390 exit; \ 391 " : 392 : __imm(bpf_sk_fullsock), 393 __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1), 394 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 395 : __clobber_all); 396 } 397 398 SEC("cgroup/skb") 399 __description("sk_fullsock(skb->sk): sk->type [narrow load]") 400 __success __success_unpriv __retval(0) 401 __naked void sk_sk_type_narrow_load(void) 402 { 403 asm volatile (" \ 404 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 405 if r1 != 0 goto l0_%=; \ 406 r0 = 0; \ 407 exit; \ 408 l0_%=: call %[bpf_sk_fullsock]; \ 409 if r0 != 0 goto l1_%=; \ 410 r0 = 0; \ 411 exit; \ 412 l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \ 413 r0 = 0; \ 414 exit; \ 415 " : 416 : __imm(bpf_sk_fullsock), 417 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 418 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 419 : __clobber_all); 420 } 421 422 SEC("cgroup/skb") 423 __description("sk_fullsock(skb->sk): sk->protocol [narrow load]") 424 __success __success_unpriv __retval(0) 425 __naked void sk_sk_protocol_narrow_load(void) 426 { 427 asm volatile (" \ 428 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 429 if r1 != 0 goto l0_%=; \ 430 r0 = 0; \ 431 exit; \ 432 l0_%=: call %[bpf_sk_fullsock]; \ 433 if r0 != 0 goto l1_%=; \ 434 r0 = 0; \ 435 exit; \ 436 l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \ 437 r0 = 0; \ 438 exit; \ 439 " : 440 : __imm(bpf_sk_fullsock), 441 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 442 __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol)) 443 : __clobber_all); 444 } 445 446 SEC("cgroup/skb") 447 __description("sk_fullsock(skb->sk): beyond last field") 448 __failure __msg("invalid sock access") 449 __failure_unpriv 450 __naked void skb_sk_beyond_last_field_1(void) 451 { 452 asm volatile (" \ 453 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 454 if r1 != 0 goto l0_%=; \ 455 r0 = 0; \ 456 exit; \ 457 l0_%=: call %[bpf_sk_fullsock]; \ 458 if r0 != 0 goto l1_%=; \ 459 r0 = 0; \ 460 exit; \ 461 l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\ 462 r0 = 0; \ 463 exit; \ 464 " : 465 : __imm(bpf_sk_fullsock), 466 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 467 __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping)) 468 : __clobber_all); 469 } 470 471 SEC("cgroup/skb") 472 __description("bpf_tcp_sock(skb->sk): no !skb->sk check") 473 __failure __msg("type=sock_common_or_null expected=sock_common") 474 __failure_unpriv 475 __naked void sk_no_skb_sk_check_2(void) 476 { 477 asm volatile (" \ 478 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 479 call %[bpf_tcp_sock]; \ 480 r0 = 0; \ 481 exit; \ 482 " : 483 : __imm(bpf_tcp_sock), 484 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 485 : __clobber_all); 486 } 487 488 SEC("cgroup/skb") 489 __description("bpf_tcp_sock(skb->sk): no NULL check on ret") 490 __failure __msg("invalid mem access 'tcp_sock_or_null'") 491 __failure_unpriv 492 __naked void no_null_check_on_ret_2(void) 493 { 494 asm volatile (" \ 495 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 496 if r1 != 0 goto l0_%=; \ 497 r0 = 0; \ 498 exit; \ 499 l0_%=: call %[bpf_tcp_sock]; \ 500 r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ 501 r0 = 0; \ 502 exit; \ 503 " : 504 : __imm(bpf_tcp_sock), 505 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 506 __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) 507 : __clobber_all); 508 } 509 510 SEC("cgroup/skb") 511 __description("bpf_tcp_sock(skb->sk): tp->snd_cwnd") 512 __success __success_unpriv __retval(0) 513 __naked void skb_sk_tp_snd_cwnd_1(void) 514 { 515 asm volatile (" \ 516 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 517 if r1 != 0 goto l0_%=; \ 518 r0 = 0; \ 519 exit; \ 520 l0_%=: call %[bpf_tcp_sock]; \ 521 if r0 != 0 goto l1_%=; \ 522 exit; \ 523 l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ 524 r0 = 0; \ 525 exit; \ 526 " : 527 : __imm(bpf_tcp_sock), 528 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 529 __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) 530 : __clobber_all); 531 } 532 533 SEC("cgroup/skb") 534 __description("bpf_tcp_sock(skb->sk): tp->bytes_acked") 535 __success __success_unpriv __retval(0) 536 __naked void skb_sk_tp_bytes_acked(void) 537 { 538 asm volatile (" \ 539 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 540 if r1 != 0 goto l0_%=; \ 541 r0 = 0; \ 542 exit; \ 543 l0_%=: call %[bpf_tcp_sock]; \ 544 if r0 != 0 goto l1_%=; \ 545 exit; \ 546 l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \ 547 r0 = 0; \ 548 exit; \ 549 " : 550 : __imm(bpf_tcp_sock), 551 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 552 __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked)) 553 : __clobber_all); 554 } 555 556 SEC("cgroup/skb") 557 __description("bpf_tcp_sock(skb->sk): beyond last field") 558 __failure __msg("invalid tcp_sock access") 559 __failure_unpriv 560 __naked void skb_sk_beyond_last_field_2(void) 561 { 562 asm volatile (" \ 563 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 564 if r1 != 0 goto l0_%=; \ 565 r0 = 0; \ 566 exit; \ 567 l0_%=: call %[bpf_tcp_sock]; \ 568 if r0 != 0 goto l1_%=; \ 569 exit; \ 570 l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\ 571 r0 = 0; \ 572 exit; \ 573 " : 574 : __imm(bpf_tcp_sock), 575 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 576 __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked)) 577 : __clobber_all); 578 } 579 580 SEC("cgroup/skb") 581 __description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd") 582 __success __success_unpriv __retval(0) 583 __naked void skb_sk_tp_snd_cwnd_2(void) 584 { 585 asm volatile (" \ 586 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 587 if r1 != 0 goto l0_%=; \ 588 r0 = 0; \ 589 exit; \ 590 l0_%=: call %[bpf_sk_fullsock]; \ 591 if r0 != 0 goto l1_%=; \ 592 exit; \ 593 l1_%=: r1 = r0; \ 594 call %[bpf_tcp_sock]; \ 595 if r0 != 0 goto l2_%=; \ 596 exit; \ 597 l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ 598 r0 = 0; \ 599 exit; \ 600 " : 601 : __imm(bpf_sk_fullsock), 602 __imm(bpf_tcp_sock), 603 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 604 __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) 605 : __clobber_all); 606 } 607 608 SEC("tc") 609 __description("bpf_sk_release(skb->sk)") 610 __failure __msg("R1 must be referenced when passed to release function") 611 __naked void bpf_sk_release_skb_sk(void) 612 { 613 asm volatile (" \ 614 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 615 if r1 == 0 goto l0_%=; \ 616 call %[bpf_sk_release]; \ 617 l0_%=: r0 = 0; \ 618 exit; \ 619 " : 620 : __imm(bpf_sk_release), 621 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 622 : __clobber_all); 623 } 624 625 SEC("tc") 626 __description("bpf_sk_release(bpf_sk_fullsock(skb->sk))") 627 __failure __msg("R1 must be referenced when passed to release function") 628 __naked void bpf_sk_fullsock_skb_sk(void) 629 { 630 asm volatile (" \ 631 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 632 if r1 != 0 goto l0_%=; \ 633 r0 = 0; \ 634 exit; \ 635 l0_%=: call %[bpf_sk_fullsock]; \ 636 if r0 != 0 goto l1_%=; \ 637 exit; \ 638 l1_%=: r1 = r0; \ 639 call %[bpf_sk_release]; \ 640 r0 = 1; \ 641 exit; \ 642 " : 643 : __imm(bpf_sk_fullsock), 644 __imm(bpf_sk_release), 645 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 646 : __clobber_all); 647 } 648 649 SEC("tc") 650 __description("bpf_sk_release(bpf_tcp_sock(skb->sk))") 651 __failure __msg("R1 must be referenced when passed to release function") 652 __naked void bpf_tcp_sock_skb_sk(void) 653 { 654 asm volatile (" \ 655 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 656 if r1 != 0 goto l0_%=; \ 657 r0 = 0; \ 658 exit; \ 659 l0_%=: call %[bpf_tcp_sock]; \ 660 if r0 != 0 goto l1_%=; \ 661 exit; \ 662 l1_%=: r1 = r0; \ 663 call %[bpf_sk_release]; \ 664 r0 = 1; \ 665 exit; \ 666 " : 667 : __imm(bpf_sk_release), 668 __imm(bpf_tcp_sock), 669 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 670 : __clobber_all); 671 } 672 673 SEC("tc") 674 __description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL") 675 __success __retval(0) 676 __naked void sk_null_0_value_null(void) 677 { 678 asm volatile (" \ 679 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 680 if r1 != 0 goto l0_%=; \ 681 r0 = 0; \ 682 exit; \ 683 l0_%=: call %[bpf_sk_fullsock]; \ 684 if r0 != 0 goto l1_%=; \ 685 r0 = 0; \ 686 exit; \ 687 l1_%=: r4 = 0; \ 688 r3 = 0; \ 689 r2 = r0; \ 690 r1 = %[sk_storage_map] ll; \ 691 call %[bpf_sk_storage_get]; \ 692 r0 = 0; \ 693 exit; \ 694 " : 695 : __imm(bpf_sk_fullsock), 696 __imm(bpf_sk_storage_get), 697 __imm_addr(sk_storage_map), 698 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 699 : __clobber_all); 700 } 701 702 SEC("tc") 703 __description("sk_storage_get(map, skb->sk, 1, 1): value == 1") 704 __failure __msg("R3 type=scalar expected=fp") 705 __naked void sk_1_1_value_1(void) 706 { 707 asm volatile (" \ 708 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 709 if r1 != 0 goto l0_%=; \ 710 r0 = 0; \ 711 exit; \ 712 l0_%=: call %[bpf_sk_fullsock]; \ 713 if r0 != 0 goto l1_%=; \ 714 r0 = 0; \ 715 exit; \ 716 l1_%=: r4 = 1; \ 717 r3 = 1; \ 718 r2 = r0; \ 719 r1 = %[sk_storage_map] ll; \ 720 call %[bpf_sk_storage_get]; \ 721 r0 = 0; \ 722 exit; \ 723 " : 724 : __imm(bpf_sk_fullsock), 725 __imm(bpf_sk_storage_get), 726 __imm_addr(sk_storage_map), 727 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 728 : __clobber_all); 729 } 730 731 SEC("tc") 732 __description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value") 733 __success __retval(0) 734 __naked void stack_value_1_stack_value(void) 735 { 736 asm volatile (" \ 737 r2 = 0; \ 738 *(u64*)(r10 - 8) = r2; \ 739 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 740 if r1 != 0 goto l0_%=; \ 741 r0 = 0; \ 742 exit; \ 743 l0_%=: call %[bpf_sk_fullsock]; \ 744 if r0 != 0 goto l1_%=; \ 745 r0 = 0; \ 746 exit; \ 747 l1_%=: r4 = 1; \ 748 r3 = r10; \ 749 r3 += -8; \ 750 r2 = r0; \ 751 r1 = %[sk_storage_map] ll; \ 752 call %[bpf_sk_storage_get]; \ 753 r0 = 0; \ 754 exit; \ 755 " : 756 : __imm(bpf_sk_fullsock), 757 __imm(bpf_sk_storage_get), 758 __imm_addr(sk_storage_map), 759 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 760 : __clobber_all); 761 } 762 763 SEC("tc") 764 __description("bpf_map_lookup_elem(smap, &key)") 765 __failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem") 766 __naked void map_lookup_elem_smap_key(void) 767 { 768 asm volatile (" \ 769 r1 = 0; \ 770 *(u32*)(r10 - 4) = r1; \ 771 r2 = r10; \ 772 r2 += -4; \ 773 r1 = %[sk_storage_map] ll; \ 774 call %[bpf_map_lookup_elem]; \ 775 r0 = 0; \ 776 exit; \ 777 " : 778 : __imm(bpf_map_lookup_elem), 779 __imm_addr(sk_storage_map) 780 : __clobber_all); 781 } 782 783 SEC("xdp") 784 __description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id") 785 __success __retval(0) 786 __naked void xskmap_key_xs_queue_id(void) 787 { 788 asm volatile (" \ 789 r1 = 0; \ 790 *(u32*)(r10 - 8) = r1; \ 791 r2 = r10; \ 792 r2 += -8; \ 793 r1 = %[map_xskmap] ll; \ 794 call %[bpf_map_lookup_elem]; \ 795 if r0 != 0 goto l0_%=; \ 796 exit; \ 797 l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \ 798 r0 = 0; \ 799 exit; \ 800 " : 801 : __imm(bpf_map_lookup_elem), 802 __imm_addr(map_xskmap), 803 __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) 804 : __clobber_all); 805 } 806 807 SEC("sk_skb") 808 __description("bpf_map_lookup_elem(sockmap, &key)") 809 __failure __msg("Unreleased reference id=2 alloc_insn=6") 810 __naked void map_lookup_elem_sockmap_key(void) 811 { 812 asm volatile (" \ 813 r1 = 0; \ 814 *(u32*)(r10 - 4) = r1; \ 815 r2 = r10; \ 816 r2 += -4; \ 817 r1 = %[map_sockmap] ll; \ 818 call %[bpf_map_lookup_elem]; \ 819 r0 = 0; \ 820 exit; \ 821 " : 822 : __imm(bpf_map_lookup_elem), 823 __imm_addr(map_sockmap) 824 : __clobber_all); 825 } 826 827 SEC("sk_skb") 828 __description("bpf_map_lookup_elem(sockhash, &key)") 829 __failure __msg("Unreleased reference id=2 alloc_insn=6") 830 __naked void map_lookup_elem_sockhash_key(void) 831 { 832 asm volatile (" \ 833 r1 = 0; \ 834 *(u32*)(r10 - 4) = r1; \ 835 r2 = r10; \ 836 r2 += -4; \ 837 r1 = %[map_sockhash] ll; \ 838 call %[bpf_map_lookup_elem]; \ 839 r0 = 0; \ 840 exit; \ 841 " : 842 : __imm(bpf_map_lookup_elem), 843 __imm_addr(map_sockhash) 844 : __clobber_all); 845 } 846 847 SEC("sk_skb") 848 __description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)") 849 __success 850 __naked void field_bpf_sk_release_sk_1(void) 851 { 852 asm volatile (" \ 853 r1 = 0; \ 854 *(u32*)(r10 - 4) = r1; \ 855 r2 = r10; \ 856 r2 += -4; \ 857 r1 = %[map_sockmap] ll; \ 858 call %[bpf_map_lookup_elem]; \ 859 if r0 != 0 goto l0_%=; \ 860 exit; \ 861 l0_%=: r1 = r0; \ 862 r0 = *(u32*)(r0 + %[bpf_sock_type]); \ 863 call %[bpf_sk_release]; \ 864 exit; \ 865 " : 866 : __imm(bpf_map_lookup_elem), 867 __imm(bpf_sk_release), 868 __imm_addr(map_sockmap), 869 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 870 : __clobber_all); 871 } 872 873 SEC("sk_skb") 874 __description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)") 875 __success 876 __naked void field_bpf_sk_release_sk_2(void) 877 { 878 asm volatile (" \ 879 r1 = 0; \ 880 *(u32*)(r10 - 4) = r1; \ 881 r2 = r10; \ 882 r2 += -4; \ 883 r1 = %[map_sockhash] ll; \ 884 call %[bpf_map_lookup_elem]; \ 885 if r0 != 0 goto l0_%=; \ 886 exit; \ 887 l0_%=: r1 = r0; \ 888 r0 = *(u32*)(r0 + %[bpf_sock_type]); \ 889 call %[bpf_sk_release]; \ 890 exit; \ 891 " : 892 : __imm(bpf_map_lookup_elem), 893 __imm(bpf_sk_release), 894 __imm_addr(map_sockhash), 895 __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 896 : __clobber_all); 897 } 898 899 SEC("sk_reuseport") 900 __description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)") 901 __success 902 __naked void ctx_reuseport_array_key_flags(void) 903 { 904 asm volatile (" \ 905 r4 = 0; \ 906 r2 = 0; \ 907 *(u32*)(r10 - 4) = r2; \ 908 r3 = r10; \ 909 r3 += -4; \ 910 r2 = %[map_reuseport_array] ll; \ 911 call %[bpf_sk_select_reuseport]; \ 912 exit; \ 913 " : 914 : __imm(bpf_sk_select_reuseport), 915 __imm_addr(map_reuseport_array) 916 : __clobber_all); 917 } 918 919 SEC("sk_reuseport") 920 __description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)") 921 __success 922 __naked void reuseport_ctx_sockmap_key_flags(void) 923 { 924 asm volatile (" \ 925 r4 = 0; \ 926 r2 = 0; \ 927 *(u32*)(r10 - 4) = r2; \ 928 r3 = r10; \ 929 r3 += -4; \ 930 r2 = %[map_sockmap] ll; \ 931 call %[bpf_sk_select_reuseport]; \ 932 exit; \ 933 " : 934 : __imm(bpf_sk_select_reuseport), 935 __imm_addr(map_sockmap) 936 : __clobber_all); 937 } 938 939 SEC("sk_reuseport") 940 __description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)") 941 __success 942 __naked void reuseport_ctx_sockhash_key_flags(void) 943 { 944 asm volatile (" \ 945 r4 = 0; \ 946 r2 = 0; \ 947 *(u32*)(r10 - 4) = r2; \ 948 r3 = r10; \ 949 r3 += -4; \ 950 r2 = %[map_sockmap] ll; \ 951 call %[bpf_sk_select_reuseport]; \ 952 exit; \ 953 " : 954 : __imm(bpf_sk_select_reuseport), 955 __imm_addr(map_sockmap) 956 : __clobber_all); 957 } 958 959 SEC("tc") 960 __description("mark null check on return value of bpf_skc_to helpers") 961 __failure __msg("invalid mem access") 962 __naked void of_bpf_skc_to_helpers(void) 963 { 964 asm volatile (" \ 965 r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ 966 if r1 != 0 goto l0_%=; \ 967 r0 = 0; \ 968 exit; \ 969 l0_%=: r6 = r1; \ 970 call %[bpf_skc_to_tcp_sock]; \ 971 r7 = r0; \ 972 r1 = r6; \ 973 call %[bpf_skc_to_tcp_request_sock]; \ 974 r8 = r0; \ 975 if r8 != 0 goto l1_%=; \ 976 r0 = 0; \ 977 exit; \ 978 l1_%=: r0 = *(u8*)(r7 + 0); \ 979 exit; \ 980 " : 981 : __imm(bpf_skc_to_tcp_request_sock), 982 __imm(bpf_skc_to_tcp_sock), 983 __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) 984 : __clobber_all); 985 } 986 987 SEC("cgroup/post_bind4") 988 __description("sk->src_ip6[0] [load 1st byte]") 989 __failure __msg("invalid bpf_context access off=28 size=2") 990 __naked void post_bind4_read_src_ip6(void) 991 { 992 asm volatile (" \ 993 r6 = r1; \ 994 r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]); \ 995 r0 = 1; \ 996 exit; \ 997 " : 998 : __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0])) 999 : __clobber_all); 1000 } 1001 1002 SEC("cgroup/post_bind4") 1003 __description("sk->mark [load mark]") 1004 __failure __msg("invalid bpf_context access off=16 size=2") 1005 __naked void post_bind4_read_mark(void) 1006 { 1007 asm volatile (" \ 1008 r6 = r1; \ 1009 r7 = *(u16*)(r6 + %[bpf_sock_mark]); \ 1010 r0 = 1; \ 1011 exit; \ 1012 " : 1013 : __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)) 1014 : __clobber_all); 1015 } 1016 1017 SEC("cgroup/post_bind6") 1018 __description("sk->src_ip4 [load src_ip4]") 1019 __failure __msg("invalid bpf_context access off=24 size=2") 1020 __naked void post_bind6_read_src_ip4(void) 1021 { 1022 asm volatile (" \ 1023 r6 = r1; \ 1024 r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]); \ 1025 r0 = 1; \ 1026 exit; \ 1027 " : 1028 : __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4)) 1029 : __clobber_all); 1030 } 1031 1032 SEC("cgroup/sock_create") 1033 __description("sk->src_port [word load]") 1034 __failure __msg("invalid bpf_context access off=44 size=2") 1035 __naked void sock_create_read_src_port(void) 1036 { 1037 asm volatile (" \ 1038 r6 = r1; \ 1039 r7 = *(u16*)(r6 + %[bpf_sock_src_port]); \ 1040 r0 = 1; \ 1041 exit; \ 1042 " : 1043 : __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)) 1044 : __clobber_all); 1045 } 1046 1047 __noinline 1048 long skb_pull_data2(struct __sk_buff *sk, __u32 len) 1049 { 1050 return bpf_skb_pull_data(sk, len); 1051 } 1052 1053 __noinline 1054 long skb_pull_data1(struct __sk_buff *sk, __u32 len) 1055 { 1056 return skb_pull_data2(sk, len); 1057 } 1058 1059 /* global function calls bpf_skb_pull_data(), which invalidates packet 1060 * pointers established before global function call. 1061 */ 1062 SEC("tc") 1063 __failure __msg("invalid mem access") 1064 int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk) 1065 { 1066 int *p = (void *)(long)sk->data; 1067 1068 if ((void *)(p + 1) > (void *)(long)sk->data_end) 1069 return TCX_DROP; 1070 skb_pull_data1(sk, 0); 1071 *p = 42; /* this is unsafe */ 1072 return TCX_PASS; 1073 } 1074 1075 __noinline 1076 int tail_call(struct __sk_buff *sk) 1077 { 1078 bpf_tail_call_static(sk, &jmp_table, 0); 1079 return 0; 1080 } 1081 1082 /* Tail calls invalidate packet pointers. */ 1083 SEC("tc") 1084 __failure __msg("invalid mem access") 1085 int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk) 1086 { 1087 int *p = (void *)(long)sk->data; 1088 1089 if ((void *)(p + 1) > (void *)(long)sk->data_end) 1090 return TCX_DROP; 1091 tail_call(sk); 1092 *p = 42; /* this is unsafe */ 1093 return TCX_PASS; 1094 } 1095 1096 char _license[] SEC("license") = "GPL"; 1097