// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

/* This mutex is used to
 *  - protect race between prog/link attach/detach and link prog update, and
 *  - protect race between releasing and accessing map in bpf_link.
 * A single global mutex is used since contention is expected to be low.
 */
static DEFINE_MUTEX(sockmap_mutex);

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, struct bpf_link *link,
				u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (attr->max_entries == 0 ||
	    attr->key_size != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		bpf_map_area_free(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}
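
/* Example (illustrative sketch, not part of this file): the checks in
 * sock_map_alloc() above translate into user-space constraints: a
 * BPF_MAP_TYPE_SOCKMAP must have key_size == 4 and value_size of 4 or 8.
 * A minimal libbpf user-space sketch, assuming libbpf >= 0.7 for
 * bpf_map_create(); kept out of the kernel build with #if 0.
 */
#if 0
#include <stdio.h>
#include <bpf/bpf.h>

int create_sockmap(void)
{
	/* key: u32 index, value: u32 socket fd, 16 slots */
	int map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "example_map",
				    sizeof(__u32), sizeof(__u32), 16, NULL);
	if (map_fd < 0)
		perror("bpf_map_create");
	return map_fd;
}
#endif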

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_map *map;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	CLASS(fd, f)(attr->target_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	mutex_lock(&sockmap_mutex);
	ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
	mutex_unlock(&sockmap_mutex);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	CLASS(fd, f)(attr->target_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	mutex_lock(&sockmap_mutex);
	ret = sock_map_prog_update(map, NULL, prog, NULL, attr->attach_type);
	mutex_unlock(&sockmap_mutex);
put_prog:
	bpf_prog_put(prog);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}

static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct sk_psock_progs *progs = sock_map_progs(map);

			if (psock->saved_data_ready && progs->stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && progs->stream_verdict)
				verdict_stop = true;
			if (psock->saved_data_ready && progs->skb_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
			break;
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		if (verdict_stop)
			sk_psock_stop_verdict(sk, psock);

		if (psock->psock_update_sk_prot)
			psock->psock_update_sk_prot(sk, psock, false);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	if (!sk->sk_prot->psock_update_sk_prot)
		return -EINVAL;
	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}
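
/* Example (illustrative sketch): sock_map_psock_get_checked() relies on
 * the common RCU pattern of taking a reference only if the count is not
 * already zero, so an object mid-teardown cannot be resurrected. A minimal
 * sketch of the same idiom with a hypothetical refcounted object "foo";
 * kept out of the build with #if 0.
 */
#if 0
struct foo {
	refcount_t refcnt;
	struct rcu_head rcu;
};

static struct foo *foo_get(struct foo __rcu **slot)
{
	struct foo *f;

	rcu_read_lock();
	f = rcu_dereference(*slot);
	/* Object may be mid-teardown: only take a ref if it is still live. */
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();
	return f;
}
#endif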

static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog *stream_verdict = NULL;
	struct bpf_prog *stream_parser = NULL;
	struct bpf_prog *skb_verdict = NULL;
	struct bpf_prog *msg_parser = NULL;
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	skb_verdict = READ_ONCE(progs->skb_verdict);
	if (skb_verdict) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict)) {
			ret = PTR_ERR(skb_verdict);
			goto out_put_msg_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);
	if (stream_parser)
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
	if (stream_verdict)
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
	if (skb_verdict)
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);

	/* msg_* and stream_* program references are tracked in psock after
	 * this point. Reference dec and cleanup will occur through the psock
	 * destructor.
	 */
	ret = sock_map_init_proto(sk, psock);
	if (ret < 0) {
		sk_psock_put(sk, psock);
		goto out;
	}

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		if (sk_is_tcp(sk))
			ret = sk_psock_init_strp(sk, psock);
		else
			ret = -EOPNOTSUPP;
		if (ret) {
			write_unlock_bh(&sk->sk_callback_lock);
			sk_psock_put(sk, psock);
			goto out;
		}
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_progs:
	if (skb_verdict)
		bpf_prog_put(skb_verdict);
out_put_msg_parser:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
out:
	return ret;
}
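
/* Example (illustrative sketch): the programs consulted by sock_map_link()
 * are attached to the map from user space with bpf_prog_attach(), using
 * the map fd as the attach target. A hedged libbpf sketch; verdict_prog_fd
 * and map_fd are assumed to come from a loaded object and bpf_map_create().
 * Kept out of the build with #if 0.
 */
#if 0
#include <bpf/bpf.h>

int attach_verdict(int verdict_prog_fd, int map_fd)
{
	/* Run the sk_skb program on every skb of sockets in the map. */
	return bpf_prog_attach(verdict_prog_fd, map_fd,
			       BPF_SK_SKB_STREAM_VERDICT, 0);
}
#endif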

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			sock_hold(sk);
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
			sock_put(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	bpf_map_area_free(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk = NULL;
	int err = 0;

	spin_lock_bh(&stab->lock);
	if (!sk_test || sk_test == *psk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static long sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}
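
/* Example (illustrative sketch): sock_map_lookup_sys() means a user-space
 * bpf_map_lookup_elem() on a sockmap does not return the socket itself but
 * its cookie, and only if the map was created with value_size == 8. A
 * hedged sketch; map_fd is assumed to reference such a map. Kept out of
 * the build with #if 0.
 */
#if 0
#include <stdio.h>
#include <bpf/bpf.h>

int print_cookie(int map_fd, __u32 index)
{
	__u64 cookie;
	int err = bpf_map_lookup_elem(map_fd, &index, &cookie);

	if (!err)
		printf("slot %u -> socket cookie %llu\n", index,
		       (unsigned long long)cookie);
	return err;
}
#endif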

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return sk->sk_state != TCP_LISTEN;
	else
		return sk->sk_state == TCP_ESTABLISHED;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return !!sk->sk_prot->psock_update_sk_prot;
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	if (sk_is_stream_unix(sk))
		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
	if (sk_is_vsock(sk) &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET))
		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
	return true;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);

int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}
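
/* Example (illustrative sketch): sock_map_update_elem_sys() is what a
 * user-space bpf_map_update_elem() on a sockmap lands in; the "value" is a
 * socket file descriptor, and the socket must be in an allowed state
 * (e.g. ESTABLISHED or LISTEN for TCP). A hedged sketch assuming conn_fd
 * is a connected TCP socket. Kept out of the build with #if 0.
 */
#if 0
#include <bpf/bpf.h>

int add_socket(int map_fd, __u32 index, int conn_fd)
{
	/* Fails with -EOPNOTSUPP if the socket state is not allowed. */
	return bpf_map_update_elem(map_fd, &index, &conn_fd, BPF_ANY);
}
#endif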

static long sock_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func = bpf_sk_redirect_map,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;
	if (sk_is_vsock(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func = bpf_msg_redirect_map,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_ANYTHING,
};
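
/* Example (illustrative sketch): a BPF-side sk_skb verdict program that
 * calls the bpf_sk_redirect_map() helper implemented above. A hedged
 * sketch in BPF C (compiled separately with clang -target bpf), assuming
 * a map named sock_map declared in the same object. Kept out of the
 * kernel build with #if 0.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u32);
} sock_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int prog_verdict(struct __sk_buff *skb)
{
	__u32 idx = 0;

	/* Redirect every skb to the socket in slot 0, ingress side. */
	return bpf_sk_redirect_map(skb, &sock_map, idx, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";
#endif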

struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start = sock_map_seq_start,
	.next = sock_map_seq_next,
	.stop = sock_map_seq_stop,
	.show = sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	return 0;
}

static void sock_map_fini_seq_private(void *priv_data)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_stab);

	usage += (u64)map->max_entries * sizeof(struct sock *);
	return usage;
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops = &sock_map_seq_ops,
	.init_seq_private = sock_map_init_seq_private,
	.fini_seq_private = sock_map_fini_seq_private,
	.seq_priv_size = sizeof(struct sock_map_seq_info),
};

BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_get_next_key = sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
	.map_lookup_elem = sock_map_lookup,
	.map_release_uref = sock_map_release_progs,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = sock_map_mem_usage,
	.map_btf_id = &sock_map_btf_ids[0],
	.iter_seq_info = &sock_map_iter_seq_info,
};

struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}
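
/* Example (illustrative sketch): because buckets_num is rounded up to a
 * power of two in sock_hash_alloc() below, sock_hash_select_bucket() can
 * reduce the hash with a mask instead of a modulo. A small stand-alone
 * demonstration of the equivalence; the hash value is arbitrary. Kept out
 * of the build with #if 0.
 */
#if 0
#include <assert.h>

static unsigned int bucket_index(unsigned int hash, unsigned int buckets_num)
{
	/* Valid only when buckets_num is a power of two. */
	return hash & (buckets_num - 1);
}

int main(void)
{
	unsigned int hash = 0xdeadbeef, buckets_num = 1024;

	assert(bucket_index(hash, buckets_num) == hash % buckets_num);
	return 0;
}
#endif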

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
}

static long sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	spin_unlock_bh(&bucket->lock);
	return ret;
}
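
/* Example (illustrative sketch): unlike a sockmap, a sockhash accepts an
 * arbitrary fixed-size key, e.g. a connection tuple, as long as
 * key_size <= MAX_BPF_STACK (checked in sock_hash_alloc() below). A hedged
 * user-space sketch with a hypothetical tuple key layout. Kept out of the
 * build with #if 0.
 */
#if 0
#include <bpf/bpf.h>

struct conn_key {			/* hypothetical key layout */
	__u32 saddr, daddr;
	__u16 sport, dport;
};

int create_sockhash(void)
{
	return bpf_map_create(BPF_MAP_TYPE_SOCKHASH, "conn_hash",
			      sizeof(struct conn_key), sizeof(__u32),
			      1024, NULL);
}
#endif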

static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_elem *elem, *elem_new;
	struct bpf_shtab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add new element to the head of the list, so that
	 * concurrent search will find it before old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
				     struct bpf_shtab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
					     struct bpf_shtab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_shtab *htab;
	int i, err;

	if (attr->max_entries == 0 ||
	    attr->key_size == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_shtab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	bpf_map_area_free(htab);
	return ERR_PTR(err);
}
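
/* Example (illustrative sketch): sock_hash_get_next_key() backs the
 * user-space bpf_map_get_next_key() walk; passing NULL as the current key
 * starts from the first element. A hedged sketch reusing the hypothetical
 * struct conn_key layout from the earlier sketch, redefined here so the
 * block is self-contained. Kept out of the build with #if 0.
 */
#if 0
#include <bpf/bpf.h>

struct conn_key {			/* hypothetical key layout */
	__u32 saddr, daddr;
	__u16 sport, dport;
};

void walk_keys(int map_fd)
{
	struct conn_key key, next;
	void *cur = NULL;		/* NULL: start of the walk */

	while (!bpf_map_get_next_key(map_fd, cur, &next)) {
		/* ... use next ... */
		key = next;
		cur = &key;
	}
}
#endif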

static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to sockhash. Since link
		 * exists, psock exists and holds a ref to socket. That
		 * lets us grab a socket ref too.
		 */
		spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context to
		 * block for socket lock before deleting the psock's
		 * link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
		cond_resched();
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	bpf_map_area_free(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func = bpf_sock_hash_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func = bpf_sk_redirect_hash,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};
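
/* Example (illustrative sketch): an sk_msg program that redirects sendmsg
 * data through a sockhash via the bpf_msg_redirect_hash() helper (its
 * kernel side is bpf_msg_redirect_hash below). A hedged BPF C sketch with
 * a hypothetical key layout and map; key derivation from msg is elided.
 * Kept out of the kernel build with #if 0.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct conn_key {			/* hypothetical key layout */
	__u32 saddr, daddr;
	__u16 sport, dport;
};

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1024);
	__type(key, struct conn_key);
	__type(value, __u32);
} conn_hash SEC(".maps");

SEC("sk_msg")
int prog_msg_redirect(struct sk_msg_md *msg)
{
	struct conn_key key = {};	/* fill from msg->remote_ip4 etc. */

	/* flags == 0: redirect to the egress path of the looked-up sock */
	return bpf_msg_redirect_hash(msg, &conn_hash, &key, 0);
}

char _license[] SEC("license") = "GPL";
#endif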

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;
	if (sk_is_vsock(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func = bpf_msg_redirect_hash,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};

struct sock_hash_seq_info {
	struct bpf_map *map;
	struct bpf_shtab *htab;
	u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
{
	const struct bpf_shtab *htab = info->htab;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;

		/* no more elements, continue in the next bucket */
		info->bucket_id++;
	}

	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
		bucket = &htab->buckets[info->bucket_id];
		node = rcu_dereference(hlist_first_rcu(&bucket->head));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;
	}

	return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_hash_seq_stop */
	rcu_read_lock();
	return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	++*pos;
	return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_shtab_elem *elem = v;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !elem);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (elem) {
		ctx.key = elem->key;
		ctx.sk = elem->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_hash_seq_show(seq, NULL);

	/* pairs with sock_hash_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
	.start = sock_hash_seq_start,
	.next = sock_hash_seq_next,
	.stop = sock_hash_seq_stop,
	.show = sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	info->htab = container_of(aux->map, struct bpf_shtab, map);
	return 0;
}

static void sock_hash_fini_seq_private(void *priv_data)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_hash_mem_usage(const struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u64 usage = sizeof(*htab);

	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
	return usage;
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
	.seq_ops = &sock_hash_seq_ops,
	.init_seq_private = sock_hash_init_seq_private,
	.fini_seq_private = sock_hash_fini_seq_private,
	.seq_priv_size = sizeof(struct sock_hash_seq_info),
};

BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
const struct bpf_map_ops sock_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = sock_hash_alloc,
	.map_free = sock_hash_free,
	.map_get_next_key = sock_hash_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_hash_delete_elem,
	.map_lookup_elem = sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref = sock_hash_release_progs,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = sock_hash_mem_usage,
	.map_btf_id = &sock_hash_map_btf_ids[0],
	.iter_seq_info = &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_shtab, map)->progs;
	default:
		break;
	}

	return NULL;
}

static int sock_map_prog_link_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
				     struct bpf_link ***plink, u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog **cur_pprog;
	struct bpf_link **cur_plink;

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		cur_pprog = &progs->msg_parser;
		cur_plink = &progs->msg_parser_link;
		break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	case BPF_SK_SKB_STREAM_PARSER:
		cur_pprog = &progs->stream_parser;
		cur_plink = &progs->stream_parser_link;
		break;
#endif
	case BPF_SK_SKB_STREAM_VERDICT:
		if (progs->skb_verdict)
			return -EBUSY;
		cur_pprog = &progs->stream_verdict;
		cur_plink = &progs->stream_verdict_link;
		break;
	case BPF_SK_SKB_VERDICT:
		if (progs->stream_verdict)
			return -EBUSY;
		cur_pprog = &progs->skb_verdict;
		cur_plink = &progs->skb_verdict_link;
		break;
	default:
		return -EOPNOTSUPP;
	}

	*pprog = cur_pprog;
	if (plink)
		*plink = cur_plink;
	return 0;
}
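
/* Example (illustrative sketch): sock_map_prog_link_lookup() makes
 * BPF_SK_SKB_STREAM_VERDICT and BPF_SK_SKB_VERDICT mutually exclusive, so
 * attaching one after the other fails with -EBUSY. A hedged user-space
 * sketch assuming libbpf >= 1.0 (which returns negative errno values) and
 * two loaded sk_skb program fds. Kept out of the build with #if 0.
 */
#if 0
#include <errno.h>
#include <assert.h>
#include <bpf/bpf.h>

void demo_exclusive(int map_fd, int stream_verdict_fd, int skb_verdict_fd)
{
	int err;

	assert(!bpf_prog_attach(stream_verdict_fd, map_fd,
				BPF_SK_SKB_STREAM_VERDICT, 0));
	/* Second attach type conflicts with the first: expect EBUSY. */
	err = bpf_prog_attach(skb_verdict_fd, map_fd, BPF_SK_SKB_VERDICT, 0);
	assert(err == -EBUSY);
}
#endif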

/* Handle the following four cases:
 * prog_attach: prog != NULL, old == NULL, link == NULL
 * prog_detach: prog == NULL, old != NULL, link == NULL
 * link_attach: prog != NULL, old == NULL, link != NULL
 * link_detach: prog == NULL, old != NULL, link != NULL
 */
static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, struct bpf_link *link,
				u32 which)
{
	struct bpf_prog **pprog;
	struct bpf_link **plink;
	int ret;

	ret = sock_map_prog_link_lookup(map, &pprog, &plink, which);
	if (ret)
		return ret;

	/* for prog_attach/prog_detach/link_attach, return error if a bpf_link
	 * exists for that prog.
	 */
	if ((!link || prog) && *plink)
		return -EBUSY;

	if (old) {
		ret = psock_replace_prog(pprog, prog, old);
		if (!ret)
			*plink = NULL;
	} else {
		psock_set_prog(pprog, prog);
		if (link)
			*plink = link;
	}

	return ret;
}

int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	u32 prog_cnt = 0, flags = 0;
	struct bpf_prog **pprog;
	struct bpf_prog *prog;
	struct bpf_map *map;
	u32 id = 0;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	CLASS(fd, f)(attr->target_fd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	rcu_read_lock();

	ret = sock_map_prog_link_lookup(map, &pprog, NULL, attr->query.attach_type);
	if (ret)
		goto end;

	prog = *pprog;
	prog_cnt = !prog ? 0 : 1;

	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
		goto end;

	/* we do not hold the refcnt, the bpf prog may be released
	 * asynchronously and the id would be set to 0.
	 */
	id = data_race(prog->aux->id);
	if (id == 0)
		prog_cnt = 0;

end:
	rcu_read_unlock();

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
		ret = -EFAULT;

	return ret;
}
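
/* Example (illustrative sketch): sock_map_bpf_prog_query() serves the
 * BPF_PROG_QUERY command, which libbpf wraps as bpf_prog_query(); at most
 * one program id is returned per attach type. A hedged sketch. Kept out
 * of the build with #if 0.
 */
#if 0
#include <stdio.h>
#include <bpf/bpf.h>

void query_verdict(int map_fd)
{
	__u32 prog_id = 0, cnt = 1, attach_flags = 0;

	if (!bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0,
			    &attach_flags, &prog_id, &cnt) && cnt)
		printf("stream_verdict prog id: %u\n", prog_id);
}
#endif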

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
	} else {
		saved_unhash = psock->saved_unhash;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
	}
	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
		return;
	if (saved_unhash)
		saved_unhash(sk);
}
EXPORT_SYMBOL_GPL(sock_map_unhash);

void sock_map_destroy(struct sock *sk)
{
	void (*saved_destroy)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
	} else {
		saved_destroy = psock->saved_destroy;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
		sk_psock_stop(psock);
		sk_psock_put(sk, psock);
	}
	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
		return;
	if (saved_destroy)
		saved_destroy(sk);
}
EXPORT_SYMBOL_GPL(sock_map_destroy);

void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		saved_close = psock->saved_close;
		sock_map_remove_links(sk, psock);
		psock = sk_psock_get(sk);
		if (unlikely(!psock))
			goto no_psock;
		rcu_read_unlock();
		sk_psock_stop(psock);
		release_sock(sk);
		cancel_delayed_work_sync(&psock->work);
		sk_psock_put(sk, psock);
	} else {
		saved_close = READ_ONCE(sk->sk_prot)->close;
no_psock:
		rcu_read_unlock();
		release_sock(sk);
	}

	/* Make sure we do not recurse. This is a bug.
	 * Leak the socket instead of crashing on a stack overflow.
	 */
	if (WARN_ON_ONCE(saved_close == sock_map_close))
		return;
	saved_close(sk, timeout);
}
EXPORT_SYMBOL_GPL(sock_map_close);

struct sockmap_link {
	struct bpf_link link;
	struct bpf_map *map;
	enum bpf_attach_type attach_type;
};

static void sock_map_link_release(struct bpf_link *link)
{
	struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);

	mutex_lock(&sockmap_mutex);
	if (!sockmap_link->map)
		goto out;

	WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link,
					  sockmap_link->attach_type));

	bpf_map_put_with_uref(sockmap_link->map);
	sockmap_link->map = NULL;
out:
	mutex_unlock(&sockmap_mutex);
}

static int sock_map_link_detach(struct bpf_link *link)
{
	sock_map_link_release(link);
	return 0;
}

static void sock_map_link_dealloc(struct bpf_link *link)
{
	kfree(link);
}

/* Handle the following two cases:
 * case 1: link != NULL, prog != NULL, old != NULL
 * case 2: link != NULL, prog != NULL, old == NULL
 */
static int sock_map_link_update_prog(struct bpf_link *link,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
	struct bpf_prog **pprog, *old_link_prog;
	struct bpf_link **plink;
	int ret = 0;

	mutex_lock(&sockmap_mutex);

	/* If old prog is not NULL, ensure old prog is the same as link->prog. */
	if (old && link->prog != old) {
		ret = -EPERM;
		goto out;
	}
	/* Ensure link->prog has the same type/attach_type as the new prog. */
	if (link->prog->type != prog->type ||
	    link->prog->expected_attach_type != prog->expected_attach_type) {
		ret = -EINVAL;
		goto out;
	}
	if (!sockmap_link->map) {
		ret = -ENOLINK;
		goto out;
	}

	ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
					sockmap_link->attach_type);
	if (ret)
		goto out;

	/* return error if the stored bpf_link does not match the incoming
	 * bpf_link.
	 */
	if (link != *plink) {
		ret = -EBUSY;
		goto out;
	}

	if (old) {
		ret = psock_replace_prog(pprog, prog, old);
		if (ret)
			goto out;
	} else {
		psock_set_prog(pprog, prog);
	}

	bpf_prog_inc(prog);
	old_link_prog = xchg(&link->prog, prog);
	bpf_prog_put(old_link_prog);

out:
	mutex_unlock(&sockmap_mutex);
	return ret;
}

static u32 sock_map_link_get_map_id(const struct sockmap_link *sockmap_link)
{
	u32 map_id = 0;

	mutex_lock(&sockmap_mutex);
	if (sockmap_link->map)
		map_id = sockmap_link->map->id;
	mutex_unlock(&sockmap_mutex);
	return map_id;
}

static int sock_map_link_fill_info(const struct bpf_link *link,
				   struct bpf_link_info *info)
{
	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
	u32 map_id = sock_map_link_get_map_id(sockmap_link);

	info->sockmap.map_id = map_id;
	info->sockmap.attach_type = sockmap_link->attach_type;
	return 0;
}

static void sock_map_link_show_fdinfo(const struct bpf_link *link,
				      struct seq_file *seq)
{
	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
	u32 map_id = sock_map_link_get_map_id(sockmap_link);

	seq_printf(seq, "map_id:\t%u\n", map_id);
	seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type);
}

static const struct bpf_link_ops sock_map_link_ops = {
	.release = sock_map_link_release,
	.dealloc = sock_map_link_dealloc,
	.detach = sock_map_link_detach,
	.update_prog = sock_map_link_update_prog,
	.fill_link_info = sock_map_link_fill_info,
	.show_fdinfo = sock_map_link_show_fdinfo,
};
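
/* Example (illustrative sketch): user space reaches sock_map_link_create()
 * below through bpf_link_create() with the map fd as target, and
 * sock_map_link_update_prog() above through bpf_link_update(). A hedged
 * libbpf sketch; the prog fds are assumed to be loaded sk_msg programs.
 * Kept out of the build with #if 0.
 */
#if 0
#include <bpf/bpf.h>

int link_attach_and_swap(int map_fd, int prog_fd, int new_prog_fd)
{
	int link_fd = bpf_link_create(prog_fd, map_fd,
				      BPF_SK_MSG_VERDICT, NULL);
	if (link_fd < 0)
		return link_fd;

	/* Atomically replace the program behind the link. */
	return bpf_link_update(link_fd, new_prog_fd, NULL);
}
#endif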

int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct sockmap_link *sockmap_link;
	enum bpf_attach_type attach_type;
	struct bpf_map *map;
	int ret;

	if (attr->link_create.flags)
		return -EINVAL;

	map = bpf_map_get_with_uref(attr->link_create.target_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) {
		ret = -EINVAL;
		goto out;
	}

	sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER);
	if (!sockmap_link) {
		ret = -ENOMEM;
		goto out;
	}

	attach_type = attr->link_create.attach_type;
	bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog);
	sockmap_link->map = map;
	sockmap_link->attach_type = attach_type;

	ret = bpf_link_prime(&sockmap_link->link, &link_primer);
	if (ret) {
		kfree(sockmap_link);
		goto out;
	}

	mutex_lock(&sockmap_mutex);
	ret = sock_map_prog_update(map, prog, NULL, &sockmap_link->link, attach_type);
	mutex_unlock(&sockmap_mutex);
	if (ret) {
		bpf_link_cleanup(&link_primer);
		goto out;
	}

	/* Increase refcnt for the prog since when old prog is replaced with
	 * psock_replace_prog() and psock_set_prog() its refcnt will be decreased.
	 *
	 * Actually, we do not need to increase refcnt for the prog since bpf_link
	 * will hold a reference. But in order to have less complexity w.r.t.
	 * replacing/setting prog, let us increase the refcnt to make things simpler.
	 */
	bpf_prog_inc(prog);

	return bpf_link_settle(&link_primer);

out:
	bpf_map_put_with_uref(map);
	return ret;
}

static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->key_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
	.target = "sockmap",
	.attach_target = sock_map_iter_attach_target,
	.detach_target = sock_map_iter_detach_target,
	.show_fdinfo = bpf_iter_map_show_fdinfo,
	.fill_link_info = bpf_iter_map_fill_link_info,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__sockmap, key),
		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
		{ offsetof(struct bpf_iter__sockmap, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static int __init bpf_sockmap_iter_init(void)
{
	sock_map_iter_reg.ctx_arg_info[1].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);
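
/* Example (illustrative sketch): the "sockmap" iterator registered above
 * can be consumed by an iter/sockmap BPF program whose context is struct
 * bpf_iter__sockmap. A hedged BPF C sketch, assuming vmlinux.h provides
 * the context type; key and sk may be NULL at the end of the walk. Kept
 * out of the kernel build with #if 0.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/sockmap")
int dump_sockmap(struct bpf_iter__sockmap *ctx)
{
	struct sock *sk = ctx->sk;
	__u32 *key = ctx->key;

	if (!key || !sk)
		return 0;

	BPF_SEQ_PRINTF(ctx->meta->seq, "slot %u: sk_state %d\n",
		       *key, sk->sk_state);
	return 0;
}

char _license[] SEC("license") = "GPL";
#endif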