// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

/* This mutex is used to
 *  - protect races between prog/link attach/detach and link prog update, and
 *  - protect races between releasing and accessing a map in bpf_link.
 * A single global mutex is used since contention is expected to be low.
 */
static DEFINE_MUTEX(sockmap_mutex);

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, struct bpf_link *link,
				u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (attr->max_entries == 0 ||
	    attr->key_size != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		bpf_map_area_free(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	mutex_lock(&sockmap_mutex);
	ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
	mutex_unlock(&sockmap_mutex);
	fdput(f);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	mutex_lock(&sockmap_mutex);
	ret = sock_map_prog_update(map, NULL, prog, NULL, attr->attach_type);
	mutex_unlock(&sockmap_mutex);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	release_sock(sk);
}

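/* Each socket tracked by a map keeps, in its psock, a list of
 * sk_psock_link entries: one per map slot the socket currently
 * occupies. link_raw points back at the slot (the sks[] entry for a
 * sockmap, the bpf_shtab_elem for a sockhash) so the entry can be
 * removed from the right place when the socket goes away.
 */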
static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}

static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct sk_psock_progs *progs = sock_map_progs(map);

			if (psock->saved_data_ready && progs->stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && progs->stream_verdict)
				verdict_stop = true;
			if (psock->saved_data_ready && progs->skb_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		if (verdict_stop)
			sk_psock_stop_verdict(sk, psock);

		if (psock->psock_update_sk_prot)
			psock->psock_update_sk_prot(sk, psock, false);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	if (!sk->sk_prot->psock_update_sk_prot)
		return -EINVAL;
	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}

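/* Attach the programs currently configured on @map to @sk: take a
 * reference on each program, create or reuse the socket's psock, and
 * install the strparser or verdict data_ready hooks as needed. Fails
 * with -EBUSY if the psock already carries a conflicting program.
 */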
static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog *stream_verdict = NULL;
	struct bpf_prog *stream_parser = NULL;
	struct bpf_prog *skb_verdict = NULL;
	struct bpf_prog *msg_parser = NULL;
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	skb_verdict = READ_ONCE(progs->skb_verdict);
	if (skb_verdict) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict)) {
			ret = PTR_ERR(skb_verdict);
			goto out_put_msg_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);
	if (stream_parser)
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
	if (stream_verdict)
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
	if (skb_verdict)
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);

	/* msg_* and stream_* program references are tracked in psock after
	 * this point. Reference dec and cleanup will occur through the psock
	 * destructor.
	 */
	ret = sock_map_init_proto(sk, psock);
	if (ret < 0) {
		sk_psock_put(sk, psock);
		goto out;
	}

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		ret = sk_psock_init_strp(sk, psock);
		if (ret) {
			write_unlock_bh(&sk->sk_callback_lock);
			sk_psock_put(sk, psock);
			goto out;
		}
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_progs:
	if (skb_verdict)
		bpf_prog_put(skb_verdict);
out_put_msg_parser:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
out:
	return ret;
}

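/* Map teardown: every socket still in the map must be unlinked under
 * lock_sock(), so each entry is cleared with xchg() and released with
 * the socket lock held.
 */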
static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight, so it
	 * is safe to walk the map and remove entries without risking a
	 * race in the EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			sock_hold(sk);
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
			sock_put(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	bpf_map_area_free(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk;
	int err = 0;

	spin_lock_bh(&stab->lock);
	sk = *psk;
	if (!sk_test || sk_test == sk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static long sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}

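/* Iteration restarts from index 0 when @key is NULL or out of range;
 * otherwise the next key is simply the next array index.
 */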
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return sk->sk_state != TCP_LISTEN;
	else
		return sk->sk_state == TCP_ESTABLISHED;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return !!sk->sk_prot->psock_update_sk_prot;
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	if (sk_is_stream_unix(sk))
		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
	return true;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);

int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}

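/* Update path for map updates issued from a BPF program. Unlike the
 * syscall path above, the socket cannot be locked with lock_sock()
 * here, so only the bh spinlock is taken.
 */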
static long sock_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

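/* For reference, a minimal BPF-side user of the redirect helper above
 * might look as follows (illustrative sketch only; the map and program
 * names are made up, and the program must be attached to the map with
 * BPF_SK_SKB_STREAM_VERDICT):
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SOCKMAP);
 *		__uint(max_entries, 2);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} sock_map SEC(".maps");
 *
 *	SEC("sk_skb/stream_verdict")
 *	int prog_verdict(struct __sk_buff *skb)
 *	{
 *		__u32 key = 0;
 *
 *		// Redirect every skb to the socket at index 0; pass
 *		// BPF_F_INGRESS as the last argument to target its
 *		// ingress queue instead of its egress path.
 *		return bpf_sk_redirect_map(skb, &sock_map, key, 0);
 *	}
 */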
struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start	= sock_map_seq_start,
	.next	= sock_map_seq_next,
	.stop	= sock_map_seq_stop,
	.show	= sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	return 0;
}

static void sock_map_fini_seq_private(void *priv_data)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_stab);

	usage += (u64)map->max_entries * sizeof(struct sock *);
	return usage;
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops		= &sock_map_seq_ops,
	.init_seq_private	= sock_map_init_seq_private,
	.fini_seq_private	= sock_map_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_map_seq_info),
};

BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= sock_map_mem_usage,
	.map_btf_id		= &sock_map_btf_ids[0],
	.iter_seq_info		= &sock_map_iter_seq_info,
};

struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

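/* Walk one RCU-protected bucket chain and return the element whose
 * hash and full key both match, or NULL. Callers hold either the RCU
 * read lock or the bucket lock.
 */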
static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
}

static long sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	spin_unlock_bh(&bucket->lock);
	return ret;
}

static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

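/* Insert or replace the element for @key under the bucket lock. A new
 * element is linked at the head of the chain before the old one is
 * unlinked, so a concurrent RCU lookup always finds the new entry
 * first.
 */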
static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_elem *elem, *elem_new;
	struct bpf_shtab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add new element to the head of the list, so that
	 * concurrent search will find it before old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
				     struct bpf_shtab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
					     struct bpf_shtab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}

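/* The bucket count is the map's max_entries rounded up to a power of
 * two, so sock_hash_select_bucket() can mask instead of dividing.
 * Each element stores its key inline, padded to an 8-byte multiple.
 */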
static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_shtab *htab;
	int i, err;

	if (attr->max_entries == 0 ||
	    attr->key_size == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_shtab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	bpf_map_area_free(htab);
	return ERR_PTR(err);
}

static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight, so it
	 * is safe to walk the map and remove entries without risking a
	 * race in the EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to sockhash. Since link
		 * exists, psock exists and holds a ref to socket. That
		 * lets us grab a socket ref too.
		 */
		spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context to
		 * block for socket lock before deleting the psock's
		 * link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	bpf_map_area_free(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func		= bpf_sk_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func		= bpf_msg_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

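/* seq_file iterator state for SOCKHASH maps. Buckets are walked in
 * order under the RCU read lock taken in sock_hash_seq_start().
 */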
struct sock_hash_seq_info {
	struct bpf_map *map;
	struct bpf_shtab *htab;
	u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
{
	const struct bpf_shtab *htab = info->htab;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;

		/* no more elements, continue in the next bucket */
		info->bucket_id++;
	}

	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
		bucket = &htab->buckets[info->bucket_id];
		node = rcu_dereference(hlist_first_rcu(&bucket->head));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;
	}

	return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_hash_seq_stop */
	rcu_read_lock();
	return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	++*pos;
	return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_shtab_elem *elem = v;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !elem);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (elem) {
		ctx.key = elem->key;
		ctx.sk = elem->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_hash_seq_show(seq, NULL);

	/* pairs with sock_hash_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
	.start	= sock_hash_seq_start,
	.next	= sock_hash_seq_next,
	.stop	= sock_hash_seq_stop,
	.show	= sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	info->htab = container_of(aux->map, struct bpf_shtab, map);
	return 0;
}

static void sock_hash_fini_seq_private(void *priv_data)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_hash_mem_usage(const struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u64 usage = sizeof(*htab);

	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
	return usage;
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
	.seq_ops		= &sock_hash_seq_ops,
	.init_seq_private	= sock_hash_init_seq_private,
	.fini_seq_private	= sock_hash_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
};

BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
const struct bpf_map_ops sock_hash_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_hash_alloc,
	.map_free		= sock_hash_free,
	.map_get_next_key	= sock_hash_get_next_key,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_hash_delete_elem,
	.map_lookup_elem	= sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref	= sock_hash_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= sock_hash_mem_usage,
	.map_btf_id		= &sock_hash_map_btf_ids[0],
	.iter_seq_info		= &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_shtab, map)->progs;
	default:
		break;
	}

	return NULL;
}

static int sock_map_prog_link_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
				     struct bpf_link ***plink, u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog **cur_pprog;
	struct bpf_link **cur_plink;

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		cur_pprog = &progs->msg_parser;
		cur_plink = &progs->msg_parser_link;
		break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	case BPF_SK_SKB_STREAM_PARSER:
		cur_pprog = &progs->stream_parser;
		cur_plink = &progs->stream_parser_link;
		break;
#endif
	case BPF_SK_SKB_STREAM_VERDICT:
		if (progs->skb_verdict)
			return -EBUSY;
		cur_pprog = &progs->stream_verdict;
		cur_plink = &progs->stream_verdict_link;
		break;
	case BPF_SK_SKB_VERDICT:
		if (progs->stream_verdict)
			return -EBUSY;
		cur_pprog = &progs->skb_verdict;
		cur_plink = &progs->skb_verdict_link;
		break;
	default:
		return -EOPNOTSUPP;
	}

	*pprog = cur_pprog;
	if (plink)
		*plink = cur_plink;
	return 0;
}

/* Handle the following four cases:
 * prog_attach: prog != NULL, old == NULL, link == NULL
 * prog_detach: prog == NULL, old != NULL, link == NULL
 * link_attach: prog != NULL, old == NULL, link != NULL
 * link_detach: prog == NULL, old != NULL, link != NULL
 */
static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, struct bpf_link *link,
				u32 which)
{
	struct bpf_prog **pprog;
	struct bpf_link **plink;
	int ret;

	ret = sock_map_prog_link_lookup(map, &pprog, &plink, which);
	if (ret)
		return ret;

	/* for prog_attach/prog_detach/link_attach, return an error if a
	 * bpf_link is already attached to that slot.
	 */
	if ((!link || prog) && *plink)
		return -EBUSY;

	if (old) {
		ret = psock_replace_prog(pprog, prog, old);
		if (!ret)
			*plink = NULL;
	} else {
		psock_set_prog(pprog, prog);
		if (link)
			*plink = link;
	}

	return ret;
}

int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
	struct bpf_prog **pprog;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	u32 id = 0;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	rcu_read_lock();

	ret = sock_map_prog_link_lookup(map, &pprog, NULL, attr->query.attach_type);
	if (ret)
		goto end;

	prog = *pprog;
	prog_cnt = !prog ? 0 : 1;

	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
		goto end;

	/* we do not hold the refcnt, the bpf prog may be released
	 * asynchronously and the id would be set to 0.
	 */
	id = data_race(prog->aux->id);
	if (id == 0)
		prog_cnt = 0;

end:
	rcu_read_unlock();

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
		ret = -EFAULT;

	fdput(f);
	return ret;
}

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
	} else {
		saved_unhash = psock->saved_unhash;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
	}
	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
		return;
	if (saved_unhash)
		saved_unhash(sk);
}
EXPORT_SYMBOL_GPL(sock_map_unhash);

void sock_map_destroy(struct sock *sk)
{
	void (*saved_destroy)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
	} else {
		saved_destroy = psock->saved_destroy;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
		sk_psock_stop(psock);
		sk_psock_put(sk, psock);
	}
	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
		return;
	if (saved_destroy)
		saved_destroy(sk);
}
EXPORT_SYMBOL_GPL(sock_map_destroy);

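/* Replacement for the protocol's close callback while a socket is
 * held in a map: drop all map links and the psock, then restore and
 * call the protocol's original close.
 */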
void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		saved_close = psock->saved_close;
		sock_map_remove_links(sk, psock);
		psock = sk_psock_get(sk);
		if (unlikely(!psock))
			goto no_psock;
		rcu_read_unlock();
		sk_psock_stop(psock);
		release_sock(sk);
		cancel_delayed_work_sync(&psock->work);
		sk_psock_put(sk, psock);
	} else {
		saved_close = READ_ONCE(sk->sk_prot)->close;
no_psock:
		rcu_read_unlock();
		release_sock(sk);
	}

	/* Make sure we do not recurse. This is a bug.
	 * Leak the socket instead of crashing on a stack overflow.
	 */
	if (WARN_ON_ONCE(saved_close == sock_map_close))
		return;
	saved_close(sk, timeout);
}
EXPORT_SYMBOL_GPL(sock_map_close);

struct sockmap_link {
	struct bpf_link link;
	struct bpf_map *map;
	enum bpf_attach_type attach_type;
};

static void sock_map_link_release(struct bpf_link *link)
{
	struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);

	mutex_lock(&sockmap_mutex);
	if (!sockmap_link->map)
		goto out;

	WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link,
					  sockmap_link->attach_type));

	bpf_map_put_with_uref(sockmap_link->map);
	sockmap_link->map = NULL;
out:
	mutex_unlock(&sockmap_mutex);
}

static int sock_map_link_detach(struct bpf_link *link)
{
	sock_map_link_release(link);
	return 0;
}

static void sock_map_link_dealloc(struct bpf_link *link)
{
	kfree(link);
}

/* Handle the following two cases:
 * case 1: link != NULL, prog != NULL, old != NULL
 * case 2: link != NULL, prog != NULL, old == NULL
 */
static int sock_map_link_update_prog(struct bpf_link *link,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
	struct bpf_prog **pprog, *old_link_prog;
	struct bpf_link **plink;
	int ret = 0;

	mutex_lock(&sockmap_mutex);

	/* If old prog is not NULL, ensure old prog is the same as link->prog. */
	if (old && link->prog != old) {
		ret = -EPERM;
		goto out;
	}
	/* Ensure link->prog has the same type/attach_type as the new prog. */
	if (link->prog->type != prog->type ||
	    link->prog->expected_attach_type != prog->expected_attach_type) {
		ret = -EINVAL;
		goto out;
	}

	ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
					sockmap_link->attach_type);
	if (ret)
		goto out;

	/* Return an error if the stored bpf_link does not match the
	 * incoming bpf_link.
	 */
	if (link != *plink) {
		ret = -EBUSY;
		goto out;
	}

	if (old) {
		ret = psock_replace_prog(pprog, prog, old);
		if (ret)
			goto out;
	} else {
		psock_set_prog(pprog, prog);
	}

	bpf_prog_inc(prog);
	old_link_prog = xchg(&link->prog, prog);
	bpf_prog_put(old_link_prog);

out:
	mutex_unlock(&sockmap_mutex);
	return ret;
}

static u32 sock_map_link_get_map_id(const struct sockmap_link *sockmap_link)
{
	u32 map_id = 0;

	mutex_lock(&sockmap_mutex);
	if (sockmap_link->map)
		map_id = sockmap_link->map->id;
	mutex_unlock(&sockmap_mutex);
	return map_id;
}

static int sock_map_link_fill_info(const struct bpf_link *link,
				   struct bpf_link_info *info)
{
	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
	u32 map_id = sock_map_link_get_map_id(sockmap_link);

	info->sockmap.map_id = map_id;
	info->sockmap.attach_type = sockmap_link->attach_type;
	return 0;
}

static void sock_map_link_show_fdinfo(const struct bpf_link *link,
				      struct seq_file *seq)
{
	const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
	u32 map_id = sock_map_link_get_map_id(sockmap_link);

	seq_printf(seq, "map_id:\t%u\n", map_id);
	seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type);
}

static const struct bpf_link_ops sock_map_link_ops = {
	.release = sock_map_link_release,
	.dealloc = sock_map_link_dealloc,
	.detach = sock_map_link_detach,
	.update_prog = sock_map_link_update_prog,
	.fill_link_info = sock_map_link_fill_info,
	.show_fdinfo = sock_map_link_show_fdinfo,
};

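/* From user space, sock_map_link_create() below is reached via the
 * BPF_LINK_CREATE command with the map fd as the attach target. A
 * rough libbpf-based sketch (illustrative only; fd setup and error
 * handling elided, variable names made up):
 *
 *	#include <bpf/bpf.h>
 *
 *	// prog_fd: a loaded sk_skb verdict program
 *	// map_fd:  a BPF_MAP_TYPE_SOCKMAP or _SOCKHASH map
 *	int link_fd = bpf_link_create(prog_fd, map_fd,
 *				      BPF_SK_SKB_STREAM_VERDICT, NULL);
 *	if (link_fd < 0)
 *		// handle error
 */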
int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct sockmap_link *sockmap_link;
	enum bpf_attach_type attach_type;
	struct bpf_map *map;
	int ret;

	if (attr->link_create.flags)
		return -EINVAL;

	map = bpf_map_get_with_uref(attr->link_create.target_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) {
		ret = -EINVAL;
		goto out;
	}

	sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER);
	if (!sockmap_link) {
		ret = -ENOMEM;
		goto out;
	}

	attach_type = attr->link_create.attach_type;
	bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog);
	sockmap_link->map = map;
	sockmap_link->attach_type = attach_type;

	ret = bpf_link_prime(&sockmap_link->link, &link_primer);
	if (ret) {
		kfree(sockmap_link);
		goto out;
	}

	mutex_lock(&sockmap_mutex);
	ret = sock_map_prog_update(map, prog, NULL, &sockmap_link->link, attach_type);
	mutex_unlock(&sockmap_mutex);
	if (ret) {
		bpf_link_cleanup(&link_primer);
		goto out;
	}

	/* Increase the refcnt for the prog since psock_replace_prog() and
	 * psock_set_prog() decrease it when the old prog is replaced.
	 *
	 * Strictly speaking this is not needed, since the bpf_link already
	 * holds a reference to the prog. But taking the extra reference
	 * keeps the prog replace/set paths simpler.
	 */
	bpf_prog_inc(prog);

	return bpf_link_settle(&link_primer);

out:
	bpf_map_put_with_uref(map);
	return ret;
}

static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->key_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
	.target			= "sockmap",
	.attach_target		= sock_map_iter_attach_target,
	.detach_target		= sock_map_iter_detach_target,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__sockmap, key),
		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
		{ offsetof(struct bpf_iter__sockmap, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static int __init bpf_sockmap_iter_init(void)
{
	sock_map_iter_reg.ctx_arg_info[1].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);