// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/util_macros.h>

#include <net/inet_common.h>
#include <net/tls.h>

void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tcp;
	int copied;

	if (!skb || !skb->len || !sk_is_tcp(sk))
		return;

	if (skb_bpf_strparser(skb))
		return;

	tcp = tcp_sk(sk);
	copied = tcp->copied_seq + skb->len;
	WRITE_ONCE(tcp->copied_seq, copied);
	tcp_rcv_space_adjust(sk);
	__tcp_cleanup_rbuf(sk, skb->len);
}

static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes) {
				if (sge->length)
					sk_msg_iter_var_prev(i);
				break;
			}
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}

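/* Push the data described by the sk_msg scatterlist out through
 * tcp_sendmsg_locked() using MSG_SPLICE_PAGES, sending at most apply_bytes.
 * Pages are released and, when @uncharge is set, socket memory is uncharged
 * as each element is fully transmitted. Expected to be called with the
 * socket lock held (see tcp_bpf_push_locked()).
 */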
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	struct msghdr msghdr = {};
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		struct bio_vec bvec;
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp)
			msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;

		if (size < sge->length && msg->sg.start != msg->sg.end)
			msghdr.msg_flags |= MSG_MORE;

		bvec_set_page(&bvec, page, size, off);
		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
		ret = tcp_sendmsg_locked(sk, &msghdr, size);
		if (ret <= 0)
			return ret;

		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags)
{
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock))
		return -EPIPE;

	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
			     long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}

static bool is_next_msg_fin(struct sk_psock *psock)
{
	struct scatterlist *sge;
	struct sk_msg *msg_rx;
	int i;

	msg_rx = sk_psock_peek_msg(psock);
	i = msg_rx->sg.start;
	sge = sk_msg_elem(msg_rx, i);
	if (!sge->length) {
		struct sk_buff *skb = msg_rx->skb;

		if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			return true;
	}
	return false;
}

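/* recvmsg handler installed when a stream/skb verdict program is attached
 * (TCP_BPF_RX and TCP_BPF_TXRX configs). Data is read from the psock
 * ingress queue, and tcp->copied_seq and receive buffer accounting are
 * kept in sync with what was actually consumed.
 */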
static int tcp_bpf_recvmsg_parser(struct sock *sk,
				  struct msghdr *msg,
				  size_t len,
				  int flags,
				  int *addr_len)
{
	struct tcp_sock *tcp = tcp_sk(sk);
	u32 seq = tcp->copied_seq;
	struct sk_psock *psock;
	int copied = 0;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (!len)
		return 0;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, flags, addr_len);

	lock_sock(sk);

	/* We may have received data on the sk_receive_queue pre-accept, and
	 * then we cannot use read_skb in this context because we have not
	 * been assigned a sk_socket yet and so have no link to the ops. The
	 * work-around is to check the sk_receive_queue and in these cases
	 * read the skbs off the queue again. The read_skb hook is not running
	 * at this point because of lock_sock, so we avoid having multiple
	 * runners in read_skb.
	 */
	if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
		tcp_data_ready(sk);
		/* This handles the ENOMEM errors if we both receive data
		 * pre-accept and are already under memory pressure. At least
		 * let the user know to retry.
		 */
		if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
			copied = -EAGAIN;
			goto out;
		}
	}

msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	/* The typical case for EFAULT is that the socket was gracefully
	 * shut down with a FIN packet. The other case is some error on
	 * copy_page_to_iter, which would be unexpected. On FIN, return
	 * the correct return code of zero.
	 */
	if (copied == -EFAULT) {
		bool is_fin = is_next_msg_fin(psock);

		if (is_fin) {
			copied = 0;
			seq++;
			goto out;
		}
	}
	seq += copied;
	if (!copied) {
		long timeo;
		int data;

		if (sock_flag(sk, SOCK_DONE))
			goto out;

		if (sk->sk_err) {
			copied = sock_error(sk);
			goto out;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;

		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			goto out;
		}

		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
		if (!timeo) {
			copied = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			goto out;
		}

		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data && !sk_psock_queue_empty(psock))
			goto msg_bytes_ready;
		copied = -EAGAIN;
	}
out:
	WRITE_ONCE(tcp->copied_seq, seq);
	tcp_rcv_space_adjust(sk);
	if (copied > 0)
		__tcp_cleanup_rbuf(sk, copied);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied;
}

static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (!len)
		return 0;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		long timeo;
		int data;

		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, flags, addr_len);
		}
		copied = -EAGAIN;
	}
	ret = copied;
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

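/* Apply the BPF msg verdict (PASS/REDIRECT/DROP) to msg, honoring the
 * apply_bytes and cork_bytes state tracked on the psock. Updates *copied so
 * the count returned to userspace reflects only data that was actually
 * accepted. Called with the socket lock held; the lock is dropped and
 * re-acquired around the redirect path.
 */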
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
	struct sock *sk_redir;
	u32 tosend, origsize, sent, delta = 0;
	u32 eval;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track the delta in msg size so it can be added to or
		 * subtracted from the copied size returned to the user on
		 * SK_DROP. This ensures the user doesn't get a positive
		 * return code when msg_cut_data is combined with an SK_DROP
		 * verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;
	eval = __SK_NONE;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (!psock->apply_bytes) {
			/* Clean up before releasing the sock lock. */
			eval = psock->eval;
			psock->eval = __SK_NONE;
			psock->sk_redir = NULL;
		}
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, tosend);
		release_sock(sk);

		origsize = msg->sg.size;
		ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    msg, tosend, flags);
		sent = origsize - msg->sg.size;

		if (eval == __SK_REDIRECT)
			sock_put(sk_redir);

		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length) {
			if (eval == __SK_REDIRECT)
				sk_mem_charge(sk, tosend - sent);
			goto more_data;
		}
	}
	return ret;
}

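/* sendmsg handler installed when a BPF msg parser program is attached.
 * Copies data from the user iterator into an sk_msg (or the pending cork
 * buffer), runs the verdict via tcp_bpf_send_verdict(), and falls back to
 * tcp_sendmsg() when no psock is present.
 */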
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_RX,
	TCP_BPF_TXRX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE] = *base;
	prot[TCP_BPF_BASE].destroy = sock_map_destroy;
	prot[TCP_BPF_BASE].close = sock_map_close;
	prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;

	prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;

	prot[TCP_BPF_RX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser;

	prot[TCP_BPF_TXRX] = prot[TCP_BPF_TX];
	prot[TCP_BPF_TXRX].recvmsg = tcp_bpf_recvmsg_parser;
}

static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
late_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg == tcp_recvmsg &&
	       ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP;
}

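/* Swap the socket's proto ops for the tcp_bpf variant matching the attached
 * programs (BASE, TX, RX or TXRX), or restore the original proto when
 * @restore is true. For IPv6 sockets the replacement proto table is rebuilt
 * lazily from the socket's current proto.
 */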
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

	if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
		config = (config == TCP_BPF_TX) ?
			 TCP_BPF_TXRX : TCP_BPF_RX;
	}

	if (restore) {
		if (inet_csk_has_ulp(sk)) {
			/* TLS does not have an unhash proto in SW cases,
			 * but we need to ensure we stop using the sock_map
			 * unhash routine because the associated psock is being
			 * removed. So use the original unhash handler.
			 */
			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
		} else {
			sk->sk_write_space = psock->saved_write_space;
			/* Pairs with lockless read in sk_clone_lock() */
			sock_replace_proto(sk, psock->sk_proto);
		}
		return 0;
	}

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return -EINVAL;

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	/* Pairs with lockless read in sk_clone_lock() */
	sock_replace_proto(sk, &tcp_bpf_prots[family][config]);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	struct proto *prot = newsk->sk_prot;

	if (is_insidevar(prot, tcp_bpf_prots))
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */