// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);
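/* Uncharge up to @bytes of the socket's memory accounting without consuming
 * the scatterlist elements themselves (unlike sk_msg_return_zero() above,
 * which also zeroes fully uncharged elements and advances msg->sg.start).
 */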
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;
	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start
	 * at the new copy location. However, trimmed data that has not
	 * yet been used in a copy op does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
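/* Copy up to @bytes from @from into the sg elements starting at msg->sg.curr,
 * resuming at offset msg->sg.copybreak within the current element.
 */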
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_wait_data);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot fail.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0)) {
		kfree(msg);
		return num_sge;
	}

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;

	/* If we are receiving on the same sock skb->sk is already assigned;
	 * skip memory accounting and owner transition since it is already
	 * set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}
/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}

static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				kfree_skb(skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}
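/* Release every queued ingress sk_msg: drop its page references and memory
 * charges via sk_msg_free() and then free the sk_msg itself.
 */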
static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		kfree_skb(skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_stop(psock, false);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);
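/* Map the SK_PASS/SK_DROP return code of a BPF verdict program onto the
 * internal __SK_* actions; SK_PASS with a pending redirect target becomes
 * __SK_REDIRECT.
 */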
static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static void sk_psock_skb_redirect(struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		kfree_skb(skb);
		return;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		kfree_skb(skb);
		return;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		kfree_skb(skb);
		return;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock->sk, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
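/* Apply a verdict computed for @skb on the non-TLS path: queue it on our own
 * ingress queue (__SK_PASS), redirect it to another psock (__SK_REDIRECT),
 * or free it (__SK_DROP and anything else).
 */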
static void sk_psock_verdict_apply(struct sk_psock *psock,
				   struct sk_buff *skb, int verdict)
{
	struct sock *sk_other;
	int err = -EIO;

	switch (verdict) {
	case __SK_PASS:
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Errors
		 * from sk_psock_skb_ingress_self are handled by
		 * retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
			}
			spin_unlock_bh(&psock->ingress_lock);
		}
		break;
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_DROP:
	default:
out_free:
		kfree_skb(skb);
	}
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}
/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = sk_psock_strp_read,
		.read_sock_done = sk_psock_strp_read_done,
		.parse_msg = sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}