// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <trace/events/sock.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

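/* Uncharge up to @bytes from the front of @msg, zeroing out fully
 * consumed elements and advancing sg.start past them. A partially
 * consumed element is adjusted in place. Compare sk_msg_return()
 * below, which only uncharges and leaves the scatterlist untouched.
 */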
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

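/* Trim @msg from the tail down to @len bytes, freeing pages and
 * uncharging memory for any fully trimmed elements. Callers use this
 * to unwind partially built messages, e.g. sk_msg_alloc() above saves
 * osize = msg->sg.size up front and calls sk_msg_trim(sk, msg, osize)
 * when a later refill or charge fails.
 */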
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

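/* Pin user pages from @from and link them into @msg without copying,
 * until @bytes are mapped or all MAX_MSG_FRAGS slots are in use. On
 * error the iov_iter is reverted to its original position; the caller
 * is expected to trim @msg back if the sg entries must also go away.
 */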
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy) {
				copied = copied ? copied : -EFAULT;
				goto out;
			}

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break out.
				 */
				if (copy != sge->length)
					goto out;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}
out:
	if (psock->work_state.skb && copied > 0)
		schedule_work(&psock->work);
	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg(GFP_KERNEL);
}

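/* Map @len bytes of @skb starting at @off into @msg's scatterlist,
 * queue the msg on the psock ingress list, and wake up the receiver.
 * Returns the number of bytes queued or a negative error.
 */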
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
		/* skb linearize may fail with ENOMEM, but let's simply try
		 * again later if this happens. Under memory pressure we
		 * don't want to drop the skb. We need to linearize the skb
		 * so that the mapping in skb_to_sgvec cannot error.
		 */
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * so skip memory accounting and owner transition since they are
	 * already set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or call
 * skb_set_owner_r() because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len)
{
	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       struct sk_buff *skb,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->skb = skb;
		state->len = len;
		state->off = off;
	} else {
		sock_drop(psock->sk, skb);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

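/* Workqueue handler that drains psock->ingress_skb. Each skb is either
 * transmitted (egress redirect) or queued as ingress data on this
 * socket. An skb that fails with -EAGAIN is parked in work_state and
 * retried on the next run; hard errors tear down the pipe.
 */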
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->skb)) {
		spin_lock_bh(&psock->ingress_lock);
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		spin_unlock_bh(&psock->ingress_lock);
	}
	if (skb)
		goto start;

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, skb,
							   len, off);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
	/* We null the skb here to ensure that calls to sk_psock_backlog
	 * do not pick up the freed skb.
	 */
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

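/* Clear SK_PSOCK_TX_ENABLED and purge corked data and the ingress
 * queues. Holding ingress_lock here means we cannot race with the
 * backlog parking an skb in work_state after we have zapped it.
 */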
void sk_psock_stop(struct sk_psock *psock)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

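/* Run the msg_parser program on @msg and map its return code to a
 * __SK_* verdict. On __SK_REDIRECT the target socket is stashed in
 * psock->sk_redir with a reference held until the redirect completes.
 */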
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir) {
			sock_put(psock->sk_redir);
			psock->sk_redir = NULL;
		}
		if (!msg->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		psock->redir_ingress = sk_msg_to_ingress(msg);
		psock->sk_redir = msg->sk_redir;
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state, so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

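/* Apply a BPF verdict to @skb. __SK_PASS queues the skb as ingress
 * data on this socket, directly when the backlog queue is empty and
 * via the workqueue otherwise; __SK_REDIRECT hands it to the target
 * psock; anything else drops it.
 */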
static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			skb_bpf_redirect_clear(skb);
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Any
		 * sk_psock_skb_ingress errors will be handled by
		 * retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		if (ret == SK_PASS)
			skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	trace_sk_data_ready(sk);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

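/* Hook up the stream parser: sk_psock_strp_parse() determines message
 * boundaries via the parser program, and sk_psock_strp_read() runs the
 * verdict program on each complete message.
 */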
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = sk_psock_strp_read,
		.read_sock_done = sk_psock_strp_read_done,
		.parse_msg = sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	skb_get(skb);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	trace_sk_data_ready(sk);

	if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
		return;
	sock->ops->read_skb(sk, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}