// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
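/*
 * Lifecycle of the used-ring len field for a zerocopy TX buffer:
 * CLEAR -> IN_PROGRESS when the descriptor is handed to the lower device,
 * then DONE or FAILED from the completion callback, and back to CLEAR once
 * vhost_zerocopy_signal_used() has flushed the entry to the guest.
 */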

static const u64 vhost_net_features[VIRTIO_FEATURES_DWORDS] = {
	VHOST_FEATURES |
	(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
	(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
	(1ULL << VIRTIO_F_ACCESS_PLATFORM) |
	(1ULL << VIRTIO_F_RING_RESET),
	VIRTIO_BIT(VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO) |
	VIRTIO_BIT(VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO),
};

enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

#define VHOST_NET_BATCH 64
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info_msgzc *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
	/* Private page frag cache */
	struct page_frag_cache pf_cache;
};

static unsigned vhost_net_zcopy_mask __read_mostly;
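/*
 * rxq caches up to VHOST_NET_BATCH pointers pulled from the backend
 * ptr_ring: head indexes the next entry to consume, tail is one past the
 * last valid entry, so head == tail means the cache is empty.
 */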
static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);
	++rxq->head;
	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					     VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}
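/*
 * One ubuf_ref is shared by all in-flight zerocopy skbs of a TX queue:
 * the refcount starts at 1 and each submitted zerocopy packet takes an
 * extra reference that vhost_zerocopy_complete() drops on completion.
 */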
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}

}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}

/* The lower device may complete DMAs out of order. upend_idx tracks the
 * end of the outstanding range, done_idx tracks its head. Once the lower
 * device has completed DMAs contiguously from done_idx, we signal the
 * used index to the guest.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

static void vhost_zerocopy_complete(struct sk_buff *skb,
				    struct ubuf_info *ubuf_base, bool success)
{
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

static const struct ubuf_info_ops vhost_ubuf_ops = {
	.complete = vhost_zerocopy_complete,
};

static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vhost_vq_get_backend(vq))
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vhost_vq_get_backend(vq);
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}
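/*
 * Submit the queued XDP buffs to the backend in a single sendmsg() call
 * carrying a TUN_MSG_PTR control block, then flush the matching used
 * entries to the guest.
 */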
static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int i, err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	msghdr->msg_controllen = sizeof(ctl);
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Fail to batch sending packets\n");

		/* free pages owned by XDP; since this is an unlikely error path,
		 * keep it simple and avoid more complex bulk update for the
		 * used pages
		 */
		for (i = 0; i < nvq->batched_xdp; ++i)
			put_page(virt_to_head_page(nvq->xdp[i].data));
		nvq->batched_xdp = 0;
		nvq->done_idx = 0;
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}

static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return skb_queue_empty(&sock->sk->sk_receive_queue);
}

static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}

static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we could not guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = vhost_vq_get_backend(rvq);

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
				     tvq->busyloop_timeout;

	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_vq_has_work(vq)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	preempt_enable();

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else if (!poll_rx) /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}
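/*
 * Fetch the next TX descriptor. If the avail ring is empty and busy
 * polling is configured, flush any batched packets, busy poll the paired
 * RX virtqueue/socket for a while, then retry once.
 */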
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
			vhost_tx_batch(net, tnvq,
				       vhost_vq_get_backend(tvq),
				       msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}

static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
		       *out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
		       *len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}

static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}

#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
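/*
 * Copy one TX payload into a page-frag backed XDP buff: the packet data
 * starts at offset 'pad', the socket vnet header (if any) sits just in
 * front of it and is also copied to the very start of the buffer, and room
 * for skb_shared_info is reserved at the end. Frames that do not fit in a
 * single page get -ENOSPC so the caller can fall back to the per-packet
 * sendmsg path.
 */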
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
					     dev);
	struct socket *sock = vhost_vq_get_backend(vq);
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;
	void *buf;
	int copied;
	int ret;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
				    SMP_CACHE_BYTES);
	if (unlikely(!buf))
		return -ENOMEM;

	copied = copy_from_iter(buf + pad - sock_hlen, len, from);
	if (copied != len) {
		ret = -EFAULT;
		goto err;
	}

	gso = buf + pad - sock_hlen;

	if (!sock_hlen)
		memset(buf, 0, pad);

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
			       vhost16_to_cpu(vq, gso->csum_start) +
			       vhost16_to_cpu(vq, gso->csum_offset) + 2);

		if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
			ret = -EINVAL;
			goto err;
		}
	}

	/* pad contains sock_hlen */
	memcpy(buf, buf + pad - sock_hlen, sock_hlen);

	xdp_init_buff(xdp, buflen, NULL);
	xdp_prepare_buff(xdp, buf, pad, len - sock_hlen, true);

	++nvq->batched_xdp;

	return 0;

err:
	page_frag_free(buf);
	return ret;
}

static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	int sent_pkts = 0;
	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
	bool busyloop_intr;

	do {
		busyloop_intr = false;
		if (nvq->done_idx == VHOST_NET_BATCH)
			vhost_tx_batch(net, nvq, sock, &msg);

		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			/* Kicks are disabled at this point, break loop and
			 * process any remaining batched packets. Queue will
			 * be re-enabled afterwards.
			 */
			break;
		}

		total_len += len;

		/* For simplicity, TX batching is only enabled if
		 * sndbuf is unlimited.
		 */
		if (sock_can_batch) {
			err = vhost_net_build_xdp(nvq, &msg.msg_iter);
			if (!err) {
				goto done;
			} else if (unlikely(err != -ENOSPC)) {
				vhost_tx_batch(net, nvq, sock, &msg);
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}

			/* We can't build XDP buff, go for single
			 * packet path but let's flush batched
			 * packets.
			 */
792 */ 793 vhost_tx_batch(net, nvq, sock, &msg); 794 msg.msg_control = NULL; 795 } else { 796 if (tx_can_batch(vq, total_len)) 797 msg.msg_flags |= MSG_MORE; 798 else 799 msg.msg_flags &= ~MSG_MORE; 800 } 801 802 err = sock->ops->sendmsg(sock, &msg, len); 803 if (unlikely(err < 0)) { 804 if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) { 805 vhost_discard_vq_desc(vq, 1); 806 vhost_net_enable_vq(net, vq); 807 break; 808 } 809 pr_debug("Fail to send packet: err %d", err); 810 } else if (unlikely(err != len)) 811 pr_debug("Truncated TX packet: len %d != %zd\n", 812 err, len); 813 done: 814 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); 815 vq->heads[nvq->done_idx].len = 0; 816 ++nvq->done_idx; 817 } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); 818 819 /* Kicks are still disabled, dispatch any remaining batched msgs. */ 820 vhost_tx_batch(net, nvq, sock, &msg); 821 822 if (unlikely(busyloop_intr)) 823 /* If interrupted while doing busy polling, requeue the 824 * handler to be fair handle_rx as well as other tasks 825 * waiting on cpu. 826 */ 827 vhost_poll_queue(&vq->poll); 828 else 829 /* All of our work has been completed; however, before 830 * leaving the TX handler, do one last check for work, 831 * and requeue handler if necessary. If there is no work, 832 * queue will be reenabled. 833 */ 834 vhost_net_busy_poll_try_queue(net, vq); 835 } 836 837 static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) 838 { 839 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; 840 struct vhost_virtqueue *vq = &nvq->vq; 841 unsigned out, in; 842 int head; 843 struct msghdr msg = { 844 .msg_name = NULL, 845 .msg_namelen = 0, 846 .msg_control = NULL, 847 .msg_controllen = 0, 848 .msg_flags = MSG_DONTWAIT, 849 }; 850 struct tun_msg_ctl ctl; 851 size_t len, total_len = 0; 852 int err; 853 struct vhost_net_ubuf_ref *ubufs; 854 struct ubuf_info_msgzc *ubuf; 855 bool zcopy_used; 856 int sent_pkts = 0; 857 858 do { 859 bool busyloop_intr; 860 861 /* Release DMAs done buffers first */ 862 vhost_zerocopy_signal_used(net, vq); 863 864 busyloop_intr = false; 865 head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, 866 &busyloop_intr); 867 /* On error, stop handling until the next kick. */ 868 if (unlikely(head < 0)) 869 break; 870 /* Nothing new? Wait for eventfd to tell us they refilled. 
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;
	int err;
	struct vhost_net_ubuf_ref *ubufs;
	struct ubuf_info_msgzc *ubuf;
	bool zcopy_used;
	int sent_pkts = 0;

	do {
		bool busyloop_intr;

		/* Release DMAs done buffers first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		zcopy_used = len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			ubuf = nvq->ubuf_info + nvq->upend_idx;
			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			ubuf->ubuf.ops = &vhost_ubuf_ops;
			ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
			refcount_set(&ubuf->ubuf.refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = &ubuf->ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		total_len += len;
		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;

			if (zcopy_used) {
				if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
					vhost_net_ubuf_put(ubufs);
				if (retry)
					nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
						% UIO_MAXIOV;
				else
					vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
			}
			if (retry) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Fail to send packet: err %d", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 len;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned in, log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	bool set_num_buffers;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;
	int recv_pkts = 0;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
	set_num_buffers = mergeable ||
			  vhost_has_feature(vq, VIRTIO_F_VERSION_1);

	do {
		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr);
		if (!sock_len)
			break;
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(set_num_buffers) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_net_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len,
					vq->iov, in);
		total_len += vhost_len;
	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));

	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else if (!sock_len)
		vhost_net_enable_vq(net, vq);
out:
	vhost_net_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}
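/*
 * One vhost_net instance is allocated per open of the character device,
 * together with the RX batch queue and the TX XDP buff array; the TX/RX
 * kick handlers and the backend poll callbacks are registered with the
 * vhost core here.
 */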
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
	if (!xdp) {
		kfree(vqs);
		kvfree(n);
		kfree(queue);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
		       UIO_MAXIOV + VHOST_NET_BATCH,
		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
		       NULL);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
			vqs[VHOST_NET_VQ_TX]);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
			vqs[VHOST_NET_VQ_RX]);

	f->private_data = n;
	page_frag_cache_init(&n->pf_cache);

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vhost_vq_get_backend(vq);
	vhost_net_disable_vq(n, vq);
	vhost_vq_set_backend(vq, NULL);
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}
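/*
 * Flush all pending vhost work and, when zerocopy TX is active, wait for
 * every outstanding DMA to complete. tx_flush tells the TX path not to
 * start new zerocopy DMAs while the wait is in progress.
 */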
static void vhost_net_flush(struct vhost_net *n)
{
	vhost_dev_flush(&n->dev);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	kfree(n->dev.vqs);
	page_frag_cache_drain(&n->pf_cache);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (sock->sk->sk_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
	struct ptr_ring *ring;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}
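/*
 * Attach a new backend (tap/macvtap or raw packet socket) to one
 * virtqueue: polling on the old socket is stopped, its cached rx_ring
 * entries are returned, and any zerocopy buffers still in flight on the
 * old backend are waited for before it is released.
 */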
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	if (fd == -1)
		vhost_clear_msg(&n->dev);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vhost_vq_get_backend(vq);
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vhost_vq_set_backend(vq, sock);
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX) {
			if (sock)
				nvq->rx_ring = get_tap_ptr_ring(sock->file);
			else
				nvq->rx_ring = NULL;
		}

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_dev_flush(&n->dev);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vhost_vq_set_backend(vq, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	if (sock)
		sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}
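/*
 * The virtio-net header size follows the negotiated features: mergeable
 * RX buffers or VIRTIO_F_VERSION_1 select virtio_net_hdr_mrg_rxbuf, and
 * the UDP tunnel GSO features grow it to virtio_net_hdr_v1_hash_tunnel.
 * VHOST_NET_F_VIRTIO_NET_HDR decides whether vhost or the backend socket
 * supplies that header.
 */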
static int vhost_net_set_features(struct vhost_net *n, const u64 *features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = virtio_features_test_bit(features, VIRTIO_NET_F_MRG_RXBUF) ||
		  virtio_features_test_bit(features, VIRTIO_F_VERSION_1) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);

	if (virtio_features_test_bit(features,
				     VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO) ||
	    virtio_features_test_bit(features,
				     VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO))
		hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel);

	if (virtio_features_test_bit(features, VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if (virtio_features_test_bit(features, VHOST_F_LOG_ALL) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if (virtio_features_test_bit(features, VIRTIO_F_ACCESS_PLATFORM)) {
		if (vhost_init_device_iotlb(&n->dev))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		virtio_features_copy(n->vqs[i].vq.acked_features_array,
				     features);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	u64 all_features[VIRTIO_FEATURES_DWORDS];
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features, count, copied;
	int r, i;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = vhost_net_features[0];
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~vhost_net_features[0])
			return -EOPNOTSUPP;

		virtio_features_from_u64(all_features, features);
		return vhost_net_set_features(n, all_features);
	case VHOST_GET_FEATURES_ARRAY:
		if (copy_from_user(&count, featurep, sizeof(count)))
			return -EFAULT;

		/* Copy the net features, up to the user-provided buffer size */
		argp += sizeof(u64);
		copied = min(count, VIRTIO_FEATURES_DWORDS);
		if (copy_to_user(argp, vhost_net_features,
				 copied * sizeof(u64)))
			return -EFAULT;

		/* Zero the trailing space provided by user-space, if any */
		if (clear_user(argp, size_mul(count - copied, sizeof(u64))))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES_ARRAY:
		if (copy_from_user(&count, featurep, sizeof(count)))
			return -EFAULT;

		virtio_features_zero(all_features);
		argp += sizeof(u64);
		copied = min(count, VIRTIO_FEATURES_DWORDS);
		if (copy_from_user(all_features, argp, copied * sizeof(u64)))
			return -EFAULT;

		/*
		 * Any feature specified by user-space above
		 * VIRTIO_FEATURES_MAX is not supported by definition.
		 */
		for (i = copied; i < count; ++i) {
			if (copy_from_user(&features, featurep + 1 + i,
					   sizeof(features)))
				return -EFAULT;
			if (features)
				return -EOPNOTSUPP;
		}

		for (i = 0; i < VIRTIO_FEATURES_DWORDS; i++)
			if (all_features[i] & ~vhost_net_features[i])
				return -EOPNOTSUPP;

		return vhost_net_set_features(n, all_features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_NET_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_NET_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&n->dev, features);
		return 0;
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int __init vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void __exit vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");