/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <machine/vmm_snapshot.h>
#include <net/ethernet.h>
#include <net/if.h> /* IFNAMSIZ */

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"
#include "net_utils.h"
#include "net_backends.h"
#include "iov.h"

#define VTNET_RINGSZ		1024

#define VTNET_MAXSEGS		256

#define VTNET_MAX_PKT_LEN	(65536 + 64)

#define VTNET_MIN_MTU		ETHERMIN
#define VTNET_MAX_MTU		65535

#define VTNET_S_HOSTCAPS      \
  ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | \
    VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)

/*
 * PCI config-space "registers"
 */
struct virtio_net_config {
	uint8_t  mac[6];
	uint16_t status;
	uint16_t max_virtqueue_pairs;
	uint16_t mtu;
} __packed;
/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2	/* NB: not yet supported */

#define VTNET_MAXQ	3

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) PRINTLN params
#define WPRINTF(params) PRINTLN params

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
	pthread_mutex_t vsc_mtx;

	net_backend_t	*vsc_be;

	bool		features_negotiated;	/* protected by rx_mtx */

	int		resetting;	/* protected by tx_mtx */

	uint64_t	vsc_features;	/* negotiated features */

	pthread_mutex_t	rx_mtx;
	int		rx_merge;	/* merged rx bufs in use */

	pthread_t	tx_tid;
	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;

	size_t		vhdrlen;	/* virtio-net header length used by the frontend */
	size_t		be_vhdrlen;	/* vnet header length used by the backend (0 if none) */

	struct virtio_net_config vsc_config;
	struct virtio_consts vsc_consts;
};

static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);
#ifdef BHYVE_SNAPSHOT
static void pci_vtnet_pause(void *);
static void pci_vtnet_resume(void *);
static int pci_vtnet_snapshot(void *, struct vm_snapshot_meta *);
#endif

static struct virtio_consts vtnet_vi_consts = {
	.vc_name =	"vtnet",
	.vc_nvq =	VTNET_MAXQ - 1,
	.vc_cfgsize =	sizeof(struct virtio_net_config),
	.vc_reset =	pci_vtnet_reset,
	.vc_cfgread =	pci_vtnet_cfgread,
	.vc_cfgwrite =	pci_vtnet_cfgwrite,
	.vc_apply_features = pci_vtnet_neg_features,
	.vc_hv_caps =	VTNET_S_HOSTCAPS,
#ifdef BHYVE_SNAPSHOT
	.vc_pause =	pci_vtnet_pause,
	.vc_resume =	pci_vtnet_resume,
	.vc_snapshot =	pci_vtnet_snapshot,
#endif
};

static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested !"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/*
	 * Make sure receive operation is disabled at least until we
	 * re-negotiate the features, since receive operation depends
	 * on the value of sc->rx_merge and the header length, which
	 * are both set in pci_vtnet_neg_features().
	 * Receive operation will be enabled again once the guest adds
	 * the first receive buffers and kicks us.
	 */
	sc->features_negotiated = false;
	netbe_rx_disable(sc->vsc_be);

	/* Set sc->resetting and give a chance to the TX thread to stop. */
	pthread_mutex_lock(&sc->tx_mtx);
	sc->resetting = 1;
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}

	/*
	 * Now reset rings, MSI-X vectors, and negotiated capabilities.
	 * Do that with the TX lock held, since we need to reset
	 * sc->resetting.
	 */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
	pthread_mutex_unlock(&sc->tx_mtx);
	pthread_mutex_unlock(&sc->rx_mtx);
}

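/*
 * Trim 'hlen' bytes (the virtio-net header) from the front of the
 * scatter-gather list 'iov', adjusting '*iovcnt' as needed. Returns a
 * pointer to the first iovec still carrying data, or NULL if the first
 * fragment cannot hold the whole header or nothing is left after
 * trimming.
 */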
static __inline struct iovec *
iov_trim_hdr(struct iovec *iov, int *iovcnt, unsigned int hlen)
{
	struct iovec *riov;

	if (iov[0].iov_len < hlen) {
		/*
		 * Not enough header space in the first fragment.
		 * That's not ok for us.
		 */
		return (NULL);
	}

	iov[0].iov_len -= hlen;
	if (iov[0].iov_len == 0) {
		*iovcnt -= 1;
		if (*iovcnt == 0) {
			/*
			 * Only space for the header. That's not
			 * enough for us.
			 */
			return (NULL);
		}
		riov = &iov[1];
	} else {
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + hlen);
		riov = &iov[0];
	}

	return (riov);
}

/*
 * Bookkeeping for one guest descriptor chain, used when mergeable rx
 * buffers are in effect.
 */
struct virtio_mrg_rxbuf_info {
	uint16_t idx;	/* head index of the chain */
	uint16_t pad;
	uint32_t len;	/* total buffer space in the chain */
};

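/*
 * Receive path: drain packets from the backend into the buffers that the
 * guest posted on the receive virtqueue. With mergeable rx buffers a
 * single packet may span multiple descriptor chains; if the backend does
 * not supply a virtio-net header, a zeroed one is prepended.
 * Called with rx_mtx held.
 */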
static void
pci_vtnet_rx(struct pci_vtnet_softc *sc)
{
	int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen;
	struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS];
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct vqueue_info *vq;
	struct vi_req req;

	vq = &sc->vsc_queues[VTNET_RXQ];

	/* Features must be negotiated */
	if (!sc->features_negotiated) {
		return;
	}

	for (;;) {
		struct virtio_net_rxhdr *hdr;
		uint32_t riov_bytes;
		struct iovec *riov;
		uint32_t ulen;
		int riov_len;
		int n_chains;
		ssize_t rlen;
		ssize_t plen;

		plen = netbe_peek_recvlen(sc->vsc_be);
		if (plen <= 0) {
			/*
			 * No more packets (plen == 0), or backend errored
			 * (plen < 0). Interrupt if needed and stop.
			 */
			vq_endchains(vq, /*used_all_avail=*/0);
			return;
		}
		plen += prepend_hdr_len;

		/*
		 * Get a descriptor chain to store the next ingress
		 * packet. In case of mergeable rx buffers, get as
		 * many chains as necessary in order to make room
		 * for plen bytes.
		 */
		riov_bytes = 0;
		riov_len = 0;
		riov = iov;
		n_chains = 0;
		do {
			int n = vq_getchain(vq, riov, VTNET_MAXSEGS - riov_len,
			    &req);
			info[n_chains].idx = req.idx;

			if (n == 0) {
				/*
				 * No rx buffers. Enable RX kicks and double
				 * check.
				 */
				vq_kick_enable(vq);
				if (!vq_has_descs(vq)) {
					/*
					 * Still no buffers. Return the unused
					 * chains (if any), interrupt if needed
					 * (including for NOTIFY_ON_EMPTY), and
					 * disable the backend until the next
					 * kick.
					 */
					vq_retchains(vq, n_chains);
					vq_endchains(vq, /*used_all_avail=*/1);
					netbe_rx_disable(sc->vsc_be);
					return;
				}

				/* More rx buffers found, so keep going. */
				vq_kick_disable(vq);
				continue;
			}
			assert(n >= 1 && riov_len + n <= VTNET_MAXSEGS);
			riov_len += n;
			if (!sc->rx_merge) {
				n_chains = 1;
				break;
			}
			info[n_chains].len = (uint32_t)count_iov(riov, n);
			riov_bytes += info[n_chains].len;
			riov += n;
			n_chains++;
		} while (riov_bytes < plen && riov_len < VTNET_MAXSEGS);

		riov = iov;
		hdr = riov[0].iov_base;
		if (prepend_hdr_len > 0) {
			/*
			 * The frontend uses a virtio-net header, but the
			 * backend does not. We need to prepend a zeroed
			 * header.
			 */
			riov = iov_trim_hdr(riov, &riov_len, prepend_hdr_len);
			if (riov == NULL) {
				/*
				 * The first collected chain is nonsensical,
				 * as it is not even enough to store the
				 * virtio-net header. Just drop it.
				 */
				vq_relchain(vq, info[0].idx, 0);
				vq_retchains(vq, n_chains - 1);
				continue;
			}
			memset(hdr, 0, prepend_hdr_len);
		}

		rlen = netbe_recv(sc->vsc_be, riov, riov_len);
		if (rlen != plen - prepend_hdr_len) {
			/*
			 * If this happens it means there is something
			 * wrong with the backend (e.g., some other
			 * process is stealing our packets).
			 */
			WPRINTF(("netbe_recv: expected %zd bytes, "
			    "got %zd", plen - prepend_hdr_len, rlen));
			vq_retchains(vq, n_chains);
			continue;
		}

		ulen = (uint32_t)plen;

		/*
		 * Publish the used buffers to the guest, reporting the
		 * number of bytes that we wrote.
		 */
		if (!sc->rx_merge) {
			vq_relchain(vq, info[0].idx, ulen);
		} else {
			uint32_t iolen;
			int i = 0;

			do {
				iolen = info[i].len;
				if (iolen > ulen) {
					iolen = ulen;
				}
				vq_relchain_prepare(vq, info[i].idx, iolen);
				ulen -= iolen;
				i++;
			} while (ulen > 0);

			hdr->vrh_bufs = i;
			vq_relchain_publish(vq);
			assert(i == n_chains);
		}
	}

}

/*
 * Called when there is read activity on the backend file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 */
static void
pci_vtnet_rx_callback(int fd __unused, enum ev_type type __unused, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	pci_vtnet_rx(sc);
	pthread_mutex_unlock(&sc->rx_mtx);

}

/* Called on RX kick. */
static void
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * A qnotify means that the rx process can now begin.
	 * Enable RX only if features are negotiated.
	 */
	pthread_mutex_lock(&sc->rx_mtx);
	if (!sc->features_negotiated) {
		pthread_mutex_unlock(&sc->rx_mtx);
		return;
	}

	vq_kick_disable(vq);
	netbe_rx_enable(sc->vsc_be);
	pthread_mutex_unlock(&sc->rx_mtx);
}

/* TX virtqueue processing, called by the TX thread. */
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct iovec *siov = iov;
	struct vi_req req;
	ssize_t len;
	int n;

	/*
	 * Obtain chain of descriptors. The first descriptor also
	 * contains the virtio-net header.
	 */
	n = vq_getchain(vq, iov, VTNET_MAXSEGS, &req);
	assert(n >= 1 && n <= VTNET_MAXSEGS);

	if (sc->vhdrlen != sc->be_vhdrlen) {
		/*
		 * The frontend uses a virtio-net header, but the backend
		 * does not. We simply strip the header and ignore it, as
		 * it should be zero-filled.
		 */
		siov = iov_trim_hdr(siov, &n, sc->vhdrlen);
	}

	if (siov == NULL) {
		/* The chain is nonsensical. Just drop it. */
		len = 0;
	} else {
		len = netbe_send(sc->vsc_be, siov, n);
		if (len < 0) {
			/*
			 * If send failed, report that 0 bytes
			 * were read.
			 */
			len = 0;
		}
	}

	/*
	 * Return the processed chain to the guest, reporting
	 * the number of bytes that we read.
	 */
	vq_relchain(vq, req.idx, len);
}

/* Called on TX kick. */
static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq_kick_disable(vq);
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * Thread which handles processing of TX descriptors.
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Wait until the tx queue pointers are initialized and the
	 * first tx is signaled.
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			vq_kick_enable(vq);
			if (!sc->resetting && vq_has_descs(vq))
				break;

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq_kick_disable(vq);
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * is found
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
		 */
		vq_endchains(vq, /*used_all_avail=*/1);

		pthread_mutex_lock(&sc->tx_mtx);
	}
}

#ifdef notyet
static void
pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
{

	DPRINTF(("vtnet: control qnotify!"));
}
#endif

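/*
 * Device initialization: parse the "mac", "mtu" and "backend" config
 * values, set up the virtqueues and the PCI config space, connect to the
 * network backend and spawn the TX processing thread.
 */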
static int
pci_vtnet_init(struct pci_devinst *pi, nvlist_t *nvl)
{
	struct pci_vtnet_softc *sc;
	const char *value;
	char tname[MAXCOMLEN + 1];
	unsigned long mtu = ETHERMTU;
	int err;

	/*
	 * Allocate data structures for further virtio initializations.
	 * sc also contains a copy of vtnet_vi_consts, since capabilities
	 * change depending on the backend.
	 */
	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	sc->vsc_consts = vtnet_vi_consts;
	pthread_mutex_init(&sc->vsc_mtx, NULL);

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	value = get_config_value_node(nvl, "mac");
	if (value != NULL) {
		err = net_parsemac(value, sc->vsc_config.mac);
		if (err) {
			free(sc);
			return (err);
		}
	} else
		net_genmac(pi, sc->vsc_config.mac);

	value = get_config_value_node(nvl, "mtu");
	if (value != NULL) {
		err = net_parsemtu(value, &mtu);
		if (err) {
			free(sc);
			return (err);
		}

		if (mtu < VTNET_MIN_MTU || mtu > VTNET_MAX_MTU) {
			err = EINVAL;
			errno = EINVAL;
			free(sc);
			return (err);
		}
		sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MTU;
	}
	sc->vsc_config.mtu = mtu;

	/* Permit interfaces without a configured backend. */
	if (get_config_value_node(nvl, "backend") != NULL) {
		err = netbe_init(&sc->vsc_be, nvl, pci_vtnet_rx_callback, sc);
		if (err) {
			free(sc);
			return (err);
		}
	}

	sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MRG_RXBUF |
	    netbe_get_cap(sc->vsc_be);

	/*
	 * Since we do not actually support multiqueue,
	 * set the maximum virtqueue pairs to 1.
	 */
	sc->vsc_config.max_virtqueue_pairs = 1;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is always up. */
	sc->vsc_config.status = 1;

	vi_softc_linkup(&sc->vsc_vs, &sc->vsc_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) {
		free(sc);
		return (1);
	}

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 0;
	sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}

static int
pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	if (offset < (int)sizeof(sc->vsc_config.mac)) {
		assert(offset + size <= (int)sizeof(sc->vsc_config.mac));
		/*
		 * The driver is allowed to change the MAC address
		 */
		ptr = &sc->vsc_config.mac[offset];
		memcpy(ptr, &value, size);
	} else {
		/* silently ignore other writes */
		DPRINTF(("vtnet: write to readonly reg %d", offset));
	}

	return (0);
}

static int
pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vsc_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

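/*
 * Called when the guest acknowledges its feature set: record the
 * negotiated features, select the virtio-net header length (with or
 * without the 2-byte buffer count used by mergeable rx buffers) and pass
 * the negotiated capabilities down to the backend.
 */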
static void
pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtnet_softc *sc = vsc;

	sc->vsc_features = negotiated_features;

	if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) {
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr);
		sc->rx_merge = 1;
	} else {
		/*
		 * Without mergeable rx buffers, virtio-net header is 2
		 * bytes shorter than sizeof(struct virtio_net_rxhdr).
		 */
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
		sc->rx_merge = 0;
	}

	/* Tell the backend to enable some capabilities it has advertised. */
	netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen);
	sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be);
	assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen);

	pthread_mutex_lock(&sc->rx_mtx);
	sc->features_negotiated = true;
	pthread_mutex_unlock(&sc->rx_mtx);
}

#ifdef BHYVE_SNAPSHOT
static void
pci_vtnet_pause(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device pause requested !\n"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/* Wait for the transmit thread to finish its processing. */
	pthread_mutex_lock(&sc->tx_mtx);
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}
}

static void
pci_vtnet_resume(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device resume requested !\n"));

	pthread_mutex_unlock(&sc->tx_mtx);
	/* The RX lock should have been acquired in vtnet_pause. */
	pthread_mutex_unlock(&sc->rx_mtx);
}

static int
pci_vtnet_snapshot(void *vsc, struct vm_snapshot_meta *meta)
{
	int ret;
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device snapshot requested !\n"));

	/*
	 * Queues and consts should have been saved by the more generic
	 * vi_pci_snapshot function. We need to save only our features and
	 * config.
	 */

	SNAPSHOT_VAR_OR_LEAVE(sc->vsc_features, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(sc->features_negotiated, meta, ret, done);

	/* Force a reapply of the negotiated features at restore time. */
	if (meta->op == VM_SNAPSHOT_RESTORE &&
	    sc->features_negotiated) {
		pci_vtnet_neg_features(sc, sc->vsc_features);
		netbe_rx_enable(sc->vsc_be);
	}

	SNAPSHOT_VAR_OR_LEAVE(sc->vsc_config, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(sc->rx_merge, meta, ret, done);

	SNAPSHOT_VAR_OR_LEAVE(sc->vhdrlen, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(sc->be_vhdrlen, meta, ret, done);

done:
	return (ret);
}
#endif

static const struct pci_devemu pci_de_vnet = {
	.pe_emu =	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_legacy_config = netbe_legacy_config,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read,
#ifdef BHYVE_SNAPSHOT
	.pe_snapshot =	vi_pci_snapshot,
	.pe_pause =	vi_pci_pause,
	.pe_resume =	vi_pci_resume,
#endif
};
PCI_EMUL_SET(pci_de_vnet);
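
/*
 * Example usage (hypothetical slot and backend values): this emulation is
 * typically attached from the bhyve command line, e.g.
 *
 *   bhyve -s 2:0,virtio-net,tap0,mac=58:9c:fc:00:00:01 ... <vmname>
 *
 * where "tap0" selects the backend and the optional "mac"/"mtu" key=value
 * pairs become the config values consumed by pci_vtnet_init() above.
 */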