/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <machine/vmm_snapshot.h>
#include <net/ethernet.h>
#include <net/if.h> /* IFNAMSIZ */

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"
#include "net_utils.h"
#include "net_backends.h"
#include "iov.h"

#define VTNET_RINGSZ	1024

#define VTNET_MAXSEGS	256

#define VTNET_MAX_PKT_LEN	(65536 + 64)

#define VTNET_MIN_MTU	ETHERMIN
#define VTNET_MAX_MTU	65535

#define VTNET_S_HOSTCAPS      \
  ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | \
    VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)

/*
 * PCI config-space "registers"
 */
struct virtio_net_config {
	uint8_t  mac[6];
	uint16_t status;
	uint16_t max_virtqueue_pairs;
	uint16_t mtu;
} __packed;

/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2	/* NB: not yet supported */

#define VTNET_MAXQ	3

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) PRINTLN params
#define WPRINTF(params) PRINTLN params

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
	pthread_mutex_t vsc_mtx;

	net_backend_t	*vsc_be;

	int		resetting;	/* protected by tx_mtx */

	uint64_t	vsc_features;	/* negotiated features */

	pthread_mutex_t	rx_mtx;
	int		rx_merge;	/* merged rx bufs in use */

	pthread_t	tx_tid;
	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;

	size_t		vhdrlen;
	size_t		be_vhdrlen;

	struct virtio_net_config vsc_config;
	struct virtio_consts vsc_consts;
};

static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);
#ifdef BHYVE_SNAPSHOT
static void pci_vtnet_pause(void *);
static void pci_vtnet_resume(void *);
static int pci_vtnet_snapshot(void *, struct vm_snapshot_meta *);
#endif

static struct virtio_consts vtnet_vi_consts = {
	"vtnet",		/* our name */
	VTNET_MAXQ - 1,		/* we currently support 2 virtqueues */
	sizeof(struct virtio_net_config), /* config reg size */
	pci_vtnet_reset,	/* reset */
	NULL,			/* device-wide qnotify -- not used */
	pci_vtnet_cfgread,	/* read PCI config */
	pci_vtnet_cfgwrite,	/* write PCI config */
	pci_vtnet_neg_features,	/* apply negotiated features */
	VTNET_S_HOSTCAPS,	/* our capabilities */
#ifdef BHYVE_SNAPSHOT
	pci_vtnet_pause,	/* pause rx/tx threads */
	pci_vtnet_resume,	/* resume rx/tx threads */
	pci_vtnet_snapshot,	/* save / restore device state */
#endif
};

static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested !"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/*
	 * Make sure receive operation is disabled at least until we
	 * re-negotiate the features, since receive operation depends
	 * on the value of sc->rx_merge and the header length, which
	 * are both set in pci_vtnet_neg_features().
	 * Receive operation will be enabled again once the guest adds
	 * the first receive buffers and kicks us.
	 */
	netbe_rx_disable(sc->vsc_be);

	/* Set sc->resetting and give a chance to the TX thread to stop. */
	pthread_mutex_lock(&sc->tx_mtx);
	sc->resetting = 1;
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}

	/*
	 * Now reset rings, MSI-X vectors, and negotiated capabilities.
	 * Do that with the TX lock held, since we need to reset
	 * sc->resetting.
	 */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
	pthread_mutex_unlock(&sc->tx_mtx);
	pthread_mutex_unlock(&sc->rx_mtx);
}

static __inline struct iovec *
iov_trim_hdr(struct iovec *iov, int *iovcnt, unsigned int hlen)
{
	struct iovec *riov;

	if (iov[0].iov_len < hlen) {
		/*
		 * Not enough header space in the first fragment.
		 * That's not ok for us.
		 */
		return NULL;
	}

	iov[0].iov_len -= hlen;
	if (iov[0].iov_len == 0) {
		*iovcnt -= 1;
		if (*iovcnt == 0) {
			/*
			 * Only space for the header. That's not
			 * enough for us.
			 */
			return NULL;
		}
		riov = &iov[1];
	} else {
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + hlen);
		riov = &iov[0];
	}

	return (riov);
}

struct virtio_mrg_rxbuf_info {
	uint16_t idx;
	uint16_t pad;
	uint32_t len;
};

static void
pci_vtnet_rx(struct pci_vtnet_softc *sc)
{
	int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen;
	struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS];
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct vqueue_info *vq;

	vq = &sc->vsc_queues[VTNET_RXQ];
	for (;;) {
		struct virtio_net_rxhdr *hdr;
		uint32_t riov_bytes;
		struct iovec *riov;
		uint32_t ulen;
		int riov_len;
		int n_chains;
		ssize_t rlen;
		ssize_t plen;

		plen = netbe_peek_recvlen(sc->vsc_be);
		if (plen <= 0) {
			/*
			 * No more packets (plen == 0), or backend errored
			 * (plen < 0). Interrupt if needed and stop.
			 */
			vq_endchains(vq, /*used_all_avail=*/0);
			return;
		}
		plen += prepend_hdr_len;

		/*
		 * Get a descriptor chain to store the next ingress
		 * packet. In case of mergeable rx buffers, get as
		 * many chains as necessary in order to make room
		 * for plen bytes.
		 */
		riov_bytes = 0;
		riov_len = 0;
		riov = iov;
		n_chains = 0;
		do {
			int n = vq_getchain(vq, &info[n_chains].idx, riov,
			    VTNET_MAXSEGS - riov_len, NULL);

			if (n == 0) {
				/*
				 * No rx buffers. Enable RX kicks and double
				 * check.
				 */
				vq_kick_enable(vq);
				if (!vq_has_descs(vq)) {
					/*
					 * Still no buffers. Return the unused
					 * chains (if any), interrupt if needed
					 * (including for NOTIFY_ON_EMPTY), and
					 * disable the backend until the next
					 * kick.
					 */
					vq_retchains(vq, n_chains);
					vq_endchains(vq, /*used_all_avail=*/1);
					netbe_rx_disable(sc->vsc_be);
					return;
				}

				/* More rx buffers found, so keep going. */
				vq_kick_disable(vq);
				continue;
			}
			assert(n >= 1 && riov_len + n <= VTNET_MAXSEGS);
			riov_len += n;
			if (!sc->rx_merge) {
				n_chains = 1;
				break;
			}
			info[n_chains].len = (uint32_t)count_iov(riov, n);
			riov_bytes += info[n_chains].len;
			riov += n;
			n_chains++;
		} while (riov_bytes < plen && riov_len < VTNET_MAXSEGS);

		riov = iov;
		hdr = riov[0].iov_base;
		if (prepend_hdr_len > 0) {
			/*
			 * The frontend uses a virtio-net header, but the
			 * backend does not. We need to prepend a zeroed
			 * header.
			 */
			riov = iov_trim_hdr(riov, &riov_len, prepend_hdr_len);
			if (riov == NULL) {
				/*
				 * The first collected chain is nonsensical,
				 * as it is not even enough to store the
				 * virtio-net header. Just drop it.
				 */
				vq_relchain(vq, info[0].idx, 0);
				vq_retchains(vq, n_chains - 1);
				continue;
			}
			memset(hdr, 0, prepend_hdr_len);
		}

		rlen = netbe_recv(sc->vsc_be, riov, riov_len);
		if (rlen != plen - prepend_hdr_len) {
			/*
			 * If this happens it means there is something
			 * wrong with the backend (e.g., some other
			 * process is stealing our packets).
			 */
			WPRINTF(("netbe_recv: expected %zd bytes, "
			    "got %zd", plen - prepend_hdr_len, rlen));
			vq_retchains(vq, n_chains);
			continue;
		}

		ulen = (uint32_t)plen;

		/*
		 * Publish the used buffers to the guest, reporting the
		 * number of bytes that we wrote.
		 */
		if (!sc->rx_merge) {
			vq_relchain(vq, info[0].idx, ulen);
		} else {
			uint32_t iolen;
			int i = 0;

			do {
				iolen = info[i].len;
				if (iolen > ulen) {
					iolen = ulen;
				}
				vq_relchain_prepare(vq, info[i].idx, iolen);
				ulen -= iolen;
				i++;
			} while (ulen > 0);

			hdr->vrh_bufs = i;
			vq_relchain_publish(vq);
			assert(i == n_chains);
		}
	}
}

/*
 * Called when there is read activity on the backend file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 */
static void
pci_vtnet_rx_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	pci_vtnet_rx(sc);
	pthread_mutex_unlock(&sc->rx_mtx);
}

/* Called on RX kick. */
static void
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * A qnotify means that the rx process can now begin.
	 */
	pthread_mutex_lock(&sc->rx_mtx);
	vq_kick_disable(vq);
	netbe_rx_enable(sc->vsc_be);
	pthread_mutex_unlock(&sc->rx_mtx);
}

/* TX virtqueue processing, called by the TX thread. */
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct iovec *siov = iov;
	uint16_t idx;
	ssize_t len;
	int n;

	/*
	 * Obtain chain of descriptors. The first descriptor also
	 * contains the virtio-net header.
	 */
	n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
	assert(n >= 1 && n <= VTNET_MAXSEGS);

	if (sc->vhdrlen != sc->be_vhdrlen) {
		/*
		 * The frontend uses a virtio-net header, but the backend
		 * does not. We simply strip the header and ignore it, as
		 * it should be zero-filled.
		 */
		siov = iov_trim_hdr(siov, &n, sc->vhdrlen);
	}

	if (siov == NULL) {
		/* The chain is nonsensical. Just drop it. */
		len = 0;
	} else {
		len = netbe_send(sc->vsc_be, siov, n);
		if (len < 0) {
			/*
			 * If send failed, report that 0 bytes
			 * were read.
			 */
			len = 0;
		}
	}

	/*
	 * Return the processed chain to the guest, reporting
	 * the number of bytes that we read.
	 */
	vq_relchain(vq, idx, len);
}

/* Called on TX kick. */
static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq_kick_disable(vq);
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * Thread which will handle processing of TX desc
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Wait until the tx queue pointers get initialised and the
	 * first tx is signaled.
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			vq_kick_enable(vq);
			if (!sc->resetting && vq_has_descs(vq))
				break;

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq_kick_disable(vq);
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * is found
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
		 */
		vq_endchains(vq, /*used_all_avail=*/1);

		pthread_mutex_lock(&sc->tx_mtx);
	}
}

#ifdef notyet
static void
pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
{

	DPRINTF(("vtnet: control qnotify!"));
}
#endif

static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct pci_vtnet_softc *sc;
	char tname[MAXCOMLEN + 1];
	int mac_provided;
	int mtu_provided;
	unsigned long mtu = ETHERMTU;

	/*
	 * Allocate data structures for further virtio initializations.
	 * sc also contains a copy of vtnet_vi_consts, since capabilities
	 * change depending on the backend.
	 */
	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	sc->vsc_consts = vtnet_vi_consts;
	pthread_mutex_init(&sc->vsc_mtx, NULL);

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	/*
	 * Attempt to open the backend device and read the MAC address
	 * if specified.
	 */
	mac_provided = 0;
	mtu_provided = 0;
	if (opts != NULL) {
		char *optscopy;
		char *vtopts;
		int err = 0;

		/* Get the device name. */
		optscopy = vtopts = strdup(opts);
		(void) strsep(&vtopts, ",");

		/*
		 * Parse the list of options in the form
		 * key1=value1,...,keyN=valueN.
		 */
		while (vtopts != NULL) {
			char *value = vtopts;
			char *key;

			key = strsep(&value, "=");
			if (value == NULL)
				break;
			vtopts = value;
			(void) strsep(&vtopts, ",");

			if (strcmp(key, "mac") == 0) {
				err = net_parsemac(value, sc->vsc_config.mac);
				if (err)
					break;
				mac_provided = 1;
			} else if (strcmp(key, "mtu") == 0) {
				err = net_parsemtu(value, &mtu);
				if (err)
					break;

				if (mtu < VTNET_MIN_MTU || mtu > VTNET_MAX_MTU) {
					err = EINVAL;
					errno = EINVAL;
					break;
				}
				mtu_provided = 1;
			}
		}

		free(optscopy);

		if (err) {
			free(sc);
			return (err);
		}

		err = netbe_init(&sc->vsc_be, opts, pci_vtnet_rx_callback,
		    sc);

		if (err) {
			free(sc);
			return (err);
		}
		sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MRG_RXBUF |
		    netbe_get_cap(sc->vsc_be);
	}

	if (!mac_provided) {
		net_genmac(pi, sc->vsc_config.mac);
	}

	sc->vsc_config.mtu = mtu;
	if (mtu_provided) {
		sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MTU;
	}

	/*
	 * Since we do not actually support multiqueue,
	 * set the maximum virtqueue pairs to 1.
	 */
	sc->vsc_config.max_virtqueue_pairs = 1;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is up if we managed to open backend device. */
	sc->vsc_config.status = (opts == NULL || sc->vsc_be);

	vi_softc_linkup(&sc->vsc_vs, &sc->vsc_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) {
		free(sc);
		return (1);
	}

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 0;
	sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}

static int
pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	if (offset < (int)sizeof(sc->vsc_config.mac)) {
		assert(offset + size <= (int)sizeof(sc->vsc_config.mac));
		/*
		 * The driver is allowed to change the MAC address
		 */
		ptr = &sc->vsc_config.mac[offset];
		memcpy(ptr, &value, size);
	} else {
		/* silently ignore other writes */
		DPRINTF(("vtnet: write to readonly reg %d", offset));
	}

	return (0);
}

static int
pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vsc_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

static void
pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtnet_softc *sc = vsc;

	sc->vsc_features = negotiated_features;

	if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) {
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr);
		sc->rx_merge = 1;
	} else {
		/*
		 * Without mergeable rx buffers, virtio-net header is 2
		 * bytes shorter than sizeof(struct virtio_net_rxhdr).
		 */
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
		sc->rx_merge = 0;
	}

	/* Tell the backend to enable some capabilities it has advertised. */
	netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen);
	sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be);
	assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen);
}

#ifdef BHYVE_SNAPSHOT
static void
pci_vtnet_pause(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device pause requested !\n"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/* Wait for the transmit thread to finish its processing. */
	pthread_mutex_lock(&sc->tx_mtx);
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}
}

static void
pci_vtnet_resume(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device resume requested !\n"));

	pthread_mutex_unlock(&sc->tx_mtx);
	/* The RX lock should have been acquired in vtnet_pause. */
	pthread_mutex_unlock(&sc->rx_mtx);
}

static int
pci_vtnet_snapshot(void *vsc, struct vm_snapshot_meta *meta)
{
	int ret;
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device snapshot requested !\n"));

	/*
	 * Queues and consts should have been saved by the more generic
	 * vi_pci_snapshot function. We need to save only our features and
	 * config.
	 */

	SNAPSHOT_VAR_OR_LEAVE(sc->vsc_features, meta, ret, done);

	/* Force reapply negotiated features at restore time */
	if (meta->op == VM_SNAPSHOT_RESTORE) {
		pci_vtnet_neg_features(sc, sc->vsc_features);
		netbe_rx_enable(sc->vsc_be);
	}

	SNAPSHOT_VAR_OR_LEAVE(sc->vsc_config, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(sc->rx_merge, meta, ret, done);

	SNAPSHOT_VAR_OR_LEAVE(sc->vhdrlen, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(sc->be_vhdrlen, meta, ret, done);

done:
	return (ret);
}
#endif

static struct pci_devemu pci_de_vnet = {
	.pe_emu =	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read,
#ifdef BHYVE_SNAPSHOT
	.pe_snapshot =	vi_pci_snapshot,
	.pe_pause =	vi_pci_pause,
	.pe_resume =	vi_pci_resume,
#endif
};
PCI_EMUL_SET(pci_de_vnet);