/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <net/ethernet.h>
#include <net/if.h>	/* IFNAMSIZ */

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"
#include "net_utils.h"
#include "net_backends.h"
#include "iov.h"

#define VTNET_RINGSZ	1024

#define VTNET_MAXSEGS	256

#define VTNET_MAX_PKT_LEN	(65536 + 64)

#define VTNET_S_HOSTCAPS      \
  ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | \
    VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)

/*
 * PCI config-space "registers"
 */
struct virtio_net_config {
	uint8_t  mac[6];
	uint16_t status;
} __packed;

/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2	/* NB: not yet supported */

#define VTNET_MAXQ	3

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) PRINTLN params
#define WPRINTF(params) PRINTLN params

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
	pthread_mutex_t vsc_mtx;

	net_backend_t	*vsc_be;

	int		resetting;	/* protected by tx_mtx */

	uint64_t	vsc_features;	/* negotiated features */

	pthread_mutex_t	rx_mtx;
	int		rx_merge;	/* merged rx bufs in use */

	pthread_t 	tx_tid;
	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;

	size_t		vhdrlen;
	size_t		be_vhdrlen;

	struct virtio_net_config vsc_config;
	struct virtio_consts vsc_consts;
};

static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);

static struct virtio_consts vtnet_vi_consts = {
	"vtnet",		/* our name */
	VTNET_MAXQ - 1,		/* we currently support 2 virtqueues */
	sizeof(struct virtio_net_config), /* config reg size */
	pci_vtnet_reset,	/* reset */
	NULL,			/* device-wide qnotify -- not used */
	pci_vtnet_cfgread,	/* read PCI config */
	pci_vtnet_cfgwrite,	/* write PCI config */
	pci_vtnet_neg_features,	/* apply negotiated features */
	VTNET_S_HOSTCAPS,	/* our capabilities */
};

static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested!"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/*
	 * Make sure receive operation is disabled at least until we
	 * re-negotiate the features, since receive operation depends
	 * on the value of sc->rx_merge and the header length, which
	 * are both set in pci_vtnet_neg_features().
	 * Receive operation will be enabled again once the guest adds
	 * the first receive buffers and kicks us.
	 */
	netbe_rx_disable(sc->vsc_be);

	/* Set sc->resetting and give a chance to the TX thread to stop. */
	pthread_mutex_lock(&sc->tx_mtx);
	sc->resetting = 1;
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}

	/*
	 * Now reset rings, MSI-X vectors, and negotiated capabilities.
	 * Do that with the TX lock held, since we need to reset
	 * sc->resetting.
	 */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
	pthread_mutex_unlock(&sc->tx_mtx);
	pthread_mutex_unlock(&sc->rx_mtx);
}
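
/*
 * Drop 'hlen' bytes from the front of the iovec array 'iov', which is
 * used to strip (or make room for) a virtio-net header. On success,
 * return a pointer to the adjusted array, decrementing '*iovcnt' if the
 * first fragment is consumed entirely. Return NULL if the first
 * fragment is too small to hold the header.
 */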
static __inline struct iovec *
iov_trim_hdr(struct iovec *iov, int *iovcnt, unsigned int hlen)
{
	struct iovec *riov;

	if (iov[0].iov_len < hlen) {
		/*
		 * Not enough header space in the first fragment.
		 * That's not ok for us.
		 */
		return (NULL);
	}

	iov[0].iov_len -= hlen;
	if (iov[0].iov_len == 0) {
		*iovcnt -= 1;
		if (*iovcnt == 0) {
			/*
			 * Only space for the header. That's not
			 * enough for us.
			 */
			return (NULL);
		}
		riov = &iov[1];
	} else {
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + hlen);
		riov = &iov[0];
	}

	return (riov);
}
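
/*
 * Bookkeeping for one descriptor chain collected from the RX virtqueue:
 * the index of the chain's head descriptor and the number of guest
 * buffer bytes the chain provides. One entry per chain is kept when a
 * packet is spread over multiple chains (VIRTIO_NET_F_MRG_RXBUF).
 */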
struct virtio_mrg_rxbuf_info {
	uint16_t idx;
	uint16_t pad;
	uint32_t len;
};
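
/*
 * Receive loop: drain the packets queued in the backend, copying each
 * one into a descriptor chain (or, with mergeable rx buffers, into as
 * many chains as needed) and publishing the used chains to the guest.
 * Called with sc->rx_mtx held.
 */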
static void
pci_vtnet_rx(struct pci_vtnet_softc *sc)
{
	int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen;
	struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS];
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct vqueue_info *vq;

	vq = &sc->vsc_queues[VTNET_RXQ];
	for (;;) {
		struct virtio_net_rxhdr *hdr;
		uint32_t riov_bytes;
		struct iovec *riov;
		uint32_t ulen;
		int riov_len;
		int n_chains;
		ssize_t rlen;
		ssize_t plen;

		plen = netbe_peek_recvlen(sc->vsc_be);
		if (plen <= 0) {
			/*
			 * No more packets (plen == 0), or backend errored
			 * (plen < 0). Interrupt if needed and stop.
			 */
			vq_endchains(vq, /*used_all_avail=*/0);
			return;
		}
		plen += prepend_hdr_len;

		/*
		 * Get a descriptor chain to store the next ingress
		 * packet. In case of mergeable rx buffers, get as
		 * many chains as necessary in order to make room
		 * for plen bytes.
		 */
		riov_bytes = 0;
		riov_len = 0;
		riov = iov;
		n_chains = 0;
		do {
			int n = vq_getchain(vq, &info[n_chains].idx, riov,
			    VTNET_MAXSEGS - riov_len, NULL);

			if (n == 0) {
				/*
				 * No rx buffers. Enable RX kicks and double
				 * check.
				 */
				vq_kick_enable(vq);
				if (!vq_has_descs(vq)) {
					/*
					 * Still no buffers. Return the unused
					 * chains (if any), interrupt if needed
					 * (including for NOTIFY_ON_EMPTY), and
					 * disable the backend until the next
					 * kick.
					 */
					vq_retchains(vq, n_chains);
					vq_endchains(vq, /*used_all_avail=*/1);
					netbe_rx_disable(sc->vsc_be);
					return;
				}

				/* More rx buffers found, so keep going. */
				vq_kick_disable(vq);
				continue;
			}
			assert(n >= 1 && riov_len + n <= VTNET_MAXSEGS);
			riov_len += n;
			if (!sc->rx_merge) {
				n_chains = 1;
				break;
			}
			info[n_chains].len = (uint32_t)count_iov(riov, n);
			riov_bytes += info[n_chains].len;
			riov += n;
			n_chains++;
		} while (riov_bytes < plen && riov_len < VTNET_MAXSEGS);

		riov = iov;
		hdr = riov[0].iov_base;
		if (prepend_hdr_len > 0) {
			/*
			 * The frontend uses a virtio-net header, but the
			 * backend does not. We need to prepend a zeroed
			 * header.
			 */
			riov = iov_trim_hdr(riov, &riov_len, prepend_hdr_len);
			if (riov == NULL) {
				/*
				 * The first collected chain is nonsensical,
				 * as it is not even enough to store the
				 * virtio-net header. Just drop it.
				 */
				vq_relchain(vq, info[0].idx, 0);
				vq_retchains(vq, n_chains - 1);
				continue;
			}
			memset(hdr, 0, prepend_hdr_len);
		}

		rlen = netbe_recv(sc->vsc_be, riov, riov_len);
		if (rlen != plen - prepend_hdr_len) {
			/*
			 * If this happens it means there is something
			 * wrong with the backend (e.g., some other
			 * process is stealing our packets).
			 */
			WPRINTF(("netbe_recv: expected %zd bytes, "
			    "got %zd", plen - prepend_hdr_len, rlen));
			vq_retchains(vq, n_chains);
			continue;
		}

		ulen = (uint32_t)plen;

		/*
		 * Publish the used buffers to the guest, reporting the
		 * number of bytes that we wrote.
		 */
		if (!sc->rx_merge) {
			vq_relchain(vq, info[0].idx, ulen);
		} else {
			uint32_t iolen;
			int i = 0;

			do {
				iolen = info[i].len;
				if (iolen > ulen) {
					iolen = ulen;
				}
				vq_relchain_prepare(vq, info[i].idx, iolen);
				ulen -= iolen;
				i++;
			} while (ulen > 0);

			hdr->vrh_bufs = i;
			vq_relchain_publish(vq);
			assert(i == n_chains);
		}
	}
}

/*
 * Called when there is read activity on the backend file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 */
static void
pci_vtnet_rx_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	pci_vtnet_rx(sc);
	pthread_mutex_unlock(&sc->rx_mtx);
}

/* Called on RX kick. */
static void
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * A qnotify means that the rx process can now begin.
	 */
	pthread_mutex_lock(&sc->rx_mtx);
	vq_kick_disable(vq);
	netbe_rx_enable(sc->vsc_be);
	pthread_mutex_unlock(&sc->rx_mtx);
}

/* TX virtqueue processing, called by the TX thread. */
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct iovec *siov = iov;
	uint16_t idx;
	ssize_t len;
	int n;

	/*
	 * Obtain chain of descriptors. The first descriptor also
	 * contains the virtio-net header.
	 */
	n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
	assert(n >= 1 && n <= VTNET_MAXSEGS);

	if (sc->vhdrlen != sc->be_vhdrlen) {
		/*
		 * The frontend uses a virtio-net header, but the backend
		 * does not. We simply strip the header and ignore it, as
		 * it should be zero-filled.
		 */
		siov = iov_trim_hdr(siov, &n, sc->vhdrlen);
	}

	if (siov == NULL) {
		/* The chain is nonsensical. Just drop it. */
		len = 0;
	} else {
		len = netbe_send(sc->vsc_be, siov, n);
		if (len < 0) {
			/*
			 * If send failed, report that 0 bytes
			 * were read.
			 */
			len = 0;
		}
	}

	/*
	 * Return the processed chain to the guest, reporting
	 * the number of bytes that we read.
	 */
	vq_relchain(vq, idx, len);
}

/* Called on TX kick. */
static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq_kick_disable(vq);
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}
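
/*
 * Synchronization between TX kicks, the TX thread and device reset is
 * built on tx_mtx: tx_in_progress tells pci_vtnet_reset() when it is
 * safe to proceed, while sc->resetting tells the TX thread to stop
 * pulling descriptors until the reset completes.
 */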
/*
 * Thread which handles processing of TX descriptors.
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Wait until the TX queue pointers have been initialised and
	 * the first TX kick has been signaled.
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			vq_kick_enable(vq);
			if (!sc->resetting && vq_has_descs(vq))
				break;

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq_kick_disable(vq);
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * is found.
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
		 */
		vq_endchains(vq, /*used_all_avail=*/1);

		pthread_mutex_lock(&sc->tx_mtx);
	}
}

#ifdef notyet
static void
pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
{

	DPRINTF(("vtnet: control qnotify!"));
}
#endif

static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct pci_vtnet_softc *sc;
	char tname[MAXCOMLEN + 1];
	int mac_provided;

	/*
	 * Allocate data structures for further virtio initializations.
	 * sc also contains a copy of vtnet_vi_consts, since capabilities
	 * change depending on the backend.
	 */
	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	sc->vsc_consts = vtnet_vi_consts;
	pthread_mutex_init(&sc->vsc_mtx, NULL);

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	/*
	 * Attempt to open the backend device and read the MAC address
	 * if specified.
	 */
	mac_provided = 0;
	if (opts != NULL) {
		char *devname;
		char *vtopts;
		int err = 0;

		/* Get the device name. */
		devname = vtopts = strdup(opts);
		(void) strsep(&vtopts, ",");

		/*
		 * Parse the list of options in the form
		 *     key1=value1,...,keyN=valueN.
		 */
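		/*
		 * For example (illustrative values only), an opts string
		 * of "tap0,mac=00:a0:98:01:02:03" selects the tap0
		 * backend and provides an explicit MAC address.
		 */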
		while (vtopts != NULL) {
			char *value = vtopts;
			char *key;

			key = strsep(&value, "=");
			if (value == NULL)
				break;
			vtopts = value;
			(void) strsep(&vtopts, ",");

			if (strcmp(key, "mac") == 0) {
				err = net_parsemac(value, sc->vsc_config.mac);
				if (err)
					break;
				mac_provided = 1;
			}
		}

		if (err) {
			free(devname);
			free(sc);
			return (err);
		}

		err = netbe_init(&sc->vsc_be, devname, pci_vtnet_rx_callback,
		    sc);
		free(devname);
		if (err) {
			free(sc);
			return (err);
		}
		sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MRG_RXBUF |
		    netbe_get_cap(sc->vsc_be);
	}

	if (!mac_provided) {
		net_genmac(pi, sc->vsc_config.mac);
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is up if we managed to open backend device. */
	sc->vsc_config.status = (opts == NULL || sc->vsc_be);

	vi_softc_linkup(&sc->vsc_vs, &sc->vsc_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) {
		free(sc);
		return (1);
	}

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 0;
	sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}

static int
pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	if (offset < (int)sizeof(sc->vsc_config.mac)) {
		assert(offset + size <= (int)sizeof(sc->vsc_config.mac));
		/*
		 * The driver is allowed to change the MAC address.
		 */
		ptr = &sc->vsc_config.mac[offset];
		memcpy(ptr, &value, size);
	} else {
		/* silently ignore other writes */
		DPRINTF(("vtnet: write to readonly reg %d", offset));
	}

	return (0);
}
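
/*
 * Config-space reads are satisfied from the cached virtio_net_config,
 * which pci_vtnet_init() and pci_vtnet_cfgwrite() keep up to date.
 */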
static int
pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vsc_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

static void
pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtnet_softc *sc = vsc;

	sc->vsc_features = negotiated_features;

	if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) {
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr);
		sc->rx_merge = 1;
	} else {
		/*
		 * Without mergeable rx buffers, virtio-net header is 2
		 * bytes shorter than sizeof(struct virtio_net_rxhdr).
		 */
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
		sc->rx_merge = 0;
	}

	/* Tell the backend to enable some capabilities it has advertised. */
	netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen);
	sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be);
	assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen);
}

static struct pci_devemu pci_de_vnet = {
	.pe_emu = 	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read
};
PCI_EMUL_SET(pci_de_vnet);