/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <net/ethernet.h>
#include <net/if.h>	/* IFNAMSIZ */

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"
#include "net_utils.h"
#include "net_backends.h"
#include "iov.h"

#define VTNET_RINGSZ	1024

#define VTNET_MAXSEGS	256

#define VTNET_MAX_PKT_LEN	(65536 + 64)

#define VTNET_MIN_MTU	ETHERMIN
#define VTNET_MAX_MTU	65535

#define VTNET_S_HOSTCAPS      \
  ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | \
    VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)

/*
 * PCI config-space "registers"
 */
struct virtio_net_config {
	uint8_t  mac[6];
	uint16_t status;
	uint16_t max_virtqueue_pairs;
	uint16_t mtu;
} __packed;

/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2	/* NB: not yet supported */

#define VTNET_MAXQ	3

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) PRINTLN params
#define WPRINTF(params) PRINTLN params

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
	pthread_mutex_t vsc_mtx;

	net_backend_t	*vsc_be;

	bool		features_negotiated;	/* protected by rx_mtx */

	int		resetting;	/* protected by tx_mtx */

	uint64_t	vsc_features;	/* negotiated features */

	pthread_mutex_t	rx_mtx;
	int		rx_merge;	/* merged rx bufs in use */

	pthread_t	tx_tid;
	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;

	size_t		vhdrlen;
	size_t		be_vhdrlen;

	struct virtio_net_config vsc_config;
	struct virtio_consts vsc_consts;
};

static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);

static struct virtio_consts vtnet_vi_consts = {
	.vc_name =	"vtnet",
	.vc_nvq =	VTNET_MAXQ - 1,
	.vc_cfgsize =	sizeof(struct virtio_net_config),
	.vc_reset =	pci_vtnet_reset,
	.vc_cfgread =	pci_vtnet_cfgread,
	.vc_cfgwrite =	pci_vtnet_cfgwrite,
	.vc_apply_features = pci_vtnet_neg_features,
	.vc_hv_caps =	VTNET_S_HOSTCAPS,
};

static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested !"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/*
	 * Make sure receive operation is disabled at least until we
	 * re-negotiate the features, since receive operation depends
	 * on the value of sc->rx_merge and the header length, which
	 * are both set in pci_vtnet_neg_features().
	 * Receive operation will be enabled again once the guest adds
	 * the first receive buffers and kicks us.
	 */
	sc->features_negotiated = false;
	netbe_rx_disable(sc->vsc_be);

	/* Set sc->resetting and give a chance to the TX thread to stop. */
	pthread_mutex_lock(&sc->tx_mtx);
	sc->resetting = 1;
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}

	/*
	 * Now reset rings, MSI-X vectors, and negotiated capabilities.
	 * Do that with the TX lock held, since we need to reset
	 * sc->resetting.
	 */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
	pthread_mutex_unlock(&sc->tx_mtx);
	pthread_mutex_unlock(&sc->rx_mtx);
}

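/*
 * Strip 'hlen' bytes of virtio-net header from the front of the
 * scatter-gather list 'iov', adjusting *iovcnt as needed.  Returns a
 * pointer to the first iovec that still contains payload, or NULL if
 * the chain is too small to hold anything beyond the header.
 */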
static __inline struct iovec *
iov_trim_hdr(struct iovec *iov, int *iovcnt, unsigned int hlen)
{
	struct iovec *riov;

	if (iov[0].iov_len < hlen) {
		/*
		 * Not enough header space in the first fragment.
		 * That's not ok for us.
		 */
		return NULL;
	}

	iov[0].iov_len -= hlen;
	if (iov[0].iov_len == 0) {
		*iovcnt -= 1;
		if (*iovcnt == 0) {
			/*
			 * Only space for the header. That's not
			 * enough for us.
			 */
			return NULL;
		}
		riov = &iov[1];
	} else {
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + hlen);
		riov = &iov[0];
	}

	return (riov);
}

struct virtio_mrg_rxbuf_info {
	uint16_t idx;
	uint16_t pad;
	uint32_t len;
};

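/*
 * Drain the backend of pending packets, copying each one into a guest
 * descriptor chain (or into several chains when mergeable rx buffers
 * are in use).  Called with rx_mtx held.
 */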
static void
pci_vtnet_rx(struct pci_vtnet_softc *sc)
{
	int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen;
	struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS];
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct vqueue_info *vq;
	struct vi_req req;

	vq = &sc->vsc_queues[VTNET_RXQ];

	/* Features must be negotiated */
	if (!sc->features_negotiated) {
		return;
	}

	for (;;) {
		struct virtio_net_rxhdr *hdr;
		uint32_t riov_bytes;
		struct iovec *riov;
		uint32_t ulen;
		int riov_len;
		int n_chains;
		ssize_t rlen;
		ssize_t plen;

		plen = netbe_peek_recvlen(sc->vsc_be);
		if (plen <= 0) {
			/*
			 * No more packets (plen == 0), or backend errored
			 * (plen < 0). Interrupt if needed and stop.
			 */
			vq_endchains(vq, /*used_all_avail=*/0);
			return;
		}
		plen += prepend_hdr_len;

		/*
		 * Get a descriptor chain to store the next ingress
		 * packet. In case of mergeable rx buffers, get as
		 * many chains as necessary in order to make room
		 * for plen bytes.
		 */
		riov_bytes = 0;
		riov_len = 0;
		riov = iov;
		n_chains = 0;
		do {
			int n = vq_getchain(vq, riov, VTNET_MAXSEGS - riov_len,
			    &req);
			info[n_chains].idx = req.idx;

			if (n == 0) {
				/*
				 * No rx buffers. Enable RX kicks and double
				 * check.
				 */
				vq_kick_enable(vq);
				if (!vq_has_descs(vq)) {
					/*
					 * Still no buffers. Return the unused
					 * chains (if any), interrupt if needed
					 * (including for NOTIFY_ON_EMPTY), and
					 * disable the backend until the next
					 * kick.
					 */
					vq_retchains(vq, n_chains);
					vq_endchains(vq, /*used_all_avail=*/1);
					netbe_rx_disable(sc->vsc_be);
					return;
				}

				/* More rx buffers found, so keep going. */
				vq_kick_disable(vq);
				continue;
			}
#ifndef __FreeBSD__
			if (n == -1) {
				/*
				 * An error from vq_getchain() means that
				 * an invalid descriptor was found.
				 */
				vq_retchains(vq, n_chains);
				vq_endchains(vq, /*used_all_avail=*/0);
				return;
			}
#endif
			assert(n >= 1 && riov_len + n <= VTNET_MAXSEGS);
			riov_len += n;
			if (!sc->rx_merge) {
				n_chains = 1;
				break;
			}
#ifndef __FreeBSD__
			size_t c = count_iov(riov, n);
			if (c > UINT32_MAX) {
				vq_retchains(vq, n_chains);
				vq_endchains(vq, /*used_all_avail=*/0);
				return;
			}
			info[n_chains].len = (uint32_t)c;
#else
			info[n_chains].len = (uint32_t)count_iov(riov, n);
#endif
			riov_bytes += info[n_chains].len;
			riov += n;
			n_chains++;
		} while (riov_bytes < plen && riov_len < VTNET_MAXSEGS);

		riov = iov;
#ifdef __FreeBSD__
		hdr = riov[0].iov_base;
#else
		hdr = (struct virtio_net_rxhdr *)riov[0].iov_base;
#endif
		if (prepend_hdr_len > 0) {
			/*
			 * The frontend uses a virtio-net header, but the
			 * backend does not. We need to prepend a zeroed
			 * header.
			 */
			riov = iov_trim_hdr(riov, &riov_len, prepend_hdr_len);
			if (riov == NULL) {
				/*
				 * The first collected chain is nonsensical,
				 * as it is not even enough to store the
				 * virtio-net header. Just drop it.
				 */
				vq_relchain(vq, info[0].idx, 0);
				vq_retchains(vq, n_chains - 1);
				continue;
			}
			memset(hdr, 0, prepend_hdr_len);
		}

		rlen = netbe_recv(sc->vsc_be, riov, riov_len);
		if (rlen != plen - prepend_hdr_len) {
			/*
			 * If this happens it means there is something
			 * wrong with the backend (e.g., some other
			 * process is stealing our packets).
			 */
			WPRINTF(("netbe_recv: expected %zd bytes, "
			    "got %zd", plen - prepend_hdr_len, rlen));
			vq_retchains(vq, n_chains);
			continue;
		}

		ulen = (uint32_t)plen;

		/*
		 * Publish the used buffers to the guest, reporting the
		 * number of bytes that we wrote.
		 */
		if (!sc->rx_merge) {
			vq_relchain(vq, info[0].idx, ulen);
		} else {
			uint32_t iolen;
			int i = 0;

			do {
				iolen = info[i].len;
				if (iolen > ulen) {
					iolen = ulen;
				}
				vq_relchain_prepare(vq, info[i].idx, iolen);
				ulen -= iolen;
				i++;
			} while (ulen > 0);

			hdr->vrh_bufs = i;
			vq_relchain_publish(vq);
			assert(i == n_chains);
		}
	}

}

/*
 * Called when there is read activity on the backend file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 */
static void
pci_vtnet_rx_callback(int fd __unused, enum ev_type type __unused, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	pci_vtnet_rx(sc);
	pthread_mutex_unlock(&sc->rx_mtx);

}

/* Called on RX kick. */
static void
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * A qnotify means that the rx process can now begin.
	 * Enable RX only if features are negotiated.
	 */
	pthread_mutex_lock(&sc->rx_mtx);
	if (!sc->features_negotiated) {
		pthread_mutex_unlock(&sc->rx_mtx);
		return;
	}

	vq_kick_disable(vq);
	netbe_rx_enable(sc->vsc_be);
	pthread_mutex_unlock(&sc->rx_mtx);
}

/* TX virtqueue processing, called by the TX thread. */
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct iovec *siov = iov;
	struct vi_req req;
	ssize_t len;
	int n;

	/*
	 * Obtain chain of descriptors. The first descriptor also
	 * contains the virtio-net header.
	 */
	n = vq_getchain(vq, iov, VTNET_MAXSEGS, &req);
	assert(n >= 1 && n <= VTNET_MAXSEGS);

	if (sc->vhdrlen != sc->be_vhdrlen) {
		/*
		 * The frontend uses a virtio-net header, but the backend
		 * does not. We simply strip the header and ignore it, as
		 * it should be zero-filled.
		 */
		siov = iov_trim_hdr(siov, &n, sc->vhdrlen);
	}

	if (siov == NULL) {
		/* The chain is nonsensical. Just drop it. */
		len = 0;
	} else {
		len = netbe_send(sc->vsc_be, siov, n);
		if (len < 0) {
			/*
			 * If send failed, report that 0 bytes
			 * were read.
			 */
			len = 0;
		}
	}

	/*
	 * Return the processed chain to the guest, reporting
	 * the number of bytes that we read.
	 */
	vq_relchain(vq, req.idx, len);
}

/* Called on TX kick. */
static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq_kick_disable(vq);
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * Thread which will handle processing of TX desc
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Let us wait till the tx queue pointers get initialised &
	 * first tx signaled
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			vq_kick_enable(vq);
			if (!sc->resetting && vq_has_descs(vq))
				break;

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq_kick_disable(vq);
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * is found
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
		 */
		vq_endchains(vq, /*used_all_avail=*/1);

		pthread_mutex_lock(&sc->tx_mtx);
	}
#ifndef __FreeBSD__
	return (NULL);
#endif
}

#ifdef notyet
static void
pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
{

	DPRINTF(("vtnet: control qnotify!"));
}
#endif

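/*
 * Device initialization: parse the configuration, attach the network
 * backend (if one is configured), and register the virtio device with
 * the PCI emulation layer.
 */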
static int
pci_vtnet_init(struct pci_devinst *pi, nvlist_t *nvl)
{
	struct pci_vtnet_softc *sc;
	const char *value;
	char tname[MAXCOMLEN + 1];
	unsigned long mtu = ETHERMTU;
	int err;

	/*
	 * Allocate data structures for further virtio initializations.
	 * sc also contains a copy of vtnet_vi_consts, since capabilities
	 * change depending on the backend.
	 */
	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	sc->vsc_consts = vtnet_vi_consts;
	pthread_mutex_init(&sc->vsc_mtx, NULL);

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	value = get_config_value_node(nvl, "mac");
	if (value != NULL) {
		err = net_parsemac(value, sc->vsc_config.mac);
		if (err) {
			free(sc);
			return (err);
		}
	} else
		net_genmac(pi, sc->vsc_config.mac);

	value = get_config_value_node(nvl, "mtu");
	if (value != NULL) {
		err = net_parsemtu(value, &mtu);
		if (err) {
			free(sc);
			return (err);
		}

		if (mtu < VTNET_MIN_MTU || mtu > VTNET_MAX_MTU) {
			err = EINVAL;
			errno = EINVAL;
			free(sc);
			return (err);
		}
		sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MTU;
	}
	sc->vsc_config.mtu = mtu;

	/* Permit interfaces without a configured backend. */
	if (get_config_value_node(nvl, "backend") != NULL) {
		err = netbe_init(&sc->vsc_be, nvl, pci_vtnet_rx_callback, sc);
		if (err) {
			free(sc);
			return (err);
		}
#ifndef __FreeBSD__
		size_t buflen = sizeof (sc->vsc_config.mac);

		err = netbe_get_mac(sc->vsc_be, sc->vsc_config.mac, &buflen);
		if (err != 0) {
			free(sc);
			return (err);
		}
#endif
	}

	sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MRG_RXBUF |
	    netbe_get_cap(sc->vsc_be);

	/*
	 * Since we do not actually support multiqueue,
	 * set the maximum virtqueue pairs to 1.
	 */
	sc->vsc_config.max_virtqueue_pairs = 1;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is always up. */
	sc->vsc_config.status = 1;

	vi_softc_linkup(&sc->vsc_vs, &sc->vsc_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) {
		free(sc);
		return (1);
	}

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 0;
	sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}

static int
pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	if (offset < (int)sizeof(sc->vsc_config.mac)) {
		assert(offset + size <= (int)sizeof(sc->vsc_config.mac));
		/*
		 * The driver is allowed to change the MAC address
		 */
		ptr = &sc->vsc_config.mac[offset];
		memcpy(ptr, &value, size);
	} else {
		/* silently ignore other writes */
		DPRINTF(("vtnet: write to readonly reg %d", offset));
	}

	return (0);
}

static int
pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vsc_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

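/*
 * Called once the guest has acknowledged its feature set.  Select the
 * virtio-net header layout (mergeable rx buffers or not) and propagate
 * the negotiated capabilities to the backend.
 */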
static void
pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtnet_softc *sc = vsc;

	sc->vsc_features = negotiated_features;

	if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) {
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr);
		sc->rx_merge = 1;
	} else {
		/*
		 * Without mergeable rx buffers, virtio-net header is 2
		 * bytes shorter than sizeof(struct virtio_net_rxhdr).
		 */
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
		sc->rx_merge = 0;
	}

	/* Tell the backend to enable some capabilities it has advertised. */
	netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen);
	sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be);
	assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen);

	pthread_mutex_lock(&sc->rx_mtx);
	sc->features_negotiated = true;
	pthread_mutex_unlock(&sc->rx_mtx);
}

static const struct pci_devemu pci_de_vnet = {
	.pe_emu =	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_legacy_config = netbe_legacy_config,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read,
};
PCI_EMUL_SET(pci_de_vnet);