1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <net/ethernet.h>
#include <net/if.h>	/* IFNAMSIZ */

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"
#include "net_utils.h"
#include "net_backends.h"
#include "iov.h"

/* Number of descriptors in each of the RX/TX virtqueues. */
#define VTNET_RINGSZ	1024

/* Maximum number of iovec segments collected for one packet. */
#define VTNET_MAXSEGS	256

/* 64 KiB plus slack; presumably a per-packet upper bound (not used in this file's visible code). */
#define VTNET_MAX_PKT_LEN	(65536 + 64)

/* Bounds enforced on the user-supplied "mtu" config value. */
#define VTNET_MIN_MTU	ETHERMIN
#define VTNET_MAX_MTU	65535

/* Feature bits this device model always offers to the guest. */
#define VTNET_S_HOSTCAPS \
	( VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | \
	VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)

/*
 * PCI config-space "registers"
 * Layout of the virtio-net device-specific configuration area,
 * read/written through pci_vtnet_cfgread()/pci_vtnet_cfgwrite().
 */
struct virtio_net_config {
	uint8_t  mac[6];		/* device MAC address (guest-writable) */
	uint16_t status;		/* link status; set to 1 (up) at init */
	uint16_t max_virtqueue_pairs;	/* always 1: no multiqueue support */
	uint16_t mtu;			/* advertised MTU (VIRTIO_NET_F_MTU) */
} __packed;

/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2	/* NB: not yet supported */

#define VTNET_MAXQ	3

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) PRINTLN params
#define WPRINTF(params) PRINTLN params

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;	/* common virtio state; must be first for vi_* helpers */
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];	/* RX and TX queues only */
	pthread_mutex_t vsc_mtx;

	net_backend_t	*vsc_be;	/* network backend (may be NULL if unconfigured) */

	bool    features_negotiated;	/* protected by rx_mtx */

	int		resetting;	/* protected by tx_mtx */

	uint64_t	vsc_features;	/* negotiated features */

	pthread_mutex_t	rx_mtx;		/* serializes RX processing and feature changes */
	int		rx_merge;	/* merged rx bufs in use */

	pthread_t 	tx_tid;		/* dedicated TX processing thread */
	pthread_mutex_t	tx_mtx;		/* protects tx_cond/tx_in_progress/resetting */
	pthread_cond_t	tx_cond;
	int		tx_in_progress;

	size_t		vhdrlen;	/* virtio-net header length used by the frontend */
	size_t		be_vhdrlen;	/* header length used by the backend (may be 0) */

	struct virtio_net_config vsc_config;
	struct virtio_consts vsc_consts;	/* per-device copy; caps vary with backend */
};

static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);

static struct virtio_consts vtnet_vi_consts = {
	.vc_name =	"vtnet",
	.vc_nvq =	VTNET_MAXQ - 1,
	.vc_cfgsize =	sizeof(struct virtio_net_config),
	.vc_reset =	pci_vtnet_reset,
	.vc_cfgread =	pci_vtnet_cfgread,
	.vc_cfgwrite =	pci_vtnet_cfgwrite,
	.vc_apply_features = pci_vtnet_neg_features,
	.vc_hv_caps =	VTNET_S_HOSTCAPS,
};

/*
 * vc_reset hook: quiesce the RX path and the TX thread, then reset
 * rings, MSI-X state and negotiated features via vi_reset_dev().
 * Lock order here is rx_mtx then tx_mtx.
 */
static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested !"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/*
	 * Make sure receive operation is disabled at least until we
	 * re-negotiate the features, since receive operation depends
	 * on the value of sc->rx_merge and the header length, which
	 * are both set in pci_vtnet_neg_features().
	 * Receive operation will be enabled again once the guest adds
	 * the first receive buffers and kicks us.
	 */
	sc->features_negotiated = false;
	netbe_rx_disable(sc->vsc_be);

	/* Set sc->resetting and give a chance to the TX thread to stop. */
	pthread_mutex_lock(&sc->tx_mtx);
	sc->resetting = 1;
	while (sc->tx_in_progress) {
		/* Drop the lock so the TX thread can finish its iteration. */
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}

	/*
	 * Now reset rings, MSI-X vectors, and negotiated capabilities.
	 * Do that with the TX lock held, since we need to reset
	 * sc->resetting.
	 */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
	pthread_mutex_unlock(&sc->tx_mtx);
	pthread_mutex_unlock(&sc->rx_mtx);
}

/*
 * Strip 'hlen' header bytes from the front of the 'iov' array
 * (updating *iovcnt in place).  Returns a pointer to the adjusted
 * array, or NULL if the first segment is too small to hold the whole
 * header, or if nothing but the header fits in the chain.
 */
static __inline struct iovec *
iov_trim_hdr(struct iovec *iov, int *iovcnt, unsigned int hlen)
{
	struct iovec *riov;

	if (iov[0].iov_len < hlen) {
		/*
		 * Not enough header space in the first fragment.
		 * That's not ok for us.
		 */
		return NULL;
	}

	iov[0].iov_len -= hlen;
	if (iov[0].iov_len == 0) {
		*iovcnt -= 1;
		if (*iovcnt == 0) {
			/*
			 * Only space for the header. That's not
			 * enough for us.
211 */ 212 return NULL; 213 } 214 riov = &iov[1]; 215 } else { 216 iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + hlen); 217 riov = &iov[0]; 218 } 219 220 return (riov); 221 } 222 223 struct virtio_mrg_rxbuf_info { 224 uint16_t idx; 225 uint16_t pad; 226 uint32_t len; 227 }; 228 229 static void 230 pci_vtnet_rx(struct pci_vtnet_softc *sc) 231 { 232 int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen; 233 struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS]; 234 struct iovec iov[VTNET_MAXSEGS + 1]; 235 struct vqueue_info *vq; 236 struct vi_req req; 237 238 vq = &sc->vsc_queues[VTNET_RXQ]; 239 240 /* Features must be negotiated */ 241 if (!sc->features_negotiated) { 242 return; 243 } 244 245 for (;;) { 246 struct virtio_net_rxhdr *hdr; 247 uint32_t riov_bytes; 248 struct iovec *riov; 249 uint32_t ulen; 250 int riov_len; 251 int n_chains; 252 ssize_t rlen; 253 ssize_t plen; 254 255 plen = netbe_peek_recvlen(sc->vsc_be); 256 if (plen <= 0) { 257 /* 258 * No more packets (plen == 0), or backend errored 259 * (plen < 0). Interrupt if needed and stop. 260 */ 261 vq_endchains(vq, /*used_all_avail=*/0); 262 return; 263 } 264 plen += prepend_hdr_len; 265 266 /* 267 * Get a descriptor chain to store the next ingress 268 * packet. In case of mergeable rx buffers, get as 269 * many chains as necessary in order to make room 270 * for plen bytes. 271 */ 272 riov_bytes = 0; 273 riov_len = 0; 274 riov = iov; 275 n_chains = 0; 276 do { 277 int n = vq_getchain(vq, riov, VTNET_MAXSEGS - riov_len, 278 &req); 279 info[n_chains].idx = req.idx; 280 281 if (n == 0) { 282 /* 283 * No rx buffers. Enable RX kicks and double 284 * check. 285 */ 286 vq_kick_enable(vq); 287 if (!vq_has_descs(vq)) { 288 /* 289 * Still no buffers. Return the unused 290 * chains (if any), interrupt if needed 291 * (including for NOTIFY_ON_EMPTY), and 292 * disable the backend until the next 293 * kick. 
294 */ 295 vq_retchains(vq, n_chains); 296 vq_endchains(vq, /*used_all_avail=*/1); 297 netbe_rx_disable(sc->vsc_be); 298 return; 299 } 300 301 /* More rx buffers found, so keep going. */ 302 vq_kick_disable(vq); 303 continue; 304 } 305 #ifndef __FreeBSD__ 306 if (n == -1) { 307 /* 308 * An error from vq_getchain() means that 309 * an invalid descriptor was found. 310 */ 311 vq_retchains(vq, n_chains); 312 vq_endchains(vq, /*used_all_avail=*/0); 313 return; 314 } 315 #endif 316 assert(n >= 1 && riov_len + n <= VTNET_MAXSEGS); 317 riov_len += n; 318 if (!sc->rx_merge) { 319 n_chains = 1; 320 break; 321 } 322 #ifndef __FreeBSD__ 323 size_t c = count_iov(riov, n); 324 if (c > UINT32_MAX) { 325 vq_retchains(vq, n_chains); 326 vq_endchains(vq, /*used_all_avail=*/0); 327 return; 328 } 329 info[n_chains].len = (uint32_t)c; 330 #else 331 info[n_chains].len = (uint32_t)count_iov(riov, n); 332 #endif 333 riov_bytes += info[n_chains].len; 334 riov += n; 335 n_chains++; 336 } while (riov_bytes < plen && riov_len < VTNET_MAXSEGS); 337 338 riov = iov; 339 #ifdef __FreeBSD__ 340 hdr = riov[0].iov_base; 341 #else 342 hdr = (struct virtio_net_rxhdr *)riov[0].iov_base; 343 #endif 344 if (prepend_hdr_len > 0) { 345 /* 346 * The frontend uses a virtio-net header, but the 347 * backend does not. We need to prepend a zeroed 348 * header. 349 */ 350 riov = iov_trim_hdr(riov, &riov_len, prepend_hdr_len); 351 if (riov == NULL) { 352 /* 353 * The first collected chain is nonsensical, 354 * as it is not even enough to store the 355 * virtio-net header. Just drop it. 356 */ 357 vq_relchain(vq, info[0].idx, 0); 358 vq_retchains(vq, n_chains - 1); 359 continue; 360 } 361 memset(hdr, 0, prepend_hdr_len); 362 } 363 364 rlen = netbe_recv(sc->vsc_be, riov, riov_len); 365 if (rlen != plen - prepend_hdr_len) { 366 /* 367 * If this happens it means there is something 368 * wrong with the backend (e.g., some other 369 * process is stealing our packets). 
370 */ 371 WPRINTF(("netbe_recv: expected %zd bytes, " 372 "got %zd", plen - prepend_hdr_len, rlen)); 373 vq_retchains(vq, n_chains); 374 continue; 375 } 376 377 ulen = (uint32_t)plen; 378 379 /* 380 * Publish the used buffers to the guest, reporting the 381 * number of bytes that we wrote. 382 */ 383 if (!sc->rx_merge) { 384 vq_relchain(vq, info[0].idx, ulen); 385 } else { 386 uint32_t iolen; 387 int i = 0; 388 389 do { 390 iolen = info[i].len; 391 if (iolen > ulen) { 392 iolen = ulen; 393 } 394 vq_relchain_prepare(vq, info[i].idx, iolen); 395 ulen -= iolen; 396 i++; 397 } while (ulen > 0); 398 399 hdr->vrh_bufs = i; 400 vq_relchain_publish(vq); 401 assert(i == n_chains); 402 } 403 } 404 405 } 406 407 /* 408 * Called when there is read activity on the backend file descriptor. 409 * Each buffer posted by the guest is assumed to be able to contain 410 * an entire ethernet frame + rx header. 411 */ 412 static void 413 pci_vtnet_rx_callback(int fd __unused, enum ev_type type __unused, void *param) 414 { 415 struct pci_vtnet_softc *sc = param; 416 417 pthread_mutex_lock(&sc->rx_mtx); 418 pci_vtnet_rx(sc); 419 pthread_mutex_unlock(&sc->rx_mtx); 420 421 } 422 423 /* Called on RX kick. */ 424 static void 425 pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq) 426 { 427 struct pci_vtnet_softc *sc = vsc; 428 429 /* 430 * A qnotify means that the rx process can now begin. 431 * Enable RX only if features are negotiated. 432 */ 433 pthread_mutex_lock(&sc->rx_mtx); 434 if (!sc->features_negotiated) { 435 pthread_mutex_unlock(&sc->rx_mtx); 436 return; 437 } 438 439 vq_kick_disable(vq); 440 netbe_rx_enable(sc->vsc_be); 441 pthread_mutex_unlock(&sc->rx_mtx); 442 } 443 444 /* TX virtqueue processing, called by the TX thread. 
 */
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct iovec *siov = iov;
	struct vi_req req;
	ssize_t len;
	int n;

	/*
	 * Obtain chain of descriptors. The first descriptor also
	 * contains the virtio-net header.
	 */
	n = vq_getchain(vq, iov, VTNET_MAXSEGS, &req);
	assert(n >= 1 && n <= VTNET_MAXSEGS);

	if (sc->vhdrlen != sc->be_vhdrlen) {
		/*
		 * The frontend uses a virtio-net header, but the backend
		 * does not. We simply strip the header and ignore it, as
		 * it should be zero-filled.
		 */
		siov = iov_trim_hdr(siov, &n, sc->vhdrlen);
	}

	if (siov == NULL) {
		/* The chain is nonsensical. Just drop it. */
		len = 0;
	} else {
		len = netbe_send(sc->vsc_be, siov, n);
		if (len < 0) {
			/*
			 * If send failed, report that 0 bytes
			 * were read.
			 */
			len = 0;
		}
	}

	/*
	 * Return the processed chain to the guest, reporting
	 * the number of bytes that we read.
	 */
	vq_relchain(vq, req.idx, len);
}

/* Called on TX kick. */
static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq_kick_disable(vq);
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * Thread which will handle processing of TX desc
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Let us wait till the tx queue pointers get initialised &
	 * first tx signaled
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			/* Re-enable kicks before sleeping, then re-check. */
			vq_kick_enable(vq);
			if (!sc->resetting && vq_has_descs(vq))
				break;

			/* tx_in_progress == 0 lets pci_vtnet_reset() proceed. */
			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq_kick_disable(vq);
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * is found
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
557 */ 558 vq_endchains(vq, /*used_all_avail=*/1); 559 560 pthread_mutex_lock(&sc->tx_mtx); 561 } 562 #ifndef __FreeBSD__ 563 return (NULL); 564 #endif 565 } 566 567 #ifdef notyet 568 static void 569 pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq) 570 { 571 572 DPRINTF(("vtnet: control qnotify!")); 573 } 574 #endif 575 576 static int 577 pci_vtnet_init(struct pci_devinst *pi, nvlist_t *nvl) 578 { 579 struct pci_vtnet_softc *sc; 580 const char *value; 581 char tname[MAXCOMLEN + 1]; 582 unsigned long mtu = ETHERMTU; 583 int err; 584 585 /* 586 * Allocate data structures for further virtio initializations. 587 * sc also contains a copy of vtnet_vi_consts, since capabilities 588 * change depending on the backend. 589 */ 590 sc = calloc(1, sizeof(struct pci_vtnet_softc)); 591 592 sc->vsc_consts = vtnet_vi_consts; 593 pthread_mutex_init(&sc->vsc_mtx, NULL); 594 595 sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ; 596 sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq; 597 sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ; 598 sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq; 599 #ifdef notyet 600 sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ; 601 sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq; 602 #endif 603 604 value = get_config_value_node(nvl, "mac"); 605 if (value != NULL) { 606 err = net_parsemac(value, sc->vsc_config.mac); 607 if (err) { 608 free(sc); 609 return (err); 610 } 611 } else 612 net_genmac(pi, sc->vsc_config.mac); 613 614 value = get_config_value_node(nvl, "mtu"); 615 if (value != NULL) { 616 err = net_parsemtu(value, &mtu); 617 if (err) { 618 free(sc); 619 return (err); 620 } 621 622 if (mtu < VTNET_MIN_MTU || mtu > VTNET_MAX_MTU) { 623 err = EINVAL; 624 errno = EINVAL; 625 free(sc); 626 return (err); 627 } 628 sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MTU; 629 } 630 sc->vsc_config.mtu = mtu; 631 632 /* Permit interfaces without a configured backend. 
 */
	if (get_config_value_node(nvl, "backend") != NULL) {
		err = netbe_init(&sc->vsc_be, nvl, pci_vtnet_rx_callback, sc);
		if (err) {
			free(sc);
			return (err);
		}
#ifndef __FreeBSD__
		/* Let the backend override the MAC address (e.g. from the NIC). */
		size_t buflen = sizeof (sc->vsc_config.mac);

		err = netbe_get_mac(sc->vsc_be, sc->vsc_config.mac, &buflen);
		if (err != 0) {
			free(sc);
			return (err);
		}
#endif
	}

	/* Offer mergeable RX buffers plus whatever the backend supports. */
	sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MRG_RXBUF |
	    netbe_get_cap(sc->vsc_be);

	/*
	 * Since we do not actually support multiqueue,
	 * set the maximum virtqueue pairs to 1.
	 */
	sc->vsc_config.max_virtqueue_pairs = 1;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is always up. */
	sc->vsc_config.status = 1;

	vi_softc_linkup(&sc->vsc_vs, &sc->vsc_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) {
		free(sc);
		return (1);
	}

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	/* Default to the non-mergeable header until features are negotiated. */
	sc->rx_merge = 0;
	sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
691 */ 692 sc->tx_in_progress = 0; 693 pthread_mutex_init(&sc->tx_mtx, NULL); 694 pthread_cond_init(&sc->tx_cond, NULL); 695 pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc); 696 snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot, 697 pi->pi_func); 698 pthread_set_name_np(sc->tx_tid, tname); 699 700 return (0); 701 } 702 703 static int 704 pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value) 705 { 706 struct pci_vtnet_softc *sc = vsc; 707 void *ptr; 708 709 if (offset < (int)sizeof(sc->vsc_config.mac)) { 710 assert(offset + size <= (int)sizeof(sc->vsc_config.mac)); 711 /* 712 * The driver is allowed to change the MAC address 713 */ 714 ptr = &sc->vsc_config.mac[offset]; 715 memcpy(ptr, &value, size); 716 } else { 717 /* silently ignore other writes */ 718 DPRINTF(("vtnet: write to readonly reg %d", offset)); 719 } 720 721 return (0); 722 } 723 724 static int 725 pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval) 726 { 727 struct pci_vtnet_softc *sc = vsc; 728 void *ptr; 729 730 ptr = (uint8_t *)&sc->vsc_config + offset; 731 memcpy(retval, ptr, size); 732 return (0); 733 } 734 735 static void 736 pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features) 737 { 738 struct pci_vtnet_softc *sc = vsc; 739 740 sc->vsc_features = negotiated_features; 741 742 if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) { 743 sc->vhdrlen = sizeof(struct virtio_net_rxhdr); 744 sc->rx_merge = 1; 745 } else { 746 /* 747 * Without mergeable rx buffers, virtio-net header is 2 748 * bytes shorter than sizeof(struct virtio_net_rxhdr). 749 */ 750 sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2; 751 sc->rx_merge = 0; 752 } 753 754 /* Tell the backend to enable some capabilities it has advertised. 
 */
	netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen);
	sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be);
	/* The backend either uses no header, or the same length as the frontend. */
	assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen);

	/* Publish the flag under rx_mtx so the RX path sees consistent state. */
	pthread_mutex_lock(&sc->rx_mtx);
	sc->features_negotiated = true;
	pthread_mutex_unlock(&sc->rx_mtx);
}

/* Device-model registration: "virtio-net" PCI emulation. */
static const struct pci_devemu pci_de_vnet = {
	.pe_emu = 	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_legacy_config = netbe_legacy_config,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read,
};
PCI_EMUL_SET(pci_de_vnet);