1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/linker_set.h> 36 #include <sys/select.h> 37 #include <sys/uio.h> 38 #include <sys/ioctl.h> 39 #include <net/ethernet.h> 40 #include <net/if.h> /* IFNAMSIZ */ 41 42 #include <err.h> 43 #include <errno.h> 44 #include <fcntl.h> 45 #include <stdio.h> 46 #include <stdlib.h> 47 #include <stdint.h> 48 #include <string.h> 49 #include <strings.h> 50 #include <unistd.h> 51 #include <assert.h> 52 #include <pthread.h> 53 #include <pthread_np.h> 54 55 #include "bhyverun.h" 56 #include "config.h" 57 #include "debug.h" 58 #include "pci_emul.h" 59 #include "mevent.h" 60 #include "virtio.h" 61 #include "net_utils.h" 62 #include "net_backends.h" 63 #include "iov.h" 64 65 #define VTNET_RINGSZ 1024 66 67 #define VTNET_MAXSEGS 256 68 69 #define VTNET_MAX_PKT_LEN (65536 + 64) 70 71 #define VTNET_MIN_MTU ETHERMIN 72 #define VTNET_MAX_MTU 65535 73 74 #define VTNET_S_HOSTCAPS \ 75 ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | \ 76 VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC) 77 78 /* 79 * PCI config-space "registers" 80 */ 81 struct virtio_net_config { 82 uint8_t mac[6]; 83 uint16_t status; 84 uint16_t max_virtqueue_pairs; 85 uint16_t mtu; 86 } __packed; 87 88 /* 89 * Queue definitions. 90 */ 91 #define VTNET_RXQ 0 92 #define VTNET_TXQ 1 93 #define VTNET_CTLQ 2 /* NB: not yet supported */ 94 95 #define VTNET_MAXQ 3 96 97 /* 98 * Debug printf 99 */ 100 static int pci_vtnet_debug; 101 #define DPRINTF(params) if (pci_vtnet_debug) PRINTLN params 102 #define WPRINTF(params) PRINTLN params 103 104 /* 105 * Per-device softc 106 */ 107 struct pci_vtnet_softc { 108 struct virtio_softc vsc_vs; 109 struct vqueue_info vsc_queues[VTNET_MAXQ - 1]; 110 pthread_mutex_t vsc_mtx; 111 112 net_backend_t *vsc_be; 113 114 bool features_negotiated; /* protected by rx_mtx */ 115 116 int resetting; /* protected by tx_mtx */ 117 118 uint64_t vsc_features; /* negotiated features */ 119 120 pthread_mutex_t rx_mtx; 121 int rx_merge; /* merged rx bufs in use */ 122 123 pthread_t tx_tid; 124 pthread_mutex_t tx_mtx; 125 pthread_cond_t tx_cond; 126 int tx_in_progress; 127 128 size_t vhdrlen; 129 size_t be_vhdrlen; 130 131 struct virtio_net_config vsc_config; 132 struct virtio_consts vsc_consts; 133 }; 134 135 static void pci_vtnet_reset(void *); 136 /* static void pci_vtnet_notify(void *, struct vqueue_info *); */ 137 static int pci_vtnet_cfgread(void *, int, int, uint32_t *); 138 static int pci_vtnet_cfgwrite(void *, int, int, uint32_t); 139 static void pci_vtnet_neg_features(void *, uint64_t); 140 141 static struct virtio_consts vtnet_vi_consts = { 142 "vtnet", /* our name */ 143 VTNET_MAXQ - 1, /* we currently support 2 virtqueues */ 144 sizeof(struct virtio_net_config), /* config reg size */ 145 pci_vtnet_reset, /* reset */ 146 NULL, /* device-wide qnotify -- not used */ 147 pci_vtnet_cfgread, /* read PCI config */ 148 pci_vtnet_cfgwrite, /* write PCI config */ 149 pci_vtnet_neg_features, /* apply negotiated features */ 150 VTNET_S_HOSTCAPS, /* our capabilities */ 151 }; 152 153 static void 154 pci_vtnet_reset(void *vsc) 155 { 156 struct pci_vtnet_softc *sc = vsc; 157 158 DPRINTF(("vtnet: device reset requested !")); 159 160 /* Acquire the RX lock to block RX processing. */ 161 pthread_mutex_lock(&sc->rx_mtx); 162 163 /* 164 * Make sure receive operation is disabled at least until we 165 * re-negotiate the features, since receive operation depends 166 * on the value of sc->rx_merge and the header length, which 167 * are both set in pci_vtnet_neg_features(). 168 * Receive operation will be enabled again once the guest adds 169 * the first receive buffers and kicks us. 170 */ 171 sc->features_negotiated = false; 172 netbe_rx_disable(sc->vsc_be); 173 174 /* Set sc->resetting and give a chance to the TX thread to stop. */ 175 pthread_mutex_lock(&sc->tx_mtx); 176 sc->resetting = 1; 177 while (sc->tx_in_progress) { 178 pthread_mutex_unlock(&sc->tx_mtx); 179 usleep(10000); 180 pthread_mutex_lock(&sc->tx_mtx); 181 } 182 183 /* 184 * Now reset rings, MSI-X vectors, and negotiated capabilities. 185 * Do that with the TX lock held, since we need to reset 186 * sc->resetting. 187 */ 188 vi_reset_dev(&sc->vsc_vs); 189 190 sc->resetting = 0; 191 pthread_mutex_unlock(&sc->tx_mtx); 192 pthread_mutex_unlock(&sc->rx_mtx); 193 } 194 195 static __inline struct iovec * 196 iov_trim_hdr(struct iovec *iov, int *iovcnt, unsigned int hlen) 197 { 198 struct iovec *riov; 199 200 if (iov[0].iov_len < hlen) { 201 /* 202 * Not enough header space in the first fragment. 203 * That's not ok for us. 204 */ 205 return NULL; 206 } 207 208 iov[0].iov_len -= hlen; 209 if (iov[0].iov_len == 0) { 210 *iovcnt -= 1; 211 if (*iovcnt == 0) { 212 /* 213 * Only space for the header. That's not 214 * enough for us. 215 */ 216 return NULL; 217 } 218 riov = &iov[1]; 219 } else { 220 iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + hlen); 221 riov = &iov[0]; 222 } 223 224 return (riov); 225 } 226 227 struct virtio_mrg_rxbuf_info { 228 uint16_t idx; 229 uint16_t pad; 230 uint32_t len; 231 }; 232 233 static void 234 pci_vtnet_rx(struct pci_vtnet_softc *sc) 235 { 236 int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen; 237 struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS]; 238 struct iovec iov[VTNET_MAXSEGS + 1]; 239 struct vqueue_info *vq; 240 struct vi_req req; 241 242 vq = &sc->vsc_queues[VTNET_RXQ]; 243 244 /* Features must be negotiated */ 245 if (!sc->features_negotiated) { 246 return; 247 } 248 249 for (;;) { 250 struct virtio_net_rxhdr *hdr; 251 uint32_t riov_bytes; 252 struct iovec *riov; 253 uint32_t ulen; 254 int riov_len; 255 int n_chains; 256 ssize_t rlen; 257 ssize_t plen; 258 259 plen = netbe_peek_recvlen(sc->vsc_be); 260 if (plen <= 0) { 261 /* 262 * No more packets (plen == 0), or backend errored 263 * (plen < 0). Interrupt if needed and stop. 264 */ 265 vq_endchains(vq, /*used_all_avail=*/0); 266 return; 267 } 268 plen += prepend_hdr_len; 269 270 /* 271 * Get a descriptor chain to store the next ingress 272 * packet. In case of mergeable rx buffers, get as 273 * many chains as necessary in order to make room 274 * for plen bytes. 275 */ 276 riov_bytes = 0; 277 riov_len = 0; 278 riov = iov; 279 n_chains = 0; 280 do { 281 int n = vq_getchain(vq, riov, VTNET_MAXSEGS - riov_len, 282 &req); 283 info[n_chains].idx = req.idx; 284 285 if (n == 0) { 286 /* 287 * No rx buffers. Enable RX kicks and double 288 * check. 289 */ 290 vq_kick_enable(vq); 291 if (!vq_has_descs(vq)) { 292 /* 293 * Still no buffers. Return the unused 294 * chains (if any), interrupt if needed 295 * (including for NOTIFY_ON_EMPTY), and 296 * disable the backend until the next 297 * kick. 298 */ 299 vq_retchains(vq, n_chains); 300 vq_endchains(vq, /*used_all_avail=*/1); 301 netbe_rx_disable(sc->vsc_be); 302 return; 303 } 304 305 /* More rx buffers found, so keep going. */ 306 vq_kick_disable(vq); 307 continue; 308 } 309 #ifndef __FreeBSD__ 310 if (n == -1) { 311 /* 312 * An error from vq_getchain() means that 313 * an invalid descriptor was found. 314 */ 315 vq_retchains(vq, n_chains); 316 vq_endchains(vq, /*used_all_avail=*/0); 317 return; 318 } 319 #endif 320 assert(n >= 1 && riov_len + n <= VTNET_MAXSEGS); 321 riov_len += n; 322 if (!sc->rx_merge) { 323 n_chains = 1; 324 break; 325 } 326 #ifndef __FreeBSD__ 327 size_t c = count_iov(riov, n); 328 if (c > UINT32_MAX) { 329 vq_retchains(vq, n_chains); 330 vq_endchains(vq, /*used_all_avail=*/0); 331 return; 332 } 333 info[n_chains].len = (uint32_t)c; 334 #else 335 info[n_chains].len = (uint32_t)count_iov(riov, n); 336 #endif 337 riov_bytes += info[n_chains].len; 338 riov += n; 339 n_chains++; 340 } while (riov_bytes < plen && riov_len < VTNET_MAXSEGS); 341 342 riov = iov; 343 #ifdef __FreeBSD__ 344 hdr = riov[0].iov_base; 345 #else 346 hdr = (struct virtio_net_rxhdr *)riov[0].iov_base; 347 #endif 348 if (prepend_hdr_len > 0) { 349 /* 350 * The frontend uses a virtio-net header, but the 351 * backend does not. We need to prepend a zeroed 352 * header. 353 */ 354 riov = iov_trim_hdr(riov, &riov_len, prepend_hdr_len); 355 if (riov == NULL) { 356 /* 357 * The first collected chain is nonsensical, 358 * as it is not even enough to store the 359 * virtio-net header. Just drop it. 360 */ 361 vq_relchain(vq, info[0].idx, 0); 362 vq_retchains(vq, n_chains - 1); 363 continue; 364 } 365 memset(hdr, 0, prepend_hdr_len); 366 } 367 368 rlen = netbe_recv(sc->vsc_be, riov, riov_len); 369 if (rlen != plen - prepend_hdr_len) { 370 /* 371 * If this happens it means there is something 372 * wrong with the backend (e.g., some other 373 * process is stealing our packets). 374 */ 375 WPRINTF(("netbe_recv: expected %zd bytes, " 376 "got %zd", plen - prepend_hdr_len, rlen)); 377 vq_retchains(vq, n_chains); 378 continue; 379 } 380 381 ulen = (uint32_t)plen; 382 383 /* 384 * Publish the used buffers to the guest, reporting the 385 * number of bytes that we wrote. 386 */ 387 if (!sc->rx_merge) { 388 vq_relchain(vq, info[0].idx, ulen); 389 } else { 390 uint32_t iolen; 391 int i = 0; 392 393 do { 394 iolen = info[i].len; 395 if (iolen > ulen) { 396 iolen = ulen; 397 } 398 vq_relchain_prepare(vq, info[i].idx, iolen); 399 ulen -= iolen; 400 i++; 401 } while (ulen > 0); 402 403 hdr->vrh_bufs = i; 404 vq_relchain_publish(vq); 405 assert(i == n_chains); 406 } 407 } 408 409 } 410 411 /* 412 * Called when there is read activity on the backend file descriptor. 413 * Each buffer posted by the guest is assumed to be able to contain 414 * an entire ethernet frame + rx header. 415 */ 416 static void 417 pci_vtnet_rx_callback(int fd, enum ev_type type, void *param) 418 { 419 struct pci_vtnet_softc *sc = param; 420 421 pthread_mutex_lock(&sc->rx_mtx); 422 pci_vtnet_rx(sc); 423 pthread_mutex_unlock(&sc->rx_mtx); 424 425 } 426 427 /* Called on RX kick. */ 428 static void 429 pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq) 430 { 431 struct pci_vtnet_softc *sc = vsc; 432 433 /* 434 * A qnotify means that the rx process can now begin. 435 * Enable RX only if features are negotiated. 436 */ 437 pthread_mutex_lock(&sc->rx_mtx); 438 if (!sc->features_negotiated) { 439 pthread_mutex_unlock(&sc->rx_mtx); 440 return; 441 } 442 443 vq_kick_disable(vq); 444 netbe_rx_enable(sc->vsc_be); 445 pthread_mutex_unlock(&sc->rx_mtx); 446 } 447 448 /* TX virtqueue processing, called by the TX thread. */ 449 static void 450 pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq) 451 { 452 struct iovec iov[VTNET_MAXSEGS + 1]; 453 struct iovec *siov = iov; 454 struct vi_req req; 455 ssize_t len; 456 int n; 457 458 /* 459 * Obtain chain of descriptors. The first descriptor also 460 * contains the virtio-net header. 461 */ 462 n = vq_getchain(vq, iov, VTNET_MAXSEGS, &req); 463 assert(n >= 1 && n <= VTNET_MAXSEGS); 464 465 if (sc->vhdrlen != sc->be_vhdrlen) { 466 /* 467 * The frontend uses a virtio-net header, but the backend 468 * does not. We simply strip the header and ignore it, as 469 * it should be zero-filled. 470 */ 471 siov = iov_trim_hdr(siov, &n, sc->vhdrlen); 472 } 473 474 if (siov == NULL) { 475 /* The chain is nonsensical. Just drop it. */ 476 len = 0; 477 } else { 478 len = netbe_send(sc->vsc_be, siov, n); 479 if (len < 0) { 480 /* 481 * If send failed, report that 0 bytes 482 * were read. 483 */ 484 len = 0; 485 } 486 } 487 488 /* 489 * Return the processed chain to the guest, reporting 490 * the number of bytes that we read. 491 */ 492 vq_relchain(vq, req.idx, len); 493 } 494 495 /* Called on TX kick. */ 496 static void 497 pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq) 498 { 499 struct pci_vtnet_softc *sc = vsc; 500 501 /* 502 * Any ring entries to process? 503 */ 504 if (!vq_has_descs(vq)) 505 return; 506 507 /* Signal the tx thread for processing */ 508 pthread_mutex_lock(&sc->tx_mtx); 509 vq_kick_disable(vq); 510 if (sc->tx_in_progress == 0) 511 pthread_cond_signal(&sc->tx_cond); 512 pthread_mutex_unlock(&sc->tx_mtx); 513 } 514 515 /* 516 * Thread which will handle processing of TX desc 517 */ 518 static void * 519 pci_vtnet_tx_thread(void *param) 520 { 521 struct pci_vtnet_softc *sc = param; 522 struct vqueue_info *vq; 523 int error; 524 525 vq = &sc->vsc_queues[VTNET_TXQ]; 526 527 /* 528 * Let us wait till the tx queue pointers get initialised & 529 * first tx signaled 530 */ 531 pthread_mutex_lock(&sc->tx_mtx); 532 error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx); 533 assert(error == 0); 534 535 for (;;) { 536 /* note - tx mutex is locked here */ 537 while (sc->resetting || !vq_has_descs(vq)) { 538 vq_kick_enable(vq); 539 if (!sc->resetting && vq_has_descs(vq)) 540 break; 541 542 sc->tx_in_progress = 0; 543 error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx); 544 assert(error == 0); 545 } 546 vq_kick_disable(vq); 547 sc->tx_in_progress = 1; 548 pthread_mutex_unlock(&sc->tx_mtx); 549 550 do { 551 /* 552 * Run through entries, placing them into 553 * iovecs and sending when an end-of-packet 554 * is found 555 */ 556 pci_vtnet_proctx(sc, vq); 557 } while (vq_has_descs(vq)); 558 559 /* 560 * Generate an interrupt if needed. 561 */ 562 vq_endchains(vq, /*used_all_avail=*/1); 563 564 pthread_mutex_lock(&sc->tx_mtx); 565 } 566 #ifndef __FreeBSD__ 567 return (NULL); 568 #endif 569 } 570 571 #ifdef notyet 572 static void 573 pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq) 574 { 575 576 DPRINTF(("vtnet: control qnotify!")); 577 } 578 #endif 579 580 static int 581 pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl) 582 { 583 struct pci_vtnet_softc *sc; 584 const char *value; 585 char tname[MAXCOMLEN + 1]; 586 unsigned long mtu = ETHERMTU; 587 int err; 588 589 /* 590 * Allocate data structures for further virtio initializations. 591 * sc also contains a copy of vtnet_vi_consts, since capabilities 592 * change depending on the backend. 593 */ 594 sc = calloc(1, sizeof(struct pci_vtnet_softc)); 595 596 sc->vsc_consts = vtnet_vi_consts; 597 pthread_mutex_init(&sc->vsc_mtx, NULL); 598 599 sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ; 600 sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq; 601 sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ; 602 sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq; 603 #ifdef notyet 604 sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ; 605 sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq; 606 #endif 607 608 value = get_config_value_node(nvl, "mac"); 609 if (value != NULL) { 610 err = net_parsemac(value, sc->vsc_config.mac); 611 if (err) { 612 free(sc); 613 return (err); 614 } 615 } else 616 net_genmac(pi, sc->vsc_config.mac); 617 618 value = get_config_value_node(nvl, "mtu"); 619 if (value != NULL) { 620 err = net_parsemtu(value, &mtu); 621 if (err) { 622 free(sc); 623 return (err); 624 } 625 626 if (mtu < VTNET_MIN_MTU || mtu > VTNET_MAX_MTU) { 627 err = EINVAL; 628 errno = EINVAL; 629 free(sc); 630 return (err); 631 } 632 sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MTU; 633 } 634 sc->vsc_config.mtu = mtu; 635 636 /* Permit interfaces without a configured backend. */ 637 if (get_config_value_node(nvl, "backend") != NULL) { 638 err = netbe_init(&sc->vsc_be, nvl, pci_vtnet_rx_callback, sc); 639 if (err) { 640 free(sc); 641 return (err); 642 } 643 #ifndef __FreeBSD__ 644 size_t buflen = sizeof (sc->vsc_config.mac); 645 646 err = netbe_get_mac(sc->vsc_be, sc->vsc_config.mac, &buflen); 647 if (err != 0) { 648 free(sc); 649 return (err); 650 } 651 #endif 652 } 653 654 sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MRG_RXBUF | 655 netbe_get_cap(sc->vsc_be); 656 657 /* 658 * Since we do not actually support multiqueue, 659 * set the maximum virtqueue pairs to 1. 660 */ 661 sc->vsc_config.max_virtqueue_pairs = 1; 662 663 /* initialize config space */ 664 pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET); 665 pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR); 666 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK); 667 pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_NETWORK); 668 pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR); 669 670 /* Link is always up. */ 671 sc->vsc_config.status = 1; 672 673 vi_softc_linkup(&sc->vsc_vs, &sc->vsc_consts, sc, pi, sc->vsc_queues); 674 sc->vsc_vs.vs_mtx = &sc->vsc_mtx; 675 676 /* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */ 677 if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) { 678 free(sc); 679 return (1); 680 } 681 682 /* use BAR 0 to map config regs in IO space */ 683 vi_set_io_bar(&sc->vsc_vs, 0); 684 685 sc->resetting = 0; 686 687 sc->rx_merge = 0; 688 sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2; 689 pthread_mutex_init(&sc->rx_mtx, NULL); 690 691 /* 692 * Initialize tx semaphore & spawn TX processing thread. 693 * As of now, only one thread for TX desc processing is 694 * spawned. 695 */ 696 sc->tx_in_progress = 0; 697 pthread_mutex_init(&sc->tx_mtx, NULL); 698 pthread_cond_init(&sc->tx_cond, NULL); 699 pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc); 700 snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot, 701 pi->pi_func); 702 pthread_set_name_np(sc->tx_tid, tname); 703 704 return (0); 705 } 706 707 static int 708 pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value) 709 { 710 struct pci_vtnet_softc *sc = vsc; 711 void *ptr; 712 713 if (offset < (int)sizeof(sc->vsc_config.mac)) { 714 assert(offset + size <= (int)sizeof(sc->vsc_config.mac)); 715 /* 716 * The driver is allowed to change the MAC address 717 */ 718 ptr = &sc->vsc_config.mac[offset]; 719 memcpy(ptr, &value, size); 720 } else { 721 /* silently ignore other writes */ 722 DPRINTF(("vtnet: write to readonly reg %d", offset)); 723 } 724 725 return (0); 726 } 727 728 static int 729 pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval) 730 { 731 struct pci_vtnet_softc *sc = vsc; 732 void *ptr; 733 734 ptr = (uint8_t *)&sc->vsc_config + offset; 735 memcpy(retval, ptr, size); 736 return (0); 737 } 738 739 static void 740 pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features) 741 { 742 struct pci_vtnet_softc *sc = vsc; 743 744 sc->vsc_features = negotiated_features; 745 746 if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) { 747 sc->vhdrlen = sizeof(struct virtio_net_rxhdr); 748 sc->rx_merge = 1; 749 } else { 750 /* 751 * Without mergeable rx buffers, virtio-net header is 2 752 * bytes shorter than sizeof(struct virtio_net_rxhdr). 753 */ 754 sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2; 755 sc->rx_merge = 0; 756 } 757 758 /* Tell the backend to enable some capabilities it has advertised. */ 759 netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen); 760 sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be); 761 assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen); 762 763 pthread_mutex_lock(&sc->rx_mtx); 764 sc->features_negotiated = true; 765 pthread_mutex_unlock(&sc->rx_mtx); 766 } 767 768 static struct pci_devemu pci_de_vnet = { 769 .pe_emu = "virtio-net", 770 .pe_init = pci_vtnet_init, 771 .pe_legacy_config = netbe_legacy_config, 772 .pe_barwrite = vi_pci_write, 773 .pe_barread = vi_pci_read, 774 }; 775 PCI_EMUL_SET(pci_de_vnet); 776