1 /* 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /* 27 * This file and its contents are supplied under the terms of the 28 * Common Development and Distribution License ("CDDL"), version 1.0. 29 * You may only use this file in accordance with the terms of version 30 * 1.0 of the CDDL. 31 * 32 * A full copy of the text of the CDDL should have accompanied this 33 * source. A copy of the CDDL is also available via the Internet at 34 * http://www.illumos.org/license/CDDL. 35 * 36 * Copyright 2015 Pluribus Networks Inc. 37 * Copyright 2019 Joyent, Inc. 
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/ioctl.h>
#include <sys/viona_io.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <signal.h>
#include <poll.h>
#include <libdladm.h>
#include <libdllink.h>
#include <libdlvnic.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "config.h"
#include "pci_emul.h"
#include "virtio.h"

/* Default virtqueue depth; may be overridden by the "vqsize" option. */
#define	VIONA_RINGSZ	1024

/*
 * PCI config-space register offsets
 *
 * CFG0-CFG5 hold the 6-byte MAC address; CFG6/CFG7 are the (read-only)
 * link-status bytes.
 */
#define	VIONA_R_CFG0	24
#define	VIONA_R_CFG1	25
#define	VIONA_R_CFG2	26
#define	VIONA_R_CFG3	27
#define	VIONA_R_CFG4	28
#define	VIONA_R_CFG5	29
#define	VIONA_R_CFG6	30
#define	VIONA_R_CFG7	31
#define	VIONA_R_MAX	31

#define	VIONA_REGSZ	VIONA_R_MAX+1

/*
 * Queue definitions.
 */
#define	VIONA_RXQ	0
#define	VIONA_TXQ	1
#define	VIONA_CTLQ	2

#define	VIONA_MAXQ	3

/*
 * Debug printf
 */
static volatile int pci_viona_debug;
#define	DPRINTF(params) if (pci_viona_debug) printf params
#define	WPRINTF(params) printf params

/*
 * Per-device softc
 */
struct pci_viona_softc {
	struct pci_devinst *vsc_pi;	/* back-pointer to PCI instance */
	pthread_mutex_t vsc_mtx;	/* guards register state below */

	int		vsc_curq;	/* queue selected via QUEUE_SEL */
	int		vsc_status;	/* VIRTIO_PCI_STATUS value */
	int		vsc_isr;	/* ISR bits for legacy intx */

	datalink_id_t	vsc_linkid;	/* dladm id of the backing vnic */
	int		vsc_vnafd;	/* fd on /dev/viona */

	/* Configurable parameters */
	char		vsc_linkname[MAXLINKNAMELEN];
	uint32_t	vsc_feature_mask;
	uint16_t	vsc_vq_size;

	uint32_t	vsc_features;	/* features negotiated by guest */
	uint8_t		vsc_macaddr[6];

	uint64_t	vsc_pfn[VIONA_MAXQ];	/* ring addrs (pfn << VRING_PFN) */
	uint16_t	vsc_msix_table_idx[VIONA_MAXQ];
	boolean_t	vsc_msix_active;
};

/*
 * Return the size of IO BAR that maps virtio header and device specific
 * region.
The size would vary depending on whether MSI-X is enabled or 134 * not. 135 */ 136 static uint64_t 137 pci_viona_iosize(struct pci_devinst *pi) 138 { 139 if (pci_msix_enabled(pi)) { 140 return (VIONA_REGSZ); 141 } else { 142 return (VIONA_REGSZ - 143 (VIRTIO_PCI_CONFIG_OFF(1) - VIRTIO_PCI_CONFIG_OFF(0))); 144 } 145 } 146 147 static uint16_t 148 pci_viona_qsize(struct pci_viona_softc *sc, int qnum) 149 { 150 /* XXX no ctl queue currently */ 151 if (qnum == VIONA_CTLQ) { 152 return (0); 153 } 154 155 return (sc->vsc_vq_size); 156 } 157 158 static void 159 pci_viona_ring_reset(struct pci_viona_softc *sc, int ring) 160 { 161 assert(ring < VIONA_MAXQ); 162 163 switch (ring) { 164 case VIONA_RXQ: 165 case VIONA_TXQ: 166 break; 167 case VIONA_CTLQ: 168 default: 169 return; 170 } 171 172 for (;;) { 173 int res; 174 175 res = ioctl(sc->vsc_vnafd, VNA_IOC_RING_RESET, ring); 176 if (res == 0) { 177 break; 178 } else if (errno != EINTR) { 179 WPRINTF(("ioctl viona ring %d reset failed %d\n", 180 ring, errno)); 181 return; 182 } 183 } 184 185 sc->vsc_pfn[ring] = 0; 186 } 187 188 static void 189 pci_viona_update_status(struct pci_viona_softc *sc, uint32_t value) 190 { 191 192 if (value == 0) { 193 DPRINTF(("viona: device reset requested !\n")); 194 pci_viona_ring_reset(sc, VIONA_RXQ); 195 pci_viona_ring_reset(sc, VIONA_TXQ); 196 } 197 198 sc->vsc_status = value; 199 } 200 201 static void * 202 pci_viona_poll_thread(void *param) 203 { 204 struct pci_viona_softc *sc = param; 205 pollfd_t pollset; 206 const int fd = sc->vsc_vnafd; 207 208 pollset.fd = fd; 209 pollset.events = POLLRDBAND; 210 211 for (;;) { 212 if (poll(&pollset, 1, -1) < 0) { 213 if (errno == EINTR || errno == EAGAIN) { 214 continue; 215 } else { 216 WPRINTF(("pci_viona_poll_thread poll()" 217 "error %d\n", errno)); 218 break; 219 } 220 } 221 if (pollset.revents & POLLRDBAND) { 222 vioc_intr_poll_t vip; 223 uint_t i; 224 int res; 225 boolean_t assert_lintr = B_FALSE; 226 const boolean_t do_msix = 
			    pci_msix_enabled(sc->vsc_pi);

			res = ioctl(fd, VNA_IOC_INTR_POLL, &vip);
			for (i = 0; res > 0 && i < VIONA_VQ_MAX; i++) {
				if (vip.vip_status[i] == 0) {
					continue;
				}
				if (do_msix) {
					pci_generate_msix(sc->vsc_pi,
					    sc->vsc_msix_table_idx[i]);
				} else {
					assert_lintr = B_TRUE;
				}
				/* ack the interrupt so it can fire again */
				res = ioctl(fd, VNA_IOC_RING_INTR_CLR, i);
				if (res != 0) {
					WPRINTF(("ioctl viona vq %d intr "
					    "clear failed %d\n", i, errno));
				}
			}
			if (assert_lintr) {
				/* vsc_isr is shared with pci_viona_read() */
				pthread_mutex_lock(&sc->vsc_mtx);
				sc->vsc_isr |= VIRTIO_PCI_ISR_INTR;
				pci_lintr_assert(sc->vsc_pi);
				pthread_mutex_unlock(&sc->vsc_mtx);
			}
		}
	}

	pthread_exit(NULL);
}

/*
 * Program the in-kernel ring for the currently selected queue with the
 * guest-supplied page frame number (guest write to VIRTIO_PCI_QUEUE_PFN).
 * The control queue is not backed by the kernel and is ignored.
 */
static void
pci_viona_ring_init(struct pci_viona_softc *sc, uint64_t pfn)
{
	int qnum = sc->vsc_curq;
	vioc_ring_init_t vna_ri;
	int error;

	assert(qnum < VIONA_MAXQ);

	if (qnum == VIONA_CTLQ) {
		return;
	}

	sc->vsc_pfn[qnum] = (pfn << VRING_PFN);

	vna_ri.ri_index = qnum;
	vna_ri.ri_qsize = pci_viona_qsize(sc, qnum);
	vna_ri.ri_qaddr = (pfn << VRING_PFN);
	error = ioctl(sc->vsc_vnafd, VNA_IOC_RING_INIT, &vna_ri);

	if (error != 0) {
		WPRINTF(("ioctl viona ring %u init failed %d\n", qnum, errno));
	}
}

/*
 * Open /dev/viona and create a viona instance bound to the configured
 * datalink and this VM.  On success sc->vsc_vnafd holds the open fd;
 * returns 0, or -1 on failure (fd closed).
 */
static int
pci_viona_viona_init(struct vmctx *ctx, struct pci_viona_softc *sc)
{
	vioc_create_t vna_create;
	int error;

	sc->vsc_vnafd = open("/dev/viona", O_RDWR | O_EXCL);
	if (sc->vsc_vnafd == -1) {
		WPRINTF(("open viona ctl failed: %d\n", errno));
		return (-1);
	}

	vna_create.c_linkid = sc->vsc_linkid;
	vna_create.c_vmfd = vm_get_device_fd(ctx);
	error = ioctl(sc->vsc_vnafd, VNA_IOC_CREATE, &vna_create);
	if (error != 0) {
		(void) close(sc->vsc_vnafd);
		WPRINTF(("ioctl viona create failed %d\n", errno));
		return (-1);
	}

	return (0);
}

/*
 * Convert a legacy comma-separated option string into config nodes; a
 * bare token (no '=') is treated as the vnic name.
 */
static int
pci_viona_legacy_config(nvlist_t *nvl, const char *opt)
{ 309 char *config, *name, *tofree, *value; 310 311 if (opt == NULL) 312 return (0); 313 314 config = tofree = strdup(opt); 315 while ((name = strsep(&config, ",")) != NULL) { 316 value = strchr(name, '='); 317 if (value != NULL) { 318 *value++ = '\0'; 319 set_config_value_node(nvl, name, value); 320 } else { 321 set_config_value_node(nvl, "vnic", name); 322 } 323 } 324 free(tofree); 325 return (0); 326 } 327 328 static int 329 pci_viona_parse_opts(struct pci_viona_softc *sc, nvlist_t *nvl) 330 { 331 const char *value; 332 int err = 0; 333 334 sc->vsc_vq_size = VIONA_RINGSZ; 335 sc->vsc_feature_mask = 0; 336 sc->vsc_linkname[0] = '\0'; 337 338 value = get_config_value_node(nvl, "feature_mask"); 339 if (value != NULL) { 340 long num; 341 342 errno = 0; 343 num = strtol(value, NULL, 0); 344 if (errno != 0 || num < 0) { 345 fprintf(stderr, 346 "viona: invalid mask '%s'", value); 347 } else { 348 sc->vsc_feature_mask = num; 349 } 350 } 351 352 value = get_config_value_node(nvl, "vqsize"); 353 if (value != NULL) { 354 long num; 355 356 errno = 0; 357 num = strtol(value, NULL, 0); 358 if (errno != 0) { 359 fprintf(stderr, 360 "viona: invalid vsqize '%s'", value); 361 err = -1; 362 } else if (num <= 2 || num > 32768) { 363 fprintf(stderr, 364 "viona: vqsize out of range", num); 365 err = -1; 366 } else if ((1 << (ffs(num) - 1)) != num) { 367 fprintf(stderr, 368 "viona: vqsize must be power of 2", num); 369 err = -1; 370 } else { 371 sc->vsc_vq_size = num; 372 } 373 } 374 375 value = get_config_value_node(nvl, "vnic"); 376 if (value == NULL) { 377 fprintf(stderr, "viona: vnic name required"); 378 err = -1; 379 } else { 380 (void) strlcpy(sc->vsc_linkname, value, MAXLINKNAMELEN); 381 } 382 383 DPRINTF(("viona=%p dev=%s vqsize=%x feature_mask=%x\n", sc, 384 sc->vsc_linkname, sc->vsc_vq_size, sc->vsc_feature_mask)); 385 return (err); 386 } 387 388 static int 389 pci_viona_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl) 390 { 391 dladm_handle_t handle; 392 
dladm_status_t status; 393 dladm_vnic_attr_t attr; 394 char errmsg[DLADM_STRSIZE]; 395 char tname[MAXCOMLEN + 1]; 396 int error, i; 397 struct pci_viona_softc *sc; 398 uint64_t ioport; 399 const char *vnic; 400 pthread_t tid; 401 402 vnic = get_config_value_node(nvl, "vnic"); 403 if (vnic == NULL) { 404 printf("virtio-viona: vnic required\n"); 405 return (1); 406 } 407 408 sc = malloc(sizeof (struct pci_viona_softc)); 409 memset(sc, 0, sizeof (struct pci_viona_softc)); 410 411 pi->pi_arg = sc; 412 sc->vsc_pi = pi; 413 414 pthread_mutex_init(&sc->vsc_mtx, NULL); 415 416 if (pci_viona_parse_opts(sc, nvl) != 0) { 417 free(sc); 418 return (1); 419 } 420 421 if ((status = dladm_open(&handle)) != DLADM_STATUS_OK) { 422 WPRINTF(("could not open /dev/dld")); 423 free(sc); 424 return (1); 425 } 426 427 if ((status = dladm_name2info(handle, sc->vsc_linkname, &sc->vsc_linkid, 428 NULL, NULL, NULL)) != DLADM_STATUS_OK) { 429 WPRINTF(("dladm_name2info() for %s failed: %s\n", vnic, 430 dladm_status2str(status, errmsg))); 431 dladm_close(handle); 432 free(sc); 433 return (1); 434 } 435 436 if ((status = dladm_vnic_info(handle, sc->vsc_linkid, &attr, 437 DLADM_OPT_ACTIVE)) != DLADM_STATUS_OK) { 438 WPRINTF(("dladm_vnic_info() for %s failed: %s\n", vnic, 439 dladm_status2str(status, errmsg))); 440 dladm_close(handle); 441 free(sc); 442 return (1); 443 } 444 445 memcpy(sc->vsc_macaddr, attr.va_mac_addr, ETHERADDRL); 446 447 dladm_close(handle); 448 449 error = pci_viona_viona_init(ctx, sc); 450 if (error != 0) { 451 free(sc); 452 return (1); 453 } 454 455 error = pthread_create(&tid, NULL, pci_viona_poll_thread, sc); 456 assert(error == 0); 457 snprintf(tname, sizeof (tname), "vionapoll:%s", vnic); 458 pthread_set_name_np(tid, tname); 459 460 /* initialize config space */ 461 pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET); 462 pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR); 463 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK); 464 pci_set_cfgdata16(pi, PCIR_SUBDEV_0, 
VIRTIO_ID_NETWORK); 465 pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR); 466 467 /* MSI-X support */ 468 for (i = 0; i < VIONA_MAXQ; i++) 469 sc->vsc_msix_table_idx[i] = VIRTIO_MSI_NO_VECTOR; 470 471 /* BAR 1 used to map MSI-X table and PBA */ 472 if (pci_emul_add_msixcap(pi, VIONA_MAXQ, 1)) { 473 free(sc); 474 return (1); 475 } 476 477 /* BAR 0 for legacy-style virtio register access. */ 478 error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, VIONA_REGSZ); 479 if (error != 0) { 480 WPRINTF(("could not allocate virtio BAR\n")); 481 free(sc); 482 return (1); 483 } 484 485 /* Install ioport hook for virtqueue notification */ 486 ioport = pi->pi_bar[0].addr + VIRTIO_PCI_QUEUE_NOTIFY; 487 error = ioctl(sc->vsc_vnafd, VNA_IOC_SET_NOTIFY_IOP, ioport); 488 if (error != 0) { 489 WPRINTF(("could not install ioport hook at %x\n", ioport)); 490 free(sc); 491 return (1); 492 } 493 494 /* 495 * Need a legacy interrupt for virtio compliance, even though MSI-X 496 * operation is _strongly_ suggested for adequate performance. 
 */
	pci_lintr_request(pi);

	return (0);
}

/*
 * Translate a BAR 0 offset for the device-specific config region.  When
 * MSI-X is disabled the virtio header is shorter, so the device-specific
 * registers sit at lower offsets; shift them up to their canonical
 * positions before decoding.
 */
static uint64_t
viona_adjust_offset(struct pci_devinst *pi, uint64_t offset)
{
	/*
	 * Device specific offsets used by guest would change based on
	 * whether MSI-X capability is enabled or not
	 */
	if (!pci_msix_enabled(pi)) {
		if (offset >= VIRTIO_PCI_CONFIG_OFF(0)) {
			return (offset + (VIRTIO_PCI_CONFIG_OFF(1) -
			    VIRTIO_PCI_CONFIG_OFF(0)));
		}
	}

	return (offset);
}

/*
 * Push the current MSI-X address/data for 'ring' down to the kernel
 * viona driver.  A masked or unassigned vector is programmed as 0/0,
 * which disables in-kernel MSI delivery for that ring.
 * Caller is expected to hold vsc_mtx (callers below do).
 */
static void
pci_viona_ring_set_msix(struct pci_devinst *pi, uint_t ring)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	struct msix_table_entry mte;
	uint16_t tab_index;
	vioc_ring_msi_t vrm;
	int res;

	assert(ring <= VIONA_VQ_TX);

	vrm.rm_index = ring;
	vrm.rm_addr = 0;
	vrm.rm_msg = 0;
	tab_index = sc->vsc_msix_table_idx[ring];

	if (tab_index != VIRTIO_MSI_NO_VECTOR && sc->vsc_msix_active) {
		mte = pi->pi_msix.table[tab_index];
		if ((mte.vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
			vrm.rm_addr = mte.addr;
			vrm.rm_msg = mte.msg_data;
		}
	}

	res = ioctl(sc->vsc_vnafd, VNA_IOC_RING_SET_MSI, &vrm);
	if (res != 0) {
		WPRINTF(("ioctl viona set_msi %d failed %d\n", ring, errno));
	}
}

/*
 * React to a change in legacy-vs-MSI-X interrupt mode: when the
 * effective MSI-X state flips, reprogram both rings' kernel MSI state.
 */
static void
pci_viona_lintrupdate(struct pci_devinst *pi)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	boolean_t msix_on = B_FALSE;

	pthread_mutex_lock(&sc->vsc_mtx);
	msix_on = pci_msix_enabled(pi) && (pi->pi_msix.function_mask == 0);
	if ((sc->vsc_msix_active && !msix_on) ||
	    (msix_on && !sc->vsc_msix_active)) {
		uint_t i;

		sc->vsc_msix_active = msix_on;
		/* Update in-kernel ring configs */
		for (i = 0; i <= VIONA_VQ_TX; i++) {
			pci_viona_ring_set_msix(pi, i);
		}
	}
	pthread_mutex_unlock(&sc->vsc_mtx);
}

/*
 * A write to the MSI-X table succeeded at 'offset'; if the touched
 * entry backs one of the rings, push the new vector to the kernel.
 */
static void
pci_viona_msix_update(struct pci_devinst *pi, uint64_t offset)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	uint_t tab_index, i;

	pthread_mutex_lock(&sc->vsc_mtx);
	if (!sc->vsc_msix_active) {
		pthread_mutex_unlock(&sc->vsc_mtx);
		return;
	}

	/*
	 * Rather than update every possible MSI-X vector, cheat and use the
	 * offset to calculate the entry within the table.  Since this should
	 * only be called when a write to the table succeeds, the index should
	 * be valid.
	 */
	tab_index = offset / MSIX_TABLE_ENTRY_SIZE;

	for (i = 0; i <= VIONA_VQ_TX; i++) {
		if (sc->vsc_msix_table_idx[i] != tab_index) {
			continue;
		}
		pci_viona_ring_set_msix(pi, i);
	}

	pthread_mutex_unlock(&sc->vsc_mtx);
}

/*
 * Guest kick of a virtqueue (write to VIRTIO_PCI_QUEUE_NOTIFY): forward
 * RX/TX kicks to the kernel driver; the control queue is a no-op.
 */
static void
pci_viona_qnotify(struct pci_viona_softc *sc, int ring)
{
	int error;

	switch (ring) {
	case VIONA_TXQ:
	case VIONA_RXQ:
		error = ioctl(sc->vsc_vnafd, VNA_IOC_RING_KICK, ring);
		if (error != 0) {
			WPRINTF(("ioctl viona ring %d kick failed %d\n",
			    ring, errno));
		}
		break;
	case VIONA_CTLQ:
		DPRINTF(("viona: control qnotify!\n"));
		break;
	default:
		break;
	}
}

/*
 * BAR write handler: dispatch MSI-X table/PBA writes, then decode
 * legacy virtio-header and device-specific register writes under
 * vsc_mtx.
 */
static void
pci_viona_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
    int baridx, uint64_t offset, int size, uint64_t value)
{
	struct pci_viona_softc *sc = pi->pi_arg;
	void *ptr;
	int err = 0;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		if (pci_emul_msix_twrite(pi, offset, size, value) == 0) {
			pci_viona_msix_update(pi, offset);
		}
		return;
	}

	assert(baridx == 0);

	if (offset + size > pci_viona_iosize(pi)) {
		DPRINTF(("viona_write: 2big, offset %ld size %d\n",
		    offset, size));
		return;
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	offset = viona_adjust_offset(pi, offset);

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		assert(size == 4);
		/* never allow the guest to negotiate masked-off features */
		value &= ~(sc->vsc_feature_mask);
		err = ioctl(sc->vsc_vnafd, VNA_IOC_SET_FEATURES,
&value); 656 if (err != 0) { 657 WPRINTF(("ioctl feature negotiation returned" 658 " err = %d\n", errno)); 659 } else { 660 sc->vsc_features = value; 661 } 662 break; 663 case VIRTIO_PCI_QUEUE_PFN: 664 assert(size == 4); 665 pci_viona_ring_init(sc, value); 666 break; 667 case VIRTIO_PCI_QUEUE_SEL: 668 assert(size == 2); 669 assert(value < VIONA_MAXQ); 670 sc->vsc_curq = value; 671 break; 672 case VIRTIO_PCI_QUEUE_NOTIFY: 673 assert(size == 2); 674 assert(value < VIONA_MAXQ); 675 pci_viona_qnotify(sc, value); 676 break; 677 case VIRTIO_PCI_STATUS: 678 assert(size == 1); 679 pci_viona_update_status(sc, value); 680 break; 681 case VIRTIO_MSI_CONFIG_VECTOR: 682 assert(size == 2); 683 sc->vsc_msix_table_idx[VIONA_CTLQ] = value; 684 break; 685 case VIRTIO_MSI_QUEUE_VECTOR: 686 assert(size == 2); 687 assert(sc->vsc_curq != VIONA_CTLQ); 688 sc->vsc_msix_table_idx[sc->vsc_curq] = value; 689 pci_viona_ring_set_msix(pi, sc->vsc_curq); 690 break; 691 case VIONA_R_CFG0: 692 case VIONA_R_CFG1: 693 case VIONA_R_CFG2: 694 case VIONA_R_CFG3: 695 case VIONA_R_CFG4: 696 case VIONA_R_CFG5: 697 assert((size + offset) <= (VIONA_R_CFG5 + 1)); 698 ptr = &sc->vsc_macaddr[offset - VIONA_R_CFG0]; 699 /* 700 * The driver is allowed to change the MAC address 701 */ 702 sc->vsc_macaddr[offset - VIONA_R_CFG0] = value; 703 if (size == 1) { 704 *(uint8_t *)ptr = value; 705 } else if (size == 2) { 706 *(uint16_t *)ptr = value; 707 } else { 708 *(uint32_t *)ptr = value; 709 } 710 break; 711 case VIRTIO_PCI_HOST_FEATURES: 712 case VIRTIO_PCI_QUEUE_NUM: 713 case VIRTIO_PCI_ISR: 714 case VIONA_R_CFG6: 715 case VIONA_R_CFG7: 716 DPRINTF(("viona: write to readonly reg %ld\n\r", offset)); 717 break; 718 default: 719 DPRINTF(("viona: unknown i/o write offset %ld\n\r", offset)); 720 value = 0; 721 break; 722 } 723 724 pthread_mutex_unlock(&sc->vsc_mtx); 725 } 726 727 static uint64_t 728 pci_viona_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, 729 int baridx, uint64_t offset, int size) 730 { 731 
struct pci_viona_softc *sc = pi->pi_arg; 732 void *ptr; 733 uint64_t value; 734 int err = 0; 735 736 if (baridx == pci_msix_table_bar(pi) || 737 baridx == pci_msix_pba_bar(pi)) { 738 return (pci_emul_msix_tread(pi, offset, size)); 739 } 740 741 assert(baridx == 0); 742 743 if (offset + size > pci_viona_iosize(pi)) { 744 DPRINTF(("viona_read: 2big, offset %ld size %d\n", 745 offset, size)); 746 return (0); 747 } 748 749 pthread_mutex_lock(&sc->vsc_mtx); 750 751 offset = viona_adjust_offset(pi, offset); 752 753 switch (offset) { 754 case VIRTIO_PCI_HOST_FEATURES: 755 assert(size == 4); 756 err = ioctl(sc->vsc_vnafd, VNA_IOC_GET_FEATURES, &value); 757 if (err != 0) { 758 WPRINTF(("ioctl get host features returned" 759 " err = %d\n", errno)); 760 } 761 value &= ~sc->vsc_feature_mask; 762 break; 763 case VIRTIO_PCI_GUEST_FEATURES: 764 assert(size == 4); 765 value = sc->vsc_features; /* XXX never read ? */ 766 break; 767 case VIRTIO_PCI_QUEUE_PFN: 768 assert(size == 4); 769 value = sc->vsc_pfn[sc->vsc_curq] >> VRING_PFN; 770 break; 771 case VIRTIO_PCI_QUEUE_NUM: 772 assert(size == 2); 773 value = pci_viona_qsize(sc, sc->vsc_curq); 774 break; 775 case VIRTIO_PCI_QUEUE_SEL: 776 assert(size == 2); 777 value = sc->vsc_curq; /* XXX never read ? */ 778 break; 779 case VIRTIO_PCI_QUEUE_NOTIFY: 780 assert(size == 2); 781 value = sc->vsc_curq; /* XXX never read ? 
*/ 782 break; 783 case VIRTIO_PCI_STATUS: 784 assert(size == 1); 785 value = sc->vsc_status; 786 break; 787 case VIRTIO_PCI_ISR: 788 assert(size == 1); 789 value = sc->vsc_isr; 790 sc->vsc_isr = 0; /* a read clears this flag */ 791 if (value != 0) { 792 pci_lintr_deassert(pi); 793 } 794 break; 795 case VIRTIO_MSI_CONFIG_VECTOR: 796 assert(size == 2); 797 value = sc->vsc_msix_table_idx[VIONA_CTLQ]; 798 break; 799 case VIRTIO_MSI_QUEUE_VECTOR: 800 assert(size == 2); 801 assert(sc->vsc_curq != VIONA_CTLQ); 802 value = sc->vsc_msix_table_idx[sc->vsc_curq]; 803 break; 804 case VIONA_R_CFG0: 805 case VIONA_R_CFG1: 806 case VIONA_R_CFG2: 807 case VIONA_R_CFG3: 808 case VIONA_R_CFG4: 809 case VIONA_R_CFG5: 810 assert((size + offset) <= (VIONA_R_CFG5 + 1)); 811 ptr = &sc->vsc_macaddr[offset - VIONA_R_CFG0]; 812 if (size == 1) { 813 value = *(uint8_t *)ptr; 814 } else if (size == 2) { 815 value = *(uint16_t *)ptr; 816 } else { 817 value = *(uint32_t *)ptr; 818 } 819 break; 820 case VIONA_R_CFG6: 821 assert(size != 4); 822 value = 0x01; /* XXX link always up */ 823 break; 824 case VIONA_R_CFG7: 825 assert(size == 1); 826 value = 0; /* XXX link status in LSB */ 827 break; 828 default: 829 DPRINTF(("viona: unknown i/o read offset %ld\n\r", offset)); 830 value = 0; 831 break; 832 } 833 834 pthread_mutex_unlock(&sc->vsc_mtx); 835 836 return (value); 837 } 838 839 struct pci_devemu pci_de_viona = { 840 .pe_emu = "virtio-net-viona", 841 .pe_init = pci_viona_init, 842 .pe_legacy_config = pci_viona_legacy_config, 843 .pe_barwrite = pci_viona_write, 844 .pe_barread = pci_viona_read, 845 .pe_lintrupdate = pci_viona_lintrupdate 846 }; 847 PCI_EMUL_SET(pci_de_viona); 848