/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013  Chris Torek <torek @ torek net>
 * All rights reserved.
 * Copyright (c) 2019 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"

/*
 * Functions for dealing with generalized "virtual devices" as
 * defined by <https://www.google.com/#output=search&q=virtio+spec>
 */

/*
 * In case we decide to relax the "virtio softc comes at the
 * front of virtio-based device softc" constraint, let's use
 * this to convert.
 */
#define	DEV_SOFTC(vs)	((void *)(vs))

/*
 * Link a virtio_softc to its constants, the device softc, and
 * the PCI emulation.
 */
void
vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
		void *dev_softc, struct pci_devinst *pi,
		struct vqueue_info *queues)
{
	int i;

	/* vs and dev_softc addresses must match */
	assert((void *)vs == dev_softc);
	vs->vs_vc = vc;
	vs->vs_pi = pi;
	pi->pi_arg = vs;

	vs->vs_queues = queues;
	for (i = 0; i < vc->vc_nvq; i++) {
		queues[i].vq_vs = vs;
		queues[i].vq_num = i;
	}
}
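/*
 * Example usage (an illustrative sketch, not part of this file): a
 * device emulation embeds the virtio_softc at the start of its own
 * softc, satisfying the DEV_SOFTC() constraint above, and then links
 * everything together during init.  The "vtfoo" names below are
 * hypothetical.
 *
 *	struct pci_vtfoo_softc {
 *		struct virtio_softc vsc_vs;	<- must come first
 *		pthread_mutex_t vsc_mtx;
 *		struct vqueue_info vsc_vq;
 *	};
 *
 *	static int
 *	pci_vtfoo_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 *	{
 *		struct pci_vtfoo_softc *sc = calloc(1, sizeof(*sc));
 *
 *		pthread_mutex_init(&sc->vsc_mtx, NULL);
 *		vi_softc_linkup(&sc->vsc_vs, &vtfoo_vi_consts, sc, pi,
 *		    &sc->vsc_vq);
 *		sc->vsc_vs.vs_mtx = &sc->vsc_mtx;
 *		...
 *	}
 */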
/*
 * Reset device (device-wide).  This erases all queues, i.e.,
 * all the queues become invalid (though we don't wipe out the
 * internal pointers, we just clear the VQ_ALLOC flag).
 *
 * It resets negotiated features to "none".
 *
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 */
void
vi_reset_dev(struct virtio_softc *vs)
{
	struct vqueue_info *vq;
	int i, nvq;

	if (vs->vs_mtx)
		assert(pthread_mutex_isowned_np(vs->vs_mtx));

	nvq = vs->vs_vc->vc_nvq;
	for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
		vq->vq_flags = 0;
		vq->vq_last_avail = 0;
		vq->vq_next_used = 0;
		vq->vq_save_used = 0;
		vq->vq_pfn = 0;
		vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
	}
	vs->vs_negotiated_caps = 0;
	vs->vs_curq = 0;
	/* vs->vs_status = 0; -- redundant */
	if (vs->vs_isr)
		pci_lintr_deassert(vs->vs_pi);
	vs->vs_isr = 0;
	vs->vs_msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}

/*
 * Set I/O BAR (usually 0) to map PCI config registers.
 */
void
vi_set_io_bar(struct virtio_softc *vs, int barnum)
{
	size_t size;

	/*
	 * ??? should we use CFG0 if MSI-X is disabled?
	 * Existing code did not...
	 */
	size = VTCFG_R_CFG1 + vs->vs_vc->vc_cfgsize;
	pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
}

/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vec.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
	int nvec;

	if (use_msix) {
		vs->vs_flags |= VIRTIO_USE_MSIX;
		VS_LOCK(vs);
		vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
		VS_UNLOCK(vs);
		nvec = vs->vs_vc->vc_nvq + 1;
		if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
			return (1);
	} else
		vs->vs_flags &= ~VIRTIO_USE_MSIX;

	/* Only 1 MSI vector for bhyve */
	pci_emul_add_msicap(vs->vs_pi, 1);

	/* Legacy interrupts are mandatory for virtio devices */
	pci_lintr_request(vs->vs_pi);

	return (0);
}

/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct virtio_desc *)base;
	base += vq->vq_qsize * sizeof(struct virtio_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
	vq->vq_next_used = 0;
	vq->vq_save_used = 0;
}
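/*
 * Worked example of the layout computed above, assuming the usual
 * VRING_ALIGN of 4096 and a queue size of 256:
 *
 *	descriptor table: 256 * sizeof(struct virtio_desc) = 4096 bytes
 *	avail ring:       (2 + 256 + 1) * sizeof(uint16_t) =  518 bytes
 *	padding:          roundup2(4614, 4096) - 4614      = 3578 bytes
 *	used ring:        begins at guest-physical offset 8192
 *
 * so the guest hands us a single PFN and the entire ring occupies a
 * few contiguous guest-physical pages starting there.
 */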
/*
 * Helper inline for vq_getchain(): record the i'th "real"
 * descriptor.
 */
static inline void
_vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
	   struct iovec *iov, int n_iov, uint16_t *flags)
{

	if (i >= n_iov)
		return;
	iov[i].iov_base = paddr_guest2host(ctx, vd->vd_addr, vd->vd_len);
	iov[i].iov_len = vd->vd_len;
	if (flags != NULL)
		flags[i] = vd->vd_flags;
}
#define	VQ_MAX_DESCRIPTORS	512	/* see below */
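/*
 * For illustration (hypothetical values): a request consisting of a
 * 16-byte device-readable header and a 512-byte device-writable buffer
 * arrives as two linked descriptors, which _vq_record() maps to two
 * iovec entries:
 *
 *	desc[4] = { vd_addr 0x11000, vd_len 16,  vd_flags NEXT,  vd_next 9 }
 *	desc[9] = { vd_addr 0x12000, vd_len 512, vd_flags WRITE           }
 *
 *	iov[0] = { paddr_guest2host(ctx, 0x11000, 16),  16  }
 *	iov[1] = { paddr_guest2host(ctx, 0x12000, 512), 512 }
 *
 * and flags[1] carries VRING_DESC_F_WRITE, which is how a device tells
 * readable buffers from writable ones.
 */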
/*
 * Examine the chain of descriptors starting at the "next one" to
 * make sure that they describe a sensible request.  If so, return
 * the number of "real" descriptors that would be needed/used in
 * acting on this request.  This may be smaller than the number of
 * available descriptors, e.g., if there are two available but
 * they are two separate requests, this just returns 1.  Or, it
 * may be larger: if there are indirect descriptors involved,
 * there may only be one descriptor available but it may be an
 * indirect pointing to eight more.  We return 8 in this case,
 * i.e., we do not count the indirect descriptors, only the "real"
 * ones.
 *
 * Basically, this vets the vd_flags and vd_next field of each
 * descriptor and tells you how many are involved.  Since some may
 * be indirect, this also needs the vmctx (in the pci_devinst
 * at vs->vs_pi) so that it can find indirect descriptors.
 *
 * As we process each descriptor, we copy and adjust it (guest to
 * host address wise, also using the vmctx) into the given iov[]
 * array (of the given size).  If the array overflows, we stop
 * placing values into the array but keep processing descriptors,
 * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
 * So you, the caller, must not assume that iov[] is as big as the
 * return value (you can process the same thing twice to allocate
 * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
 * If you want to verify the WRITE flag on each descriptor, pass a
 * non-NULL "flags" pointer to an array of "uint16_t" of the same size
 * as n_iov and we'll copy each vd_flags field after unwinding any
 * indirects.
 *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1.  If no descriptors are ready now it simply returns 0.
 *
 * You are assumed to have done a vq_ring_ready() if needed (note
 * that vq_has_descs() does one).
 */
int
vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
	    struct iovec *iov, int n_iov, uint16_t *flags)
{
	int i;
	u_int ndesc, n_indir;
	u_int idx, next;
	volatile struct virtio_desc *vdir, *vindir, *vp;
	struct vmctx *ctx;
	struct virtio_softc *vs;
	const char *name;

	vs = vq->vq_vs;
	name = vs->vs_vc->vc_name;

	/*
	 * Note: it's the responsibility of the guest not to
	 * update vq->vq_avail->va_idx until all of the descriptors
	 * the guest has written are valid (including all their
	 * vd_next fields and vd_flags).
	 *
	 * Compute (va_idx - last_avail) in integers mod 2**16.  This is
	 * the number of descriptors the device has made available
	 * since the last time we updated vq->vq_last_avail.
	 *
	 * We just need to do the subtraction as an unsigned int,
	 * then trim off excess bits.
	 */
	idx = vq->vq_last_avail;
	ndesc = (uint16_t)((u_int)vq->vq_avail->va_idx - idx);
	if (ndesc == 0)
		return (0);
	if (ndesc > vq->vq_qsize) {
		/* XXX need better way to diagnose issues */
		EPRINTLN(
		    "%s: ndesc (%u) out of range, driver confused?",
		    name, (u_int)ndesc);
		return (-1);
	}

	/*
	 * Now count/parse "involved" descriptors starting from
	 * the head of the chain.
	 *
	 * To prevent loops, we could be more complicated and
	 * check whether we're re-visiting a previously visited
	 * index, but we just abort if the count gets excessive.
	 */
	ctx = vs->vs_pi->pi_vmctx;
	*pidx = next = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
	vq->vq_last_avail++;
	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->vd_next) {
		if (next >= vq->vq_qsize) {
			EPRINTLN(
			    "%s: descriptor index %u out of range, "
			    "driver confused?",
			    name, next);
			return (-1);
		}
		vdir = &vq->vq_desc[next];
		if ((vdir->vd_flags & VRING_DESC_F_INDIRECT) == 0) {
			_vq_record(i, vdir, ctx, iov, n_iov, flags);
			i++;
		} else if ((vs->vs_vc->vc_hv_caps &
		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
			EPRINTLN(
			    "%s: descriptor has forbidden INDIRECT flag, "
			    "driver confused?",
			    name);
			return (-1);
		} else {
			n_indir = vdir->vd_len / 16;
			if ((vdir->vd_len & 0xf) || n_indir == 0) {
				EPRINTLN(
				    "%s: invalid indir len 0x%x, "
				    "driver confused?",
				    name, (u_int)vdir->vd_len);
				return (-1);
			}
			vindir = paddr_guest2host(ctx,
			    vdir->vd_addr, vdir->vd_len);
			/*
			 * Indirects start at the 0th, then follow
			 * their own embedded "next"s until those run
			 * out.  Each one's indirect flag must be off
			 * (we don't really have to check, could just
			 * ignore errors...).
			 */
			next = 0;
			for (;;) {
				vp = &vindir[next];
				if (vp->vd_flags & VRING_DESC_F_INDIRECT) {
					EPRINTLN(
					    "%s: indirect desc has INDIR flag,"
					    " driver confused?",
					    name);
					return (-1);
				}
				_vq_record(i, vp, ctx, iov, n_iov, flags);
				if (++i > VQ_MAX_DESCRIPTORS)
					goto loopy;
				if ((vp->vd_flags & VRING_DESC_F_NEXT) == 0)
					break;
				next = vp->vd_next;
				if (next >= n_indir) {
					EPRINTLN(
					    "%s: invalid next %u > %u, "
					    "driver confused?",
					    name, (u_int)next, n_indir);
					return (-1);
				}
			}
		}
		if ((vdir->vd_flags & VRING_DESC_F_NEXT) == 0)
			return (i);
	}
loopy:
	EPRINTLN(
	    "%s: descriptor loop? count > %d - driver confused?",
	    name, i);
	return (-1);
}

/*
 * Return the first n_chains request chains back to the available queue.
 *
 * (These chains are the ones you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_retchains(struct vqueue_info *vq, uint16_t n_chains)
{

	vq->vq_last_avail -= n_chains;
}
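/*
 * Typical calling pattern from a queue-notify handler, as a sketch
 * (the vtfoo names are hypothetical; actual bhyve devices follow this
 * shape):
 *
 *	static void
 *	pci_vtfoo_notify(void *vsc, struct vqueue_info *vq)
 *	{
 *		struct iovec iov[8];
 *		uint16_t flags[8], idx;
 *		int n;
 *
 *		while (vq_has_descs(vq)) {
 *			n = vq_getchain(vq, &idx, iov, 8, flags);
 *			if (n <= 0)
 *				return;
 *			(process the n buffers in iov[], honoring flags[])
 *			vq_relchain(vq, idx, iolen);
 *				(iolen = bytes the device produced)
 *		}
 *		vq_endchains(vq, 1);
 *	}
 */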
void
vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	volatile struct vring_used *vuh;
	volatile struct virtio_used *vue;
	uint16_t mask;

	/*
	 * Notes:
	 *  - mask is N-1 where N is a power of 2 so computes x % N
	 *  - vuh points to the "used" data shared with guest
	 *  - vue points to the "used" ring entry we want to update
	 *
	 * (I apologize for the two fields named vu_idx; the
	 * virtio spec calls the one that vue points to, "id"...)
	 */
	mask = vq->vq_qsize - 1;
	vuh = vq->vq_used;

	vue = &vuh->vu_ring[vq->vq_next_used++ & mask];
	vue->vu_idx = idx;
	vue->vu_tlen = iolen;
}

void
vq_relchain_publish(struct vqueue_info *vq)
{
	/*
	 * Ensure the used descriptor is visible before updating the index.
	 * This is necessary on ISAs with memory ordering less strict than x86
	 * (and even on x86 to act as a compiler barrier).
	 */
	atomic_thread_fence_rel();
	vq->vq_used->vu_idx = vq->vq_next_used;
}

/*
 * Return specified request chain to the guest, setting its I/O length
 * to the provided value.
 *
 * (This chain is the one you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	vq_relchain_prepare(vq, idx, iolen);
	vq_relchain_publish(vq);
}
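/*
 * The prepare/publish split above lets a device complete a batch of
 * chains with a single index update, and hence at most one guest-
 * visible state change, e.g. (sketch):
 *
 *	for (i = 0; i < n; i++)
 *		vq_relchain_prepare(vq, head_idx[i], iolen[i]);
 *	vq_relchain_publish(vq);
 */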
/*
 * Driver has finished processing "available" chains and calling
 * vq_relchain on each one.  If driver used all the available
 * chains, used_all_avail should be set.
 *
 * If the "used" index moved we may need to inform the guest, i.e.,
 * deliver an interrupt.  Even if the used index did NOT move we
 * may need to deliver an interrupt, if the avail ring is empty and
 * we are supposed to interrupt on empty.
 *
 * Note that used_all_avail is provided by the caller because it's
 * a snapshot of the ring state when it decided to finish interrupt
 * processing -- it's possible that descriptors became available after
 * that point.  (It's also typically a constant 1/True as well.)
 */
void
vq_endchains(struct vqueue_info *vq, int used_all_avail)
{
	struct virtio_softc *vs;
	uint16_t event_idx, new_idx, old_idx;
	int intr;

	/*
	 * Interrupt generation: if we're using EVENT_IDX,
	 * interrupt if we've crossed the event threshold.
	 * Otherwise interrupt is generated if we added "used" entries,
	 * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
	 *
	 * In any case, though, if NOTIFY_ON_EMPTY is set and the
	 * entire avail was processed, we need to interrupt always.
	 */
	vs = vq->vq_vs;
	old_idx = vq->vq_save_used;
	vq->vq_save_used = new_idx = vq->vq_used->vu_idx;

	/*
	 * Use full memory barrier between vu_idx store from preceding
	 * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
	 * va_flags below.
	 */
	atomic_thread_fence_seq_cst();
	if (used_all_avail &&
	    (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
		intr = 1;
	else if (vs->vs_negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
		event_idx = VQ_USED_EVENT_IDX(vq);
		/*
		 * This calculation is per docs and the kernel
		 * (see src/sys/dev/virtio/virtio_ring.h).
		 */
		intr = (uint16_t)(new_idx - event_idx - 1) <
		    (uint16_t)(new_idx - old_idx);
	} else {
		intr = new_idx != old_idx &&
		    !(vq->vq_avail->va_flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	if (intr)
		vq_interrupt(vs, vq);
}
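/*
 * Worked example of the EVENT_IDX test above (hypothetical values):
 * with old_idx = 10, new_idx = 15 and event_idx = 12, the driver asked
 * for a notification once the used index passed 12, and
 * (uint16_t)(15 - 12 - 1) = 2 is less than (uint16_t)(15 - 10) = 5,
 * so we interrupt.  With event_idx = 20 instead, the left side wraps
 * to 65530, the comparison fails, and the interrupt is suppressed.
 * The unsigned 16-bit arithmetic keeps the test correct across index
 * rollover.
 */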
/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
	uint16_t	cr_offset;	/* register offset */
	uint8_t		cr_size;	/* size (bytes) */
	uint8_t		cr_ro;		/* true => reg is read only */
	const char	*cr_name;	/* name of reg */
} config_regs[] = {
	{ VTCFG_R_HOSTCAP,	4, 1, "HOSTCAP" },
	{ VTCFG_R_GUESTCAP,	4, 0, "GUESTCAP" },
	{ VTCFG_R_PFN,		4, 0, "PFN" },
	{ VTCFG_R_QNUM,		2, 1, "QNUM" },
	{ VTCFG_R_QSEL,		2, 0, "QSEL" },
	{ VTCFG_R_QNOTIFY,	2, 0, "QNOTIFY" },
	{ VTCFG_R_STATUS,	1, 0, "STATUS" },
	{ VTCFG_R_ISR,		1, 0, "ISR" },
	{ VTCFG_R_CFGVEC,	2, 0, "CFGVEC" },
	{ VTCFG_R_QVEC,		2, 0, "QVEC" },
};

static inline struct config_reg *
vi_find_cr(int offset)
{
	u_int hi, lo, mid;
	struct config_reg *cr;

	lo = 0;
	hi = sizeof(config_regs) / sizeof(*config_regs) - 1;
	while (hi >= lo) {
		mid = (hi + lo) >> 1;
		cr = &config_regs[mid];
		if (cr->cr_offset == offset)
			return (cr);
		if (cr->cr_offset < offset)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return (NULL);
}

/*
 * Handle pci config space reads.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
uint64_t
vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	    int baridx, uint64_t offset, int size)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	uint32_t value;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			return (pci_emul_msix_tread(pi, offset, size));
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;
	value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	if (pci_msix_enabled(pi))
		virtio_config_size = VTCFG_R_CFG1;
	else
		virtio_config_size = VTCFG_R_CFG0;

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 * If that fails, fall into general code.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		error = (*vc->vc_cfgread)(DEV_SOFTC(vs), newoff, size, &value);
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size) {
		if (cr != NULL) {
			/* offset must be OK, so size must be bad */
			EPRINTLN(
			    "%s: read from %s: bad size %d",
			    name, cr->cr_name, size);
		} else {
			EPRINTLN(
			    "%s: read from bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VTCFG_R_HOSTCAP:
		value = vc->vc_hv_caps;
		break;
	case VTCFG_R_GUESTCAP:
		value = vs->vs_negotiated_caps;
		break;
	case VTCFG_R_PFN:
		if (vs->vs_curq < vc->vc_nvq)
			value = vs->vs_queues[vs->vs_curq].vq_pfn;
		break;
	case VTCFG_R_QNUM:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
		break;
	case VTCFG_R_QSEL:
		value = vs->vs_curq;
		break;
	case VTCFG_R_QNOTIFY:
		value = 0;	/* XXX */
		break;
	case VTCFG_R_STATUS:
		value = vs->vs_status;
		break;
	case VTCFG_R_ISR:
		value = vs->vs_isr;
		vs->vs_isr = 0;	/* a read clears this flag */
		if (value)
			pci_lintr_deassert(pi);
		break;
	case VTCFG_R_CFGVEC:
		value = vs->vs_msix_cfg_idx;
		break;
	case VTCFG_R_QVEC:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_msix_idx :
		    VIRTIO_MSI_NO_VECTOR;
		break;
	}
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
	return (value);
}
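/*
 * The vc_cfgread hook invoked above receives offsets relative to the
 * device-specific config area (i.e., past the standard registers and
 * any MSI-X registers).  A typical implementation simply copies out of
 * a config structure, e.g. (sketch; the vtfoo names, including
 * vsc_config, are hypothetical):
 *
 *	static int
 *	pci_vtfoo_cfgread(void *vsc, int offset, int size, uint32_t *retval)
 *	{
 *		struct pci_vtfoo_softc *sc = vsc;
 *
 *		memcpy(retval, (uint8_t *)&sc->vsc_config + offset, size);
 *		return (0);
 *	}
 */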
/*
 * Handle pci config space writes.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
void
vi_pci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	     int baridx, uint64_t offset, int size, uint64_t value)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct vqueue_info *vq;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			pci_emul_msix_twrite(pi, offset, size, value);
			return;
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	if (pci_msix_enabled(pi))
		virtio_config_size = VTCFG_R_CFG1;
	else
		virtio_config_size = VTCFG_R_CFG0;

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		error = (*vc->vc_cfgwrite)(DEV_SOFTC(vs), newoff, size, value);
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size || cr->cr_ro) {
		if (cr != NULL) {
			/* offset must be OK, wrong size and/or reg is R/O */
			if (cr->cr_size != size)
				EPRINTLN(
				    "%s: write to %s: bad size %d",
				    name, cr->cr_name, size);
			if (cr->cr_ro)
				EPRINTLN(
				    "%s: write to read-only reg %s",
				    name, cr->cr_name);
		} else {
			EPRINTLN(
			    "%s: write to bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VTCFG_R_GUESTCAP:
		vs->vs_negotiated_caps = value & vc->vc_hv_caps;
		if (vc->vc_apply_features)
			(*vc->vc_apply_features)(DEV_SOFTC(vs),
			    vs->vs_negotiated_caps);
		break;
	case VTCFG_R_PFN:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vi_vq_init(vs, value);
		break;
	case VTCFG_R_QSEL:
		/*
		 * Note that the guest is allowed to select an
		 * invalid queue; we just need to return a QNUM
		 * of 0 while the bad queue is selected.
		 */
		vs->vs_curq = value;
		break;
	case VTCFG_R_QNOTIFY:
		if (value >= vc->vc_nvq) {
			EPRINTLN("%s: queue %d notify out of range",
			    name, (int)value);
			goto done;
		}
		vq = &vs->vs_queues[value];
		if (vq->vq_notify)
			(*vq->vq_notify)(DEV_SOFTC(vs), vq);
		else if (vc->vc_qnotify)
			(*vc->vc_qnotify)(DEV_SOFTC(vs), vq);
		else
			EPRINTLN(
			    "%s: qnotify queue %d: missing vq/vc notify",
			    name, (int)value);
		break;
	case VTCFG_R_STATUS:
		vs->vs_status = value;
		if (value == 0)
			(*vc->vc_reset)(DEV_SOFTC(vs));
		break;
	case VTCFG_R_CFGVEC:
		vs->vs_msix_cfg_idx = value;
		break;
	case VTCFG_R_QVEC:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vq = &vs->vs_queues[vs->vs_curq];
		vq->vq_msix_idx = value;
		break;
	}
	goto done;

bad_qindex:
	EPRINTLN(
	    "%s: write config reg %s: curq %d >= max %d",
	    name, cr->cr_name, vs->vs_curq, vc->vc_nvq);
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
}
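/*
 * Devices hand vi_pci_read()/vi_pci_write() directly to the PCI
 * emulation as their BAR handlers, e.g. (sketch; the foo names are
 * hypothetical):
 *
 *	static struct pci_devemu pci_de_vfoo = {
 *		.pe_emu =	"virtio-foo",
 *		.pe_init =	pci_vtfoo_init,
 *		.pe_barwrite =	vi_pci_write,
 *		.pe_barread =	vi_pci_read
 *	};
 *	PCI_EMUL_SET(pci_de_vfoo);
 */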