/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/rman.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <machine/resource.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmm_lapic.h"
#include "vmm_ktr.h"

#include "iommu.h"
#include "ppt.h"

/* XXX locking */

#define	MAX_PPTDEVS	(sizeof(pptdevs) / sizeof(pptdevs[0]))
#define	MAX_MSIMSGS	32

/*
 * If the MSI-X table is located in the middle of a BAR then that MMIO
 * region gets split into two segments - one segment above the MSI-X table
 * and the other segment below the MSI-X table - with a hole in place of
 * the MSI-X table so accesses to it can be trapped and emulated.
 *
 * So, allocate a MMIO segment for each BAR register + 1 additional segment.
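 *
 * For example (illustrative numbers only, not derived from the code below):
 * a 16KB BAR whose MSI-X table occupies the 4KB page at offset 0x2000 would
 * be exposed to the guest as the two segments [0x0000, 0x2000) and
 * [0x3000, 0x4000), with the table page itself left unmapped so that guest
 * accesses to it trap into the hypervisor.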
 */
#define	MAX_MMIOSEGS	((PCIR_MAX_BAR_0 + 1) + 1)

MALLOC_DEFINE(M_PPTMSIX, "pptmsix", "Passthru MSI-X resources");

struct pptintr_arg {				/* pptintr(pptintr_arg) */
	struct pptdev	*pptdev;
	int		vec;
	int		vcpu;
};

static struct pptdev {
	device_t	dev;
	struct vm	*vm;			/* owner of this device */
	struct vm_memory_segment mmio[MAX_MMIOSEGS];
	struct {
		int	num_msgs;		/* guest state */

		int	startrid;		/* host state */
		struct resource *res[MAX_MSIMSGS];
		void	*cookie[MAX_MSIMSGS];
		struct pptintr_arg arg[MAX_MSIMSGS];
	} msi;

	struct {
		int num_msgs;
		int startrid;
		int msix_table_rid;
		struct resource *msix_table_res;
		struct resource **res;
		void **cookie;
		struct pptintr_arg *arg;
	} msix;
} pptdevs[64];

static int num_pptdevs;

static int
ppt_probe(device_t dev)
{
	int bus, slot, func;
	struct pci_devinfo *dinfo;

	dinfo = (struct pci_devinfo *)device_get_ivars(dev);

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);
	func = pci_get_function(dev);

	/*
	 * To qualify as a pci passthrough device a device must:
	 * - be allowed by administrator to be used in this role
	 * - be an endpoint device
	 */
	if (vmm_is_pptdev(bus, slot, func) &&
	    (dinfo->cfg.hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_NORMAL)
		return (0);
	else
		return (ENXIO);
}

static int
ppt_attach(device_t dev)
{
	int n;

	if (num_pptdevs >= MAX_PPTDEVS) {
		printf("ppt_attach: maximum number of pci passthrough devices "
		       "exceeded\n");
		return (ENXIO);
	}

	n = num_pptdevs++;
	pptdevs[n].dev = dev;

	if (bootverbose)
		device_printf(dev, "attached\n");

	return (0);
}

static int
ppt_detach(device_t dev)
{
	/*
	 * XXX check whether there are any pci passthrough devices assigned
	 * to guests before we allow this driver to detach.
	 */

	return (0);
}

static device_method_t ppt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ppt_probe),
	DEVMETHOD(device_attach,	ppt_attach),
	DEVMETHOD(device_detach,	ppt_detach),
	{0, 0}
};

static devclass_t ppt_devclass;
DEFINE_CLASS_0(ppt, ppt_driver, ppt_methods, 0);
DRIVER_MODULE(ppt, pci, ppt_driver, ppt_devclass, NULL, NULL);

static struct pptdev *
ppt_find(int bus, int slot, int func)
{
	device_t dev;
	int i, b, s, f;

	for (i = 0; i < num_pptdevs; i++) {
		dev = pptdevs[i].dev;
		b = pci_get_bus(dev);
		s = pci_get_slot(dev);
		f = pci_get_function(dev);
		if (bus == b && slot == s && func == f)
			return (&pptdevs[i]);
	}
	return (NULL);
}

static void
ppt_unmap_mmio(struct vm *vm, struct pptdev *ppt)
{
	int i;
	struct vm_memory_segment *seg;

	for (i = 0; i < MAX_MMIOSEGS; i++) {
		seg = &ppt->mmio[i];
		if (seg->len == 0)
			continue;
		(void)vm_unmap_mmio(vm, seg->gpa, seg->len);
		bzero(seg, sizeof(struct vm_memory_segment));
	}
}

static void
ppt_teardown_msi(struct pptdev *ppt)
{
	int i, rid;
	void *cookie;
	struct resource *res;

	if (ppt->msi.num_msgs == 0)
		return;

	for (i = 0; i < ppt->msi.num_msgs; i++) {
		rid = ppt->msi.startrid + i;
		res = ppt->msi.res[i];
		cookie = ppt->msi.cookie[i];

		if (cookie != NULL)
			bus_teardown_intr(ppt->dev, res, cookie);

		if (res != NULL)
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);

		ppt->msi.res[i] = NULL;
		ppt->msi.cookie[i] = NULL;
	}

	if (ppt->msi.startrid == 1)
		pci_release_msi(ppt->dev);

	ppt->msi.num_msgs = 0;
}

static void
ppt_teardown_msix_intr(struct pptdev *ppt, int idx)
{
	int rid;
	struct resource *res;
	void *cookie;

	rid = ppt->msix.startrid + idx;
	res = ppt->msix.res[idx];
	cookie = ppt->msix.cookie[idx];

	if (cookie != NULL)
		bus_teardown_intr(ppt->dev, res, cookie);

	if (res != NULL)
		bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);

	ppt->msix.res[idx] = NULL;
	ppt->msix.cookie[idx] = NULL;
}

static void
ppt_teardown_msix(struct pptdev *ppt)
{
	int i;

	if (ppt->msix.num_msgs == 0)
		return;

	for (i = 0; i < ppt->msix.num_msgs; i++)
		ppt_teardown_msix_intr(ppt, i);

	if (ppt->msix.msix_table_res) {
		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
				     ppt->msix.msix_table_rid,
				     ppt->msix.msix_table_res);
		ppt->msix.msix_table_res = NULL;
		ppt->msix.msix_table_rid = 0;
	}

	free(ppt->msix.res, M_PPTMSIX);
	free(ppt->msix.cookie, M_PPTMSIX);
	free(ppt->msix.arg, M_PPTMSIX);

	pci_release_msi(ppt->dev);

	ppt->msix.num_msgs = 0;
}

int
ppt_assign_device(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;

	ppt = ppt_find(bus, slot, func);
	if (ppt != NULL) {
		/*
		 * If this device is owned by a different VM then we
		 * cannot change its owner.
		 */
		if (ppt->vm != NULL && ppt->vm != vm)
			return (EBUSY);

		ppt->vm = vm;
		iommu_add_device(vm_iommu_domain(vm), bus, slot, func);
		return (0);
	}
	return (ENOENT);
}

int
ppt_unassign_device(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;

	ppt = ppt_find(bus, slot, func);
	if (ppt != NULL) {
		/*
		 * If this device is not owned by this 'vm' then bail out.
		 */
		if (ppt->vm != vm)
			return (EBUSY);
		ppt_unmap_mmio(vm, ppt);
		ppt_teardown_msi(ppt);
		ppt_teardown_msix(ppt);
		iommu_remove_device(vm_iommu_domain(vm), bus, slot, func);
		ppt->vm = NULL;
		return (0);
	}
	return (ENOENT);
}

int
ppt_unassign_all(struct vm *vm)
{
	int i, bus, slot, func;
	device_t dev;

	for (i = 0; i < num_pptdevs; i++) {
		if (pptdevs[i].vm == vm) {
			dev = pptdevs[i].dev;
			bus = pci_get_bus(dev);
			slot = pci_get_slot(dev);
			func = pci_get_function(dev);
			ppt_unassign_device(vm, bus, slot, func);
		}
	}

	return (0);
}

int
ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
	     vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	int i, error;
	struct vm_memory_segment *seg;
	struct pptdev *ppt;

	ppt = ppt_find(bus, slot, func);
	if (ppt != NULL) {
		if (ppt->vm != vm)
			return (EBUSY);

		for (i = 0; i < MAX_MMIOSEGS; i++) {
			seg = &ppt->mmio[i];
			if (seg->len == 0) {
				error = vm_map_mmio(vm, gpa, len, hpa);
				if (error == 0) {
					seg->gpa = gpa;
					seg->len = len;
				}
				return (error);
			}
		}
		return (ENOSPC);
	}
	return (ENOENT);
}

static int
pptintr(void *arg)
{
	int vec;
	struct pptdev *ppt;
	struct pptintr_arg *pptarg;

	pptarg = arg;
	ppt = pptarg->pptdev;
	vec = pptarg->vec;

	if (ppt->vm != NULL)
		(void) lapic_set_intr(ppt->vm, pptarg->vcpu, vec);
	else {
		/*
		 * XXX
		 * This is not expected to happen - panic?
		 */
	}

	/*
	 * For legacy interrupts give other filters a chance in case
	 * the interrupt was not generated by the passthrough device.
	 */
	if (ppt->msi.startrid == 0)
		return (FILTER_STRAY);
	else
		return (FILTER_HANDLED);
}

/*
 * XXX
 * When we try to free the MSI resource the kernel will bind the thread to
 * the host cpu that was originally handling the MSI. The function freeing
 * the MSI vector (apic_free_vector()) will panic the kernel if the thread
 * is already bound to a cpu.
 *
 * So, we temporarily unbind the vcpu thread before freeing the MSI resource.
 */
static void
PPT_TEARDOWN_MSI(struct vm *vm, int vcpu, struct pptdev *ppt)
{
	int pincpu = -1;

	vm_get_pinning(vm, vcpu, &pincpu);

	if (pincpu >= 0)
		vm_set_pinning(vm, vcpu, -1);

	ppt_teardown_msi(ppt);

	if (pincpu >= 0)
		vm_set_pinning(vm, vcpu, pincpu);
}

int
ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func,
	      int destcpu, int vector, int numvec)
{
	int i, rid, flags;
	int msi_count, startrid, error, tmp;
	struct pptdev *ppt;

	if ((destcpu >= VM_MAXCPU || destcpu < 0) ||
	    (vector < 0 || vector > 255) ||
	    (numvec < 0 || numvec > MAX_MSIMSGS))
		return (EINVAL);

	ppt = ppt_find(bus, slot, func);
	if (ppt == NULL)
		return (ENOENT);
	if (ppt->vm != vm)		/* Make sure we own this device */
		return (EBUSY);

	/* Free any allocated resources */
	PPT_TEARDOWN_MSI(vm, vcpu, ppt);

	if (numvec == 0)		/* nothing more to do */
		return (0);

	flags = RF_ACTIVE;
	msi_count = pci_msi_count(ppt->dev);
	if (msi_count == 0) {
		startrid = 0;		/* legacy interrupt */
		msi_count = 1;
		flags |= RF_SHAREABLE;
	} else
		startrid = 1;		/* MSI */

	/*
	 * The device must be capable of supporting the number of vectors
	 * the guest wants to allocate.
	 */
	if (numvec > msi_count)
		return (EINVAL);

	/*
	 * Make sure that we can allocate all the MSI vectors that are needed
	 * by the guest.
	 */
	if (startrid == 1) {
		tmp = numvec;
		error = pci_alloc_msi(ppt->dev, &tmp);
		if (error)
			return (error);
		else if (tmp != numvec) {
			pci_release_msi(ppt->dev);
			return (ENOSPC);
		} else {
			/* success */
		}
	}

	ppt->msi.startrid = startrid;

	/*
	 * Allocate the irq resource and attach it to the interrupt handler.
	 */
	for (i = 0; i < numvec; i++) {
		ppt->msi.num_msgs = i + 1;
		ppt->msi.cookie[i] = NULL;

		rid = startrid + i;
		ppt->msi.res[i] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ,
							 &rid, flags);
		if (ppt->msi.res[i] == NULL)
			break;

		ppt->msi.arg[i].pptdev = ppt;
		ppt->msi.arg[i].vec = vector + i;
		ppt->msi.arg[i].vcpu = destcpu;

		error = bus_setup_intr(ppt->dev, ppt->msi.res[i],
				       INTR_TYPE_NET | INTR_MPSAFE,
				       pptintr, NULL, &ppt->msi.arg[i],
				       &ppt->msi.cookie[i]);
		if (error != 0)
			break;
	}

	if (i < numvec) {
		PPT_TEARDOWN_MSI(vm, vcpu, ppt);
		return (ENXIO);
	}

	return (0);
}

int
ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func,
	       int idx, uint32_t msg, uint32_t vector_control, uint64_t addr)
{
	struct pptdev *ppt;
	struct pci_devinfo *dinfo;
	int numvec, alloced, rid, error;
	size_t res_size, cookie_size, arg_size;

	ppt = ppt_find(bus, slot, func);
	if (ppt == NULL)
		return (ENOENT);
	if (ppt->vm != vm)		/* Make sure we own this device */
		return (EBUSY);

	dinfo = device_get_ivars(ppt->dev);
	if (!dinfo)
		return (ENXIO);

	/*
	 * First-time configuration:
	 *	Allocate the MSI-X table
	 *	Allocate the IRQ resources
	 *	Set up some variables in ppt->msix
	 */
	if (ppt->msix.num_msgs == 0) {
		numvec = pci_msix_count(ppt->dev);
		if (numvec <= 0)
			return (EINVAL);

		ppt->msix.startrid = 1;
		ppt->msix.num_msgs = numvec;

		res_size = numvec * sizeof(ppt->msix.res[0]);
		cookie_size = numvec * sizeof(ppt->msix.cookie[0]);
		arg_size = numvec * sizeof(ppt->msix.arg[0]);

		ppt->msix.res = malloc(res_size, M_PPTMSIX, M_WAITOK | M_ZERO);
		ppt->msix.cookie = malloc(cookie_size, M_PPTMSIX,
					  M_WAITOK | M_ZERO);
		ppt->msix.arg = malloc(arg_size, M_PPTMSIX, M_WAITOK | M_ZERO);

		rid = dinfo->cfg.msix.msix_table_bar;
		ppt->msix.msix_table_res = bus_alloc_resource_any(ppt->dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);

		if (ppt->msix.msix_table_res == NULL) {
			ppt_teardown_msix(ppt);
			return (ENOSPC);
		}
		ppt->msix.msix_table_rid = rid;

		alloced = numvec;
		error = pci_alloc_msix(ppt->dev, &alloced);
		if (error || alloced != numvec) {
			ppt_teardown_msix(ppt);
			return (error == 0 ? ENOSPC : error);
		}
	}

	if ((vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
		/* Tear down the IRQ if it's already set up */
		ppt_teardown_msix_intr(ppt, idx);

		/* Allocate the IRQ resource */
		ppt->msix.cookie[idx] = NULL;
		rid = ppt->msix.startrid + idx;
		ppt->msix.res[idx] = bus_alloc_resource_any(ppt->dev,
		    SYS_RES_IRQ, &rid, RF_ACTIVE);
		if (ppt->msix.res[idx] == NULL)
			return (ENXIO);

		ppt->msix.arg[idx].pptdev = ppt;
		ppt->msix.arg[idx].vec = msg;
		/*
		 * The guest-programmed MSI address carries the destination
		 * APIC id in bits 19:12; use it as the target vcpu.
		 */
		ppt->msix.arg[idx].vcpu = (addr >> 12) & 0xFF;

		/* Setup the MSI-X interrupt */
		error = bus_setup_intr(ppt->dev, ppt->msix.res[idx],
				       INTR_TYPE_NET | INTR_MPSAFE,
				       pptintr, NULL, &ppt->msix.arg[idx],
				       &ppt->msix.cookie[idx]);

		if (error != 0) {
			bus_teardown_intr(ppt->dev, ppt->msix.res[idx],
			    ppt->msix.cookie[idx]);
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid,
			    ppt->msix.res[idx]);
			ppt->msix.cookie[idx] = NULL;
			ppt->msix.res[idx] = NULL;
			return (ENXIO);
		}
	} else {
		/* Masked, tear it down if it's already been set up */
		ppt_teardown_msix_intr(ppt, idx);
	}

	return (0);
}
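
/*
 * A minimal usage sketch (hypothetical caller; in practice these entry
 * points are driven by the vmm device ioctl layer): a device at
 * bus/slot/func is first assigned to a vm, its BAR segments are then
 * mapped into the guest, its interrupts are configured, and it is
 * unassigned when the guest releases it.
 *
 *	error = ppt_assign_device(vm, bus, slot, func);
 *	if (error == 0)
 *		error = ppt_map_mmio(vm, bus, slot, func, gpa, len, hpa);
 *	if (error == 0)
 *		error = ppt_setup_msi(vm, vcpu, bus, slot, func,
 *		    destcpu, vector, numvec);
 *	...
 *	(void) ppt_unassign_device(vm, bus, slot, func);
 */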