// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

struct vfio_pci_irq_ctx {
	struct eventfd_ctx *trigger;
	struct virqfd *unmask;
	struct virqfd *mask;
	char *name;
	bool masked;
	struct irq_bypass_producer producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

static
struct vfio_pci_irq_ctx *vfio_irq_ctx_get(struct vfio_pci_core_device *vdev,
					  unsigned long index)
{
	return xa_load(&vdev->ctx, index);
}

static void vfio_irq_ctx_free(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_irq_ctx *ctx, unsigned long index)
{
	xa_erase(&vdev->ctx, index);
	kfree(ctx);
}

static struct vfio_pci_irq_ctx *
vfio_irq_ctx_alloc(struct vfio_pci_core_device *vdev, unsigned long index)
{
	struct vfio_pci_irq_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return NULL;

	ret = xa_insert(&vdev->ctx, index, ctx, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct vfio_pci_irq_ctx *ctx;

		ctx = vfio_irq_ctx_get(vdev, 0);
		if (WARN_ON_ONCE(!ctx))
			return;
		eventfd_signal(ctx->trigger, 1);
	}
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	bool masked_changed = false;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable. The latter means this can get called
	 * even when not using intx delivery. In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (!ctx->masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
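		 * ("check_and_mask" refers to pci_check_and_mask_intx(),
		 * which only masks when the device reports a pending INTx.)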
131 */ 132 if (vdev->pci_2_3) 133 pci_intx(pdev, 0); 134 else 135 disable_irq_nosync(pdev->irq); 136 137 ctx->masked = true; 138 masked_changed = true; 139 } 140 141 out_unlock: 142 spin_unlock_irqrestore(&vdev->irqlock, flags); 143 return masked_changed; 144 } 145 146 /* 147 * If this is triggered by an eventfd, we can't call eventfd_signal 148 * or else we'll deadlock on the eventfd wait queue. Return >0 when 149 * a signal is necessary, which can then be handled via a work queue 150 * or directly depending on the caller. 151 */ 152 static int vfio_pci_intx_unmask_handler(void *opaque, void *unused) 153 { 154 struct vfio_pci_core_device *vdev = opaque; 155 struct pci_dev *pdev = vdev->pdev; 156 struct vfio_pci_irq_ctx *ctx; 157 unsigned long flags; 158 int ret = 0; 159 160 spin_lock_irqsave(&vdev->irqlock, flags); 161 162 /* 163 * Unmasking comes from ioctl or config, so again, have the 164 * physical bit follow the virtual even when not using INTx. 165 */ 166 if (unlikely(!is_intx(vdev))) { 167 if (vdev->pci_2_3) 168 pci_intx(pdev, 1); 169 goto out_unlock; 170 } 171 172 ctx = vfio_irq_ctx_get(vdev, 0); 173 if (WARN_ON_ONCE(!ctx)) 174 goto out_unlock; 175 176 if (ctx->masked && !vdev->virq_disabled) { 177 /* 178 * A pending interrupt here would immediately trigger, 179 * but we can avoid that overhead by just re-sending 180 * the interrupt to the user. 181 */ 182 if (vdev->pci_2_3) { 183 if (!pci_check_and_unmask_intx(pdev)) 184 ret = 1; 185 } else 186 enable_irq(pdev->irq); 187 188 ctx->masked = (ret > 0); 189 } 190 191 out_unlock: 192 spin_unlock_irqrestore(&vdev->irqlock, flags); 193 194 return ret; 195 } 196 197 void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) 198 { 199 if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0) 200 vfio_send_intx_eventfd(vdev, NULL); 201 } 202 203 static irqreturn_t vfio_intx_handler(int irq, void *dev_id) 204 { 205 struct vfio_pci_core_device *vdev = dev_id; 206 struct vfio_pci_irq_ctx *ctx; 207 unsigned long flags; 208 int ret = IRQ_NONE; 209 210 ctx = vfio_irq_ctx_get(vdev, 0); 211 if (WARN_ON_ONCE(!ctx)) 212 return ret; 213 214 spin_lock_irqsave(&vdev->irqlock, flags); 215 216 if (!vdev->pci_2_3) { 217 disable_irq_nosync(vdev->pdev->irq); 218 ctx->masked = true; 219 ret = IRQ_HANDLED; 220 } else if (!ctx->masked && /* may be shared */ 221 pci_check_and_mask_intx(vdev->pdev)) { 222 ctx->masked = true; 223 ret = IRQ_HANDLED; 224 } 225 226 spin_unlock_irqrestore(&vdev->irqlock, flags); 227 228 if (ret == IRQ_HANDLED) 229 vfio_send_intx_eventfd(vdev, NULL); 230 231 return ret; 232 } 233 234 static int vfio_intx_enable(struct vfio_pci_core_device *vdev) 235 { 236 struct vfio_pci_irq_ctx *ctx; 237 238 if (!is_irq_none(vdev)) 239 return -EINVAL; 240 241 if (!vdev->pdev->irq) 242 return -ENODEV; 243 244 ctx = vfio_irq_ctx_alloc(vdev, 0); 245 if (!ctx) 246 return -ENOMEM; 247 248 /* 249 * If the virtual interrupt is masked, restore it. Devices 250 * supporting DisINTx can be masked at the hardware level 251 * here, non-PCI-2.3 devices will have to wait until the 252 * interrupt is enabled. 
253 */ 254 ctx->masked = vdev->virq_disabled; 255 if (vdev->pci_2_3) 256 pci_intx(vdev->pdev, !ctx->masked); 257 258 vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; 259 260 return 0; 261 } 262 263 static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd) 264 { 265 struct pci_dev *pdev = vdev->pdev; 266 unsigned long irqflags = IRQF_SHARED; 267 struct vfio_pci_irq_ctx *ctx; 268 struct eventfd_ctx *trigger; 269 unsigned long flags; 270 int ret; 271 272 ctx = vfio_irq_ctx_get(vdev, 0); 273 if (WARN_ON_ONCE(!ctx)) 274 return -EINVAL; 275 276 if (ctx->trigger) { 277 free_irq(pdev->irq, vdev); 278 kfree(ctx->name); 279 eventfd_ctx_put(ctx->trigger); 280 ctx->trigger = NULL; 281 } 282 283 if (fd < 0) /* Disable only */ 284 return 0; 285 286 ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", 287 pci_name(pdev)); 288 if (!ctx->name) 289 return -ENOMEM; 290 291 trigger = eventfd_ctx_fdget(fd); 292 if (IS_ERR(trigger)) { 293 kfree(ctx->name); 294 return PTR_ERR(trigger); 295 } 296 297 ctx->trigger = trigger; 298 299 if (!vdev->pci_2_3) 300 irqflags = 0; 301 302 ret = request_irq(pdev->irq, vfio_intx_handler, 303 irqflags, ctx->name, vdev); 304 if (ret) { 305 ctx->trigger = NULL; 306 kfree(ctx->name); 307 eventfd_ctx_put(trigger); 308 return ret; 309 } 310 311 /* 312 * INTx disable will stick across the new irq setup, 313 * disable_irq won't. 314 */ 315 spin_lock_irqsave(&vdev->irqlock, flags); 316 if (!vdev->pci_2_3 && ctx->masked) 317 disable_irq_nosync(pdev->irq); 318 spin_unlock_irqrestore(&vdev->irqlock, flags); 319 320 return 0; 321 } 322 323 static void vfio_intx_disable(struct vfio_pci_core_device *vdev) 324 { 325 struct vfio_pci_irq_ctx *ctx; 326 327 ctx = vfio_irq_ctx_get(vdev, 0); 328 WARN_ON_ONCE(!ctx); 329 if (ctx) { 330 vfio_virqfd_disable(&ctx->unmask); 331 vfio_virqfd_disable(&ctx->mask); 332 } 333 vfio_intx_set_signal(vdev, -1); 334 vdev->irq_type = VFIO_PCI_NUM_IRQS; 335 vfio_irq_ctx_free(vdev, ctx, 0); 336 } 337 338 /* 339 * MSI/MSI-X 340 */ 341 static irqreturn_t vfio_msihandler(int irq, void *arg) 342 { 343 struct eventfd_ctx *trigger = arg; 344 345 eventfd_signal(trigger, 1); 346 return IRQ_HANDLED; 347 } 348 349 static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix) 350 { 351 struct pci_dev *pdev = vdev->pdev; 352 unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; 353 int ret; 354 u16 cmd; 355 356 if (!is_irq_none(vdev)) 357 return -EINVAL; 358 359 /* return the number of supported vectors if we can't get all: */ 360 cmd = vfio_pci_memory_lock_and_enable(vdev); 361 ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); 362 if (ret < nvec) { 363 if (ret > 0) 364 pci_free_irq_vectors(pdev); 365 vfio_pci_memory_unlock_and_restore(vdev, cmd); 366 return ret; 367 } 368 vfio_pci_memory_unlock_and_restore(vdev, cmd); 369 370 vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : 371 VFIO_PCI_MSI_IRQ_INDEX; 372 373 if (!msix) { 374 /* 375 * Compute the virtual hardware field for max msi vectors - 376 * it is the log base 2 of the number of vectors. 
377 */ 378 vdev->msi_qmax = fls(nvec * 2 - 1) - 1; 379 } 380 381 return 0; 382 } 383 384 static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev, 385 unsigned int vector, int fd, bool msix) 386 { 387 struct pci_dev *pdev = vdev->pdev; 388 struct vfio_pci_irq_ctx *ctx; 389 struct eventfd_ctx *trigger; 390 int irq, ret; 391 u16 cmd; 392 393 irq = pci_irq_vector(pdev, vector); 394 if (irq < 0) 395 return -EINVAL; 396 397 ctx = vfio_irq_ctx_get(vdev, vector); 398 399 if (ctx) { 400 irq_bypass_unregister_producer(&ctx->producer); 401 402 cmd = vfio_pci_memory_lock_and_enable(vdev); 403 free_irq(irq, ctx->trigger); 404 vfio_pci_memory_unlock_and_restore(vdev, cmd); 405 kfree(ctx->name); 406 eventfd_ctx_put(ctx->trigger); 407 vfio_irq_ctx_free(vdev, ctx, vector); 408 } 409 410 if (fd < 0) 411 return 0; 412 413 ctx = vfio_irq_ctx_alloc(vdev, vector); 414 if (!ctx) 415 return -ENOMEM; 416 417 ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)", 418 msix ? "x" : "", vector, pci_name(pdev)); 419 if (!ctx->name) { 420 ret = -ENOMEM; 421 goto out_free_ctx; 422 } 423 424 trigger = eventfd_ctx_fdget(fd); 425 if (IS_ERR(trigger)) { 426 ret = PTR_ERR(trigger); 427 goto out_free_name; 428 } 429 430 /* 431 * The MSIx vector table resides in device memory which may be cleared 432 * via backdoor resets. We don't allow direct access to the vector 433 * table so even if a userspace driver attempts to save/restore around 434 * such a reset it would be unsuccessful. To avoid this, restore the 435 * cached value of the message prior to enabling. 436 */ 437 cmd = vfio_pci_memory_lock_and_enable(vdev); 438 if (msix) { 439 struct msi_msg msg; 440 441 get_cached_msi_msg(irq, &msg); 442 pci_write_msi_msg(irq, &msg); 443 } 444 445 ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger); 446 vfio_pci_memory_unlock_and_restore(vdev, cmd); 447 if (ret) 448 goto out_put_eventfd_ctx; 449 450 ctx->producer.token = trigger; 451 ctx->producer.irq = irq; 452 ret = irq_bypass_register_producer(&ctx->producer); 453 if (unlikely(ret)) { 454 dev_info(&pdev->dev, 455 "irq bypass producer (token %p) registration fails: %d\n", 456 ctx->producer.token, ret); 457 458 ctx->producer.token = NULL; 459 } 460 ctx->trigger = trigger; 461 462 return 0; 463 464 out_put_eventfd_ctx: 465 eventfd_ctx_put(trigger); 466 out_free_name: 467 kfree(ctx->name); 468 out_free_ctx: 469 vfio_irq_ctx_free(vdev, ctx, vector); 470 return ret; 471 } 472 473 static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start, 474 unsigned count, int32_t *fds, bool msix) 475 { 476 unsigned int i, j; 477 int ret = 0; 478 479 for (i = 0, j = start; i < count && !ret; i++, j++) { 480 int fd = fds ? fds[i] : -1; 481 ret = vfio_msi_set_vector_signal(vdev, j, fd, msix); 482 } 483 484 if (ret) { 485 for (i = start; i < j; i++) 486 vfio_msi_set_vector_signal(vdev, i, -1, msix); 487 } 488 489 return ret; 490 } 491 492 static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix) 493 { 494 struct pci_dev *pdev = vdev->pdev; 495 struct vfio_pci_irq_ctx *ctx; 496 unsigned long i; 497 u16 cmd; 498 499 xa_for_each(&vdev->ctx, i, ctx) { 500 vfio_virqfd_disable(&ctx->unmask); 501 vfio_virqfd_disable(&ctx->mask); 502 vfio_msi_set_vector_signal(vdev, i, -1, msix); 503 } 504 505 cmd = vfio_pci_memory_lock_and_enable(vdev); 506 pci_free_irq_vectors(pdev); 507 vfio_pci_memory_unlock_and_restore(vdev, cmd); 508 509 /* 510 * Both disable paths above use pci_intx_for_msi() to clear DisINTx 511 * via their shutdown paths. 
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
		int32_t fd = *(int32_t *)data;

		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &ctx->unmask, fd);

		vfio_virqfd_disable(&ctx->unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index))
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (!ctx)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(ctx->trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(ctx->trigger, 1);
		}
	}
	return 0;
}

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}