// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;
	struct virqfd		*unmask;
	struct virqfd		*mask;
	char			*name;
	bool			masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

static
struct vfio_pci_irq_ctx *vfio_irq_ctx_get(struct vfio_pci_core_device *vdev,
					  unsigned long index)
{
	if (index >= vdev->num_ctx)
		return NULL;
	return &vdev->ctx[index];
}

static void vfio_irq_ctx_free_all(struct vfio_pci_core_device *vdev)
{
	kfree(vdev->ctx);
}

static int vfio_irq_ctx_alloc_num(struct vfio_pci_core_device *vdev,
				  unsigned long num)
{
	vdev->ctx = kcalloc(num, sizeof(struct vfio_pci_irq_ctx),
			    GFP_KERNEL_ACCOUNT);
	if (!vdev->ctx)
		return -ENOMEM;

	return 0;
}

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct vfio_pci_irq_ctx *ctx;

		ctx = vfio_irq_ctx_get(vdev, 0);
		if (WARN_ON_ONCE(!ctx))
			return;
		eventfd_signal(ctx->trigger, 1);
	}
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	bool masked_changed = false;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (!ctx->masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		ctx->masked = true;
		masked_changed = true;
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}
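
/*
 * Note on the two masking strategies above: DisINTx-capable (PCI 2.3)
 * devices are masked at the device itself via the command register, so
 * the host IRQ line can remain shared; pre-2.3 devices must instead mask
 * by disabling the host IRQ.
 */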

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (ctx->masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		ctx->masked = (ret > 0);
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	int ret = IRQ_NONE;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return ret;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		ret = IRQ_HANDLED;
	} else if (!ctx->masked &&	/* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		ctx->masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
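
/*
 * Enabling INTx is a two-step sequence from the ioctl path: this helper
 * allocates the context and claims the irq_type; the handler itself is
 * only requested later, in vfio_intx_set_signal().
 */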
static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	ret = vfio_irq_ctx_alloc_num(vdev, 1);
	if (ret)
		return ret;

	/* num_ctx must be set before vfio_irq_ctx_get() can see the context. */
	vdev->num_ctx = 1;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (!ctx) {
		vfio_irq_ctx_free_all(vdev);
		vdev->num_ctx = 0;
		return -EINVAL;
	}

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	ctx->masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !ctx->masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return -EINVAL;

	if (ctx->trigger) {
		free_irq(pdev->irq, vdev);
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		ctx->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
			      pci_name(pdev));
	if (!ctx->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(ctx->name);
		return PTR_ERR(trigger);
	}

	ctx->trigger = trigger;

	/* Only DisINTx-capable devices can safely share the host IRQ line. */
	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, ctx->name, vdev);
	if (ret) {
		ctx->trigger = NULL;
		kfree(ctx->name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && ctx->masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx;

	ctx = vfio_irq_ctx_get(vdev, 0);
	WARN_ON_ONCE(!ctx);
	if (ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
	}
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	vfio_irq_ctx_free_all(vdev);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	ret = vfio_irq_ctx_alloc_num(vdev, nvec);
	if (ret)
		return ret;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		vfio_irq_ctx_free_all(vdev);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
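		/*
		 * For example, nvec = 3 gives fls(3 * 2 - 1) - 1 =
		 * fls(5) - 1 = 2, i.e. the field advertises 2^2 = 4
		 * vectors (nvec rounded up to the next power of two).
		 */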
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      unsigned int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	int irq, ret;
	u16 cmd;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	ctx = vfio_irq_ctx_get(vdev, vector);
	if (!ctx)
		return -EINVAL;
	irq = pci_irq_vector(pdev, vector);

	if (ctx->trigger) {
		irq_bypass_unregister_producer(&ctx->producer);

		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, ctx->trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		ctx->trigger = NULL;
	}

	if (fd < 0)
		return 0;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
			      msix ? "x" : "", vector, pci_name(pdev));
	if (!ctx->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(ctx->name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSIx vector table resides in device memory which may be cleared
	 * via backdoor resets.  We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful.  To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret) {
		kfree(ctx->name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	ctx->producer.token = trigger;
	ctx->producer.irq = irq;
	ret = irq_bypass_register_producer(&ctx->producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 ctx->producer.token, ret);

		ctx->producer.token = NULL;
	}
	ctx->trigger = trigger;

	return 0;
}
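
/*
 * Wire up (or, when fds == NULL, tear down) eventfd triggers for a
 * contiguous block of vectors, rolling back any vectors already
 * configured if one of them fails.
 */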
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	unsigned int i, j;
	int ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (i = start; i < j; i++)
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	u16 cmd;

	for (i = 0; i < vdev->num_ctx; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (ctx) {
			vfio_virqfd_disable(&ctx->unmask);
			vfio_virqfd_disable(&ctx->mask);
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
		}
	}

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	vfio_irq_ctx_free_all(vdev);
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
		int32_t fd = *(int32_t *)data;

		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &ctx->unmask, fd);

		vfio_virqfd_disable(&ctx->unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}
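
/*
 * As with INTx above, a count of 0 with VFIO_IRQ_SET_DATA_NONE is the
 * VFIO_DEVICE_SET_IRQS convention for disabling the entire index.
 */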
static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (!ctx || !ctx->trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(ctx->trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(ctx->trigger, 1);
		}
	}
	return 0;
}

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}
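
/*
 * Dispatch for VFIO_DEVICE_SET_IRQS.  As an illustrative sketch (not part
 * of this file), userspace typically reaches the MSI trigger path with
 * something like:
 *
 *	struct vfio_irq_set *set;
 *	int32_t efd = eventfd(0, EFD_CLOEXEC);
 *
 *	set = malloc(sizeof(*set) + sizeof(int32_t));
 *	set->argsz = sizeof(*set) + sizeof(int32_t);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(&set->data, &efd, sizeof(efd));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 * which arrives here with index/start/count/flags decoded from the ioctl.
 */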
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}