Lines matching "irq" and "start" in drivers/vfio/pci/vfio_pci_intrs.c, grouped by function. Non-matching lines are elided ("..."); small elisions are filled in from context where the surrounding code makes them unambiguous.
// SPDX-License-Identifier: GPL-2.0-only
/* in irq_is(): */
	return vdev->irq_type == type;

/* in is_intx(): */
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;

/* in is_irq_none(): */
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
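/*
 * Note: vdev->irq_type acts as a small state machine.  It holds the active
 * VFIO_PCI_*_IRQ_INDEX, or VFIO_PCI_NUM_IRQS when no interrupt mode is
 * configured (see the resets in vfio_intx_enable()/vfio_intx_disable() and
 * vfio_msi_disable() below).
 */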
/* in vfio_irq_ctx_get(): */
	return xa_load(&vdev->ctx, index);

/* in vfio_irq_ctx_free(): */
	xa_erase(&vdev->ctx, index);

/* in vfio_irq_ctx_alloc(): */
	ret = xa_insert(&vdev->ctx, index, ctx, GFP_KERNEL_ACCOUNT);
/* in vfio_send_intx_eventfd(): */
	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		...
		struct eventfd_ctx *trigger = READ_ONCE(ctx->trigger);
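The eventfd signalled here is the trigger userspace registered for INTx. A minimal userspace consumer could look like the sketch below; wait_for_intx() and the fd's provenance are illustrative assumptions, while the read(2) semantics are standard eventfd behaviour:

#include <stdint.h>
#include <unistd.h>

/* Hypothetical helper: block until the kernel signals the trigger eventfd,
 * then return the number of interrupts accumulated since the last read.
 * efd is assumed to be the eventfd registered via VFIO_DEVICE_SET_IRQS. */
static long wait_for_intx(int efd)
{
	uint64_t count;

	if (read(efd, &count, sizeof(count)) != sizeof(count))
		return -1;

	return (long)count;
}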
/* in __vfio_pci_intx_mask(): */
	struct pci_dev *pdev = vdev->pdev;
	...
	lockdep_assert_held(&vdev->igate);

	spin_lock_irqsave(&vdev->irqlock, flags);
	...
	if (vdev->pci_2_3)
		pci_intx(pdev, 0);
	...
	if (!ctx->masked) {
		...
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		ctx->masked = true;
	}
	...
	spin_unlock_irqrestore(&vdev->irqlock, flags);
/* in vfio_pci_intx_mask(): */
	mutex_lock(&vdev->igate);
	...
	mutex_unlock(&vdev->igate);
/* in vfio_pci_intx_unmask_handler(): */
	struct pci_dev *pdev = vdev->pdev;
	...
	spin_lock_irqsave(&vdev->irqlock, flags);
	...
	if (vdev->pci_2_3)
		pci_intx(pdev, 1);
	...
	if (ctx->masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			...
		} else
			enable_irq(pdev->irq);

		ctx->masked = (ret > 0);
	}
	...
	spin_unlock_irqrestore(&vdev->irqlock, flags);
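/*
 * Elided above: in the mainline pci_2_3 branch, pci_check_and_unmask_intx()
 * reporting a still-pending interrupt sets ret = 1, so the line stays masked
 * and the caller re-signals the user's eventfd (per the comment) instead of
 * taking a fresh hard interrupt.
 */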
/* in __vfio_pci_intx_unmask(): */
	lockdep_assert_held(&vdev->igate);

/* in vfio_pci_intx_unmask(): */
	mutex_lock(&vdev->igate);
	...
	mutex_unlock(&vdev->igate);
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_irq_ctx *ctx = dev_id;
	struct vfio_pci_core_device *vdev = ctx->vdev;
	...
	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		...
	} else if (!ctx->masked &&	/* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		ctx->masked = true;
		...
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
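/*
 * Two masking strategies meet here: devices without DisINTx support hold an
 * exclusive line and must be masked at the irqchip via disable_irq_nosync(),
 * while PCI 2.3 devices use the DisINTx config bit through
 * pci_check_and_mask_intx(), which also reports whether this possibly shared
 * line was actually asserted by our device.  In the mainline handler, the
 * elided tail re-sends the trigger eventfd when the interrupt was handled.
 */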
/* in vfio_intx_enable(): */
	struct pci_dev *pdev = vdev->pdev;
	...
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED)
		return -ENODEV;

	name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
	if (!name)
		return -ENOMEM;
	...
		return -ENOMEM;

	ctx->name = name;
	ctx->trigger = trigger;
	ctx->vdev = vdev;

	/*
	 * ...
	 * Devices supporting DisINTx also reflect the current mask state in
	 * the physical DisINTx bit, which is not affected during IRQ setup.
	 *
	 * Devices without DisINTx support require an exclusive interrupt.
	 * IRQ masking is performed at the IRQ chip.  Again, igate protects
	 * against races during setup and IRQ handlers and irqfds are not
	 * yet active, therefore masked is stable and can be used to
	 * conditionally auto-enable the IRQ.
	 *
	 * irq_type must be stable while the IRQ handler is registered,
	 * therefore it must be set before request_irq().
	 */
	ctx->masked = vdev->virq_disabled;
	if (vdev->pci_2_3) {
		pci_intx(pdev, !ctx->masked);
		irqflags = IRQF_SHARED;
	} else {
		irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
	}

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, ctx->name, ctx);
	if (ret) {
		vdev->irq_type = VFIO_PCI_NUM_IRQS;
		...
	}
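Pairing the kernel side with userspace: the sketch below registers an eventfd as the INTx trigger through VFIO_DEVICE_SET_IRQS, the ioctl path that ultimately reaches vfio_intx_enable()/vfio_intx_set_signal(). intx_register_trigger() is a hypothetical helper; the UAPI structures and flags come from <linux/vfio.h>:

#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Returns the eventfd that will be signalled on INTx, or -1 on error. */
static int intx_register_trigger(int device_fd)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
	int32_t efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_INTX_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));

	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set) ? -1 : efd;
}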
/* in vfio_intx_set_signal(): */
	struct pci_dev *pdev = vdev->pdev;
	...
		return -EINVAL;

	old = ctx->trigger;

	WRITE_ONCE(ctx->trigger, trigger);

	/* Releasing an old ctx requires synchronizing in-flight users */
	if (old) {
		synchronize_irq(pdev->irq);
		vfio_virqfd_flush_thread(&ctx->unmask);
		eventfd_ctx_put(old);
	}
/* in vfio_intx_disable(): */
	struct pci_dev *pdev = vdev->pdev;
	...
	vfio_virqfd_disable(&ctx->unmask);
	vfio_virqfd_disable(&ctx->mask);
	free_irq(pdev->irq, ctx);
	if (ctx->trigger)
		eventfd_ctx_put(ctx->trigger);
	kfree(ctx->name);
	...
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	/* signals the vector's trigger eventfd; returns IRQ_HANDLED */
	...
}
/* in vfio_msi_enable(): */
	struct pci_dev *pdev = vdev->pdev;
	...
		return -EINVAL;
	...
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	/*
	 * Compute the virtual hardware field for max msi vectors -
	 * it is the log base 2 of the number of vectors.
	 */
	vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
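/*
 * Worked example: fls(nvec * 2 - 1) - 1 computes ceil(log2(nvec)).  For
 * nvec = 3, fls(5) - 1 = 3 - 1 = 2, so the emulated capability advertises
 * 2^2 = 4 vectors, the smallest power of two covering the allocation.
 */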
/*
 * vfio_msi_alloc_irq() returns the Linux IRQ number of an MSI or MSI-X device
 * interrupt vector.  If a Linux IRQ number is not available then a new
 * interrupt is allocated if dynamic MSI-X is supported.
 * ...
 * Interrupts are freed using pci_free_irq_vectors() when MSI/MSI-X is
 * disabled.
 */
/* in vfio_msi_alloc_irq(): */
	struct pci_dev *pdev = vdev->pdev;
	int irq;
	...
	irq = pci_irq_vector(pdev, vector);
	if (WARN_ON_ONCE(irq == 0))
		return -EINVAL;
	if (irq > 0 || !msix || !vdev->has_dyn_msix)
		return irq;
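/*
 * The elided tail is where dynamic MSI-X allocation happens: the mainline
 * implementation calls pci_msix_alloc_irq_at() to allocate a vector at this
 * index and returns its Linux IRQ number.
 */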
/* in vfio_msi_set_vector_signal(): */
	struct pci_dev *pdev = vdev->pdev;
	...
	int irq = -EINVAL, ret;
	...
		irq_bypass_unregister_producer(&ctx->producer);
		irq = pci_irq_vector(pdev, vector);
		...
		free_irq(irq, ctx->trigger);
		...
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
	...
	if (irq == -EINVAL) {
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		irq = vfio_msi_alloc_irq(vdev, vector, msix);
		if (irq < 0)
			return irq;
	}
	...
		return -ENOMEM;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
			      msix ? "x" : "", vector, pci_name(pdev));
	if (!ctx->name) {
		ret = -ENOMEM;
		...
	}
	...
	/*
	 * If the vector was previously allocated, refresh the on-device
	 * message data before enabling in case it had been cleared or
	 * corrupted (e.g. due to backdoor resets) since writing.
	 */
	...
		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	...
	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
	...
	ret = irq_bypass_register_producer(&ctx->producer, trigger, irq);
	if (ret)
		dev_info(&pdev->dev,
			 "irq bypass producer (eventfd %p) registration fails: %d\n",
			 trigger, ret);

	ctx->trigger = trigger;
	...
	kfree(ctx->name);
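/*
 * Note that irq bypass registration is best-effort: on failure the driver
 * only logs, and the vector keeps signalling userspace through the regular
 * eventfd path, typically just without the KVM posted-interrupt fast path.
 */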
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
	...
	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (i = start; i < j; i++)
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}
/* in vfio_msi_disable(): */
	struct pci_dev *pdev = vdev->pdev;
	...
	xa_for_each(&vdev->ctx, i, ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}
	...
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;
	...
		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &ctx->unmask, fd);

		vfio_virqfd_disable(&ctx->unmask);
static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;
	...
		return -ENOTTY; /* XXX implement me */
static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;
	...
		return -EINVAL;
static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	...
		return -EINVAL;
	...
		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		...
		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
	...
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		...
			eventfd_signal(ctx->trigger);
		...
		if (bools[i - start])
			eventfd_signal(ctx->trigger);
	}
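Once MSI-X is enabled with triggers as above, userspace can also pulse a vector in software, which exercises the eventfd_signal() loop at the end of this function. A hedged sketch (msix_pulse_vector() is a hypothetical helper; the UAPI is from <linux/vfio.h>):

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Fire the trigger for one already-configured MSI-X vector: DATA_NONE with
 * ACTION_TRIGGER carries no payload, so count = 1 at 'start' simply signals
 * the eventfd registered for that vector. */
static int msix_pulse_vector(int device_fd, unsigned int vector)
{
	struct vfio_irq_set set = {
		.argsz = sizeof(set),
		.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
		.index = VFIO_PCI_MSIX_IRQ_INDEX,
		.start = vector,
		.count = 1,
	};

	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &set);
}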
/* in vfio_pci_set_ctx_trigger_single(): */
	...
	if (!count)
		return -EINVAL;	/* in both the DATA_BOOL and DATA_EVENTFD arms */
	...
	if (fd == -1) {
		...	/* an fd of -1 tears down the existing trigger eventfd */
	}
	...
	return -EINVAL;
/* in vfio_pci_set_err_trigger(): */
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);

/* in vfio_pci_set_req_trigger(): */
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;
	...
	if (pci_is_pcie(vdev->pdev))
		func = vfio_pci_set_err_trigger;
	...
	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
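/*
 * The pci_is_pcie() check above (inside the elided dispatch switch) gates
 * VFIO_PCI_ERR_IRQ_INDEX: the error trigger is only wired up for PCIe
 * devices, where it is signalled from the driver's AER error_detected
 * handler; conventional PCI devices get -ENOTTY for that index.
 */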