/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "vfio_pci_private.h"

/*
 * IRQfd - generic
 */
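/*
 * A virqfd ties an eventfd to a device action: when the eventfd is
 * signalled, @handler is called from the wait-queue callback (atomic
 * context) and, if it returns nonzero or is NULL, @thread is run from
 * the @inject work item.  @pvirqfd points back at the slot in the
 * device that references this virqfd so teardown can be detected, and
 * @shutdown defers release to the cleanup workqueue.
 */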
struct virqfd {
	struct vfio_pci_device	*vdev;
	struct eventfd_ctx	*eventfd;
	int			(*handler)(struct vfio_pci_device *, void *);
	void			(*thread)(struct vfio_pci_device *, void *);
	void			*data;
	struct work_struct	inject;
	wait_queue_t		wait;
	poll_table		pt;
	struct work_struct	shutdown;
	struct virqfd		**pvirqfd;
};

static struct workqueue_struct *vfio_irqfd_cleanup_wq;

int __init vfio_pci_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void vfio_pci_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}

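/*
 * Queue release of a virqfd on the cleanup workqueue.  Callers hold
 * either irqlock or the eventfd wait-queue lock, so the actual teardown
 * (which can sleep) must be deferred.
 */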
static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}

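/*
 * Wait-queue callback, run when the eventfd is signalled (POLLIN) or
 * released (POLLHUP).  This runs under the eventfd's wait-queue lock
 * and cannot sleep: the handler runs inline and anything needing
 * process context is punted to the inject work item.
 */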
static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN) {
		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->vdev, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & POLLHUP) {
		unsigned long flags;
		spin_lock_irqsave(&virqfd->vdev->irqlock, flags);

		/*
		 * The eventfd is closing.  If the virqfd has not yet been
		 * queued for release (i.e. the device's pointer to it is
		 * still valid), queue it now.  As with KVM irqfds, we know
		 * we won't race against the virqfd going away because we
		 * hold wqh->lock to get here.
		 */
		if (*(virqfd->pvirqfd) == virqfd) {
			*(virqfd->pvirqfd) = NULL;
			virqfd_deactivate(virqfd);
		}

		spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
	}

	return 0;
}

static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}

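/*
 * Workqueue teardown: detach from the eventfd wait queue, make sure any
 * pending inject work has finished, then drop the eventfd reference and
 * free the virqfd.
 */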
static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
}

static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->vdev, virqfd->data);
}

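/*
 * Attach a virqfd to the eventfd referenced by @fd and install it in the
 * slot pointed to by @pvirqfd (-EBUSY if the slot is already in use).
 * For example, INTx unmask notification below uses:
 *
 *	virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
 *		      vfio_send_intx_eventfd, NULL,
 *		      &vdev->ctx[0].unmask, fd);
 */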
static int virqfd_enable(struct vfio_pci_device *vdev,
			 int (*handler)(struct vfio_pci_device *, void *),
			 void (*thread)(struct vfio_pci_device *, void *),
			 void *data, struct virqfd **pvirqfd, int fd)
{
	struct file *file = NULL;
	struct eventfd_ctx *ctx = NULL;
	struct virqfd *virqfd;
	int ret = 0;
	unsigned int events;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	virqfd->vdev = vdev;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	file = eventfd_fget(fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	ctx = eventfd_ctx_fileget(file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto fail;
	}

	virqfd->eventfd = ctx;

	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&vdev->irqlock);

	if (*pvirqfd) {
		spin_unlock_irq(&vdev->irqlock);
		ret = -EBUSY;
		goto fail;
	}
	*pvirqfd = virqfd;

	spin_unlock_irq(&vdev->irqlock);

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = file->f_op->poll(file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN) {
		if ((!handler || handler(vdev, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fput(file);

	return 0;

fail:
	if (ctx && !IS_ERR(ctx))
		eventfd_ctx_put(ctx);

	if (file && !IS_ERR(file))
		fput(file);

	kfree(virqfd);

	return ret;
}

static void virqfd_disable(struct vfio_pci_device *vdev,
			   struct virqfd **pvirqfd)
{
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (*pvirqfd) {
		virqfd_deactivate(*pvirqfd);
		*pvirqfd = NULL;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 * Even if we don't queue the job, flush the wq to be sure it's
	 * been released.
	 */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}

/*
 * INTx
 */
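/*
 * Notify userspace of an INTx interrupt by signalling the trigger
 * eventfd, unless the user has virtually disabled INTx (virq_disabled).
 */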
static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
{
	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev, void *unused)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

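/*
 * Actual IRQ handler.  For exclusive interrupts (!pci_2_3), simply mask
 * at the line and claim the interrupt.  For potentially shared
 * interrupts, use pci_check_and_mask_intx() so we only claim and forward
 * interrupts that this device actually asserted.
 */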
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

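/*
 * Switch to INTx: allocate the single interrupt context.  The IRQ itself
 * is requested later by vfio_intx_set_signal().
 */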
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;
	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

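/*
 * Replace the INTx trigger eventfd: tear down any existing trigger and,
 * for fd >= 0, request the (possibly shared) interrupt line with the new
 * eventfd as its target.
 */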
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[0].trigger = trigger;

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && (vdev->ctx[0].masked || vdev->virq_disabled))
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	virqfd_disable(vdev, &vdev->ctx[0].unmask);
	virqfd_disable(vdev, &vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

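/*
 * Allocate per-vector contexts and enable MSI or MSI-X on the device for
 * @nvec vectors.  Triggers are wired up separately per vector via
 * vfio_msi_set_vector_signal().
 */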
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix(pdev, vdev->msix, nvec);
		if (ret) {
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_block(pdev, nvec);
		if (ret) {
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max MSI vectors -
		 * it is the log base 2 of the number of vectors, rounded
		 * up to the next power of two, i.e. ceil(log2(nvec)).
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

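/*
 * (Re)bind one MSI/MSI-X vector to an eventfd.  A negative fd just tears
 * down any existing trigger; otherwise the vector's interrupt is
 * requested with the eventfd context as its handler data so the hard IRQ
 * handler can signal it directly.
 */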
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int ret;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].trigger = trigger;

	return 0;
}

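/*
 * Apply an array of eventfds (or -1 entries) to a contiguous range of
 * vectors, unwinding any vectors already set up if one of them fails.
 */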
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		/*
		 * Unwind vectors we already set up.  Compare as signed so
		 * the loop terminates when j drops below a start of 0.
		 */
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

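/*
 * Tear down every vector's trigger and virqfds, then disable MSI/MSI-X
 * on the device and return to the "no interrupt" state.
 */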
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		virqfd_disable(vdev, &vdev->ctx[i].unmask);
		virqfd_disable(vdev, &vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
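/*
 * VFIO_DEVICE_SET_IRQS handlers.  The data type flag selects how the
 * action is applied: DATA_NONE performs it unconditionally, DATA_BOOL
 * gates it on a per-element u8, and DATA_EVENTFD associates an eventfd
 * with the action (installed for fd >= 0, removed for fd < 0).
 */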
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
					     vfio_send_intx_eventfd, NULL,
					     &vdev->ctx[0].unmask, fd);

		virqfd_disable(vdev, &vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

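/*
 * Entry point for the VFIO_DEVICE_SET_IRQS ioctl: select a handler based
 * on the IRQ index and the requested action, then pass through the
 * start/count range and user data.
 */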
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}