xref: /linux/drivers/vfio/pci/vfio_pci_intrs.c (revision 91ba548cfd5cc8ee93b9435527efb8fa4caf5c5e)
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "vfio_pci_private.h"

/*
 * IRQfd - generic
 */
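/*
 * A virqfd lets an eventfd act as an input into the driver: a wait-queue
 * callback is attached to the eventfd so that when it is signalled,
 * @handler runs directly from the wakeup path and, if the handler asks
 * for it (or is absent), @thread runs from a work item.  POLLHUP on the
 * eventfd tears the whole thing down via the cleanup workqueue below.
 * The INTx unmask eventfd wired up in vfio_pci_set_intx_unmask() is the
 * user of this interface in this file.
 */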
struct virqfd {
	struct vfio_pci_device	*vdev;
	struct eventfd_ctx	*eventfd;
	int			(*handler)(struct vfio_pci_device *, void *);
	void			(*thread)(struct vfio_pci_device *, void *);
	void			*data;
	struct work_struct	inject;
	wait_queue_t		wait;
	poll_table		pt;
	struct work_struct	shutdown;
	struct virqfd		**pvirqfd;
};

static struct workqueue_struct *vfio_irqfd_cleanup_wq;

int __init vfio_pci_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void vfio_pci_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}

static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}

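/*
 * Wait-queue callback, invoked with the eventfd's wait-queue lock held
 * whenever the eventfd is signalled or released.  POLLIN runs the
 * handler in place (atomic context) and defers any thread function to a
 * work item; POLLHUP means the eventfd is going away, so queue shutdown.
 */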
static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN) {
		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->vdev, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & POLLHUP)
		/* The eventfd is closing, detach from VFIO */
		virqfd_deactivate(virqfd);

	return 0;
}

static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}

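/*
 * Runs on the cleanup workqueue: detach our wait-queue entry from the
 * eventfd, make sure any queued inject work has finished, then drop the
 * eventfd reference, free the virqfd and clear the owner's pointer.
 */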
static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	struct virqfd **pvirqfd = virqfd->pvirqfd;
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
	*pvirqfd = NULL;
}

static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->vdev, virqfd->data);
}

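/*
 * Wire an eventfd (by file descriptor) to a handler/thread pair.  Only
 * one virqfd may be active per *pvirqfd slot.  Any level already pending
 * on the eventfd is serviced immediately so a signal raised before we
 * attached is not lost.  See vfio_pci_set_intx_unmask() below for the
 * INTx unmask user of this interface.
 */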
static int virqfd_enable(struct vfio_pci_device *vdev,
			 int (*handler)(struct vfio_pci_device *, void *),
			 void (*thread)(struct vfio_pci_device *, void *),
			 void *data, struct virqfd **pvirqfd, int fd)
{
	struct file *file = NULL;
	struct eventfd_ctx *ctx = NULL;
	struct virqfd *virqfd;
	int ret = 0;
	unsigned int events;

	if (*pvirqfd)
		return -EBUSY;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	*pvirqfd = virqfd;
	virqfd->vdev = vdev;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	file = eventfd_fget(fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	ctx = eventfd_ctx_fileget(file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto fail;
	}

	virqfd->eventfd = ctx;

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = file->f_op->poll(file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN) {
		if ((!handler || handler(vdev, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fput(file);

	return 0;

fail:
	if (ctx && !IS_ERR(ctx))
		eventfd_ctx_put(ctx);

	if (file && !IS_ERR(file))
		fput(file);

	kfree(virqfd);
	*pvirqfd = NULL;

	return ret;
}

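/*
 * Tear down a virqfd, if one is active, and wait for the shutdown work
 * to finish so the caller knows the handler can no longer run.
 */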
static void virqfd_disable(struct virqfd *virqfd)
{
	if (!virqfd)
		return;

	virqfd_deactivate(virqfd);

	/* Block until we know all outstanding shutdown jobs have completed. */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}

/*
 * INTx
 */
static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
{
	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

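/*
 * Mask INTx on behalf of the user.  ctx[0].masked tracks the virtual
 * mask state; DisINTx-capable (PCI 2.3) devices are masked at the
 * device, everything else by disabling the host interrupt line.
 */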
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev, void *unused)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

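/*
 * Hard interrupt handler for INTx.  Without DisINTx support the line is
 * exclusive to us, so disable it until userspace unmasks; with DisINTx
 * (and possibly a shared line) check-and-mask tells us whether this
 * device is actually asserting before we claim and forward the event.
 */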
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;
	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

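/*
 * (Re)wire the INTx trigger eventfd.  Any existing trigger and irq are
 * torn down first; fd < 0 leaves the interrupt disconnected.  The irq is
 * requested shared only for DisINTx-capable devices.
 */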
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[0].trigger = trigger;

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && (vdev->ctx[0].masked || vdev->virq_disabled))
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	virqfd_disable(vdev->ctx[0].unmask);
	virqfd_disable(vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

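/*
 * Enable MSI or MSI-X with nvec vectors: allocate a per-vector context
 * array, enable the capability on the device, and record how many
 * vectors the user may address.  For MSI, msi_qmax caches the value used
 * when virtualizing the capability's max-vectors field.
 */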
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix(pdev, vdev->msix, nvec);
		if (ret) {
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_block(pdev, nvec);
		if (ret) {
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the number of vectors rounded up to the next power
		 * of two, expressed as log2 (i.e. the MSI capability's
		 * Multiple Message Capable encoding).
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

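/*
 * Connect or disconnect the eventfd trigger for a single MSI/MSI-X
 * vector.  Any previous trigger is released first; fd < 0 just tears the
 * vector down.  The eventfd context itself is used as the irq cookie so
 * the hard irq handler can signal it directly.
 */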
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int ret;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].trigger = trigger;

	return 0;
}

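/*
 * Apply an array of eventfds (or -1 entries) to a contiguous range of
 * vectors, unwinding the vectors already configured if any one fails.
 */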
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		/*
		 * Compare as signed: start is unsigned, so without the
		 * cast j == -1 would wrap and the loop would never end.
		 */
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		virqfd_disable(vdev->ctx[i].unmask);
		virqfd_disable(vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
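/*
 * For reference, a rough sketch (not part of this driver) of how
 * userspace typically reaches the handlers below: it packs a struct
 * vfio_irq_set with an action/data type and calls VFIO_DEVICE_SET_IRQS
 * on the device fd.  The names device_fd and efd, and the field values,
 * are illustrative only:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	size_t sz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
 *	struct vfio_irq_set *set = malloc(sz);
 *
 *	set->argsz = sz;
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *		     VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(&set->data, &efd, sizeof(int32_t));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 * The driver's ioctl path copies the data[] payload and lands in
 * vfio_pci_set_irqs_ioctl() at the bottom of this file.
 */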
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
					     vfio_send_intx_eventfd, NULL,
					     &vdev->ctx[0].unmask, fd);

		virqfd_disable(vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

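/*
 * Entry point from the VFIO_DEVICE_SET_IRQS ioctl: pick the handler for
 * the requested irq index and action, or return -ENOTTY if the
 * combination isn't supported.
 */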
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}