xref: /linux/drivers/vfio/pci/vfio_pci_intrs.c (revision f26e8817b235d8764363bffcc9cbfc61867371f2)
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"

/*
 * INTx
 */
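/*
 * Note the (opaque, unused) signature below: vfio_send_intx_eventfd() is
 * called both directly and as the "thread" callback passed to
 * vfio_virqfd_enable() for eventfd-driven unmasking, which expects this
 * two-pointer form.
 */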
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using INTx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use pci_check_and_mask_intx() here because we
		 * always want to mask, not only when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}
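
/*
 * For reference, the userspace request that lands in the mask path above
 * goes through the VFIO_DEVICE_SET_IRQS ioctl.  A minimal sketch, assuming
 * device_fd is an open vfio device file descriptor (error handling
 * omitted):
 *
 *	struct vfio_irq_set irq_set = {
 *		.argsz	= sizeof(irq_set),
 *		.flags	= VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
 *		.index	= VFIO_PCI_INTX_IRQ_INDEX,
 *		.start	= 0,
 *		.count	= 1,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
 */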

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * If an interrupt is already pending, unmasking the
		 * hardware would make it fire straight back through the
		 * handler.  Avoid that round trip by leaving the device
		 * masked and asking the caller to re-send the interrupt
		 * to the user directly.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else {
			enable_irq(pdev->irq);
		}

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
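
/*
 * INTx is level triggered, so the handler above always masks the
 * interrupt at the host (DisINTx for PCI 2.3 devices, disable_irq()
 * otherwise) before signaling the eventfd.  Userspace services the
 * device and then unmasks, either through the ACTION_UNMASK ioctl or
 * via an unmask eventfd wired up with vfio_virqfd_enable(); until
 * then, the line cannot fire again.
 */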

static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore the masking at the
	 * hardware as well.  Devices supporting DisINTx can be masked
	 * here; non-PCI-2.3 devices have to wait until the interrupt is
	 * actually requested in vfio_intx_set_signal().
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * The DisINTx bit sticks across the free_irq()/request_irq()
	 * above; a disable_irq() does not.  Re-assert the masking for
	 * non-PCI-2.3 devices if the virtual state says masked.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}
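
/*
 * A minimal userspace sketch of installing the INTx trigger handled
 * above, assuming device_fd is an open vfio device file descriptor
 * (error handling omitted):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct vfio_irq_set *set = malloc(sizeof(*set) + sizeof(int32_t));
 *
 *	set->argsz = sizeof(*set) + sizeof(int32_t);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_INTX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &efd, sizeof(int32_t));
 *
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 * Calling again with VFIO_IRQ_SET_DATA_NONE and count == 0 tears the
 * trigger down (see vfio_pci_set_intx_trigger() below).
 */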

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
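
/*
 * MSI and MSI-X are edge triggered, so unlike INTx there is no masking
 * dance: the handler above simply kicks the eventfd from hard irq
 * context.  When the irq bypass producer registered in
 * vfio_msi_set_vector_signal() is paired with a consumer (e.g. KVM with
 * posted interrupts), even this handler is skipped and the interrupt is
 * delivered to the guest directly.
 */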

static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kcalloc(nvec, sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msix(pdev);
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_range(pdev, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msi(pdev);
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the ceiling of the log base 2 of the number of
		 * vectors, which is how the MSI capability encodes a
		 * vector count.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}
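
	/*
	 * Worked examples of the fls() expression above: nvec = 1 gives
	 * fls(1) - 1 = 0 (2^0 = 1 vector); nvec = 3 gives fls(5) - 1 = 2
	 * (rounding up to 2^2 = 4); nvec = 8 gives fls(15) - 1 = 3.
	 */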

	return 0;
}

static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSI-X vector table resides in device memory which may be
	 * cleared via backdoor resets.  We don't allow direct access to
	 * the vector table, so even if a userspace driver attempts to
	 * save/restore around such a reset, the restore would fail.  To
	 * avoid this, restore the cached value of the message prior to
	 * enabling.
	 */
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

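	/*
	 * Register this irq/eventfd pair as an irq bypass producer so a
	 * bypass consumer (e.g. KVM using VT-d posted interrupts) can
	 * deliver the interrupt to the guest without bouncing through the
	 * host handler.  Failure is non-fatal; delivery falls back to
	 * vfio_msihandler() signaling the eventfd.
	 */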
	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret))
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration failed: %d\n",
			 vdev->ctx[vector].producer.token, ret);

	vdev->ctx[vector].trigger = trigger;

	return 0;
}

static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		/* Unwind from the vector that failed back down to start */
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}
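
/*
 * A minimal userspace sketch of the block interface above: enable two
 * MSI-X vectors, each signaling its own eventfd.  device_fd, efd0 and
 * efd1 are placeholders; error handling omitted:
 *
 *	struct vfio_irq_set *set = malloc(sizeof(*set) + 2 * sizeof(int32_t));
 *	int32_t *fds = (int32_t *)set->data;
 *
 *	set->argsz = sizeof(*set) + 2 * sizeof(int32_t);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 2;
 *	fds[0] = efd0;
 *	fds[1] = efd1;
 *
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 * Since no MSI-X index is enabled yet, vfio_pci_set_msi_trigger() below
 * first calls vfio_msi_enable(vdev, start + count, true) and then wires
 * up the eventfds through vfio_msi_set_block().
 */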

static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	if (msix) {
		pci_disable_msix(pdev);
		kfree(vdev->msix);
	} else {
		pci_disable_msi(pdev);
	}

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *)vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	/* count == 0 with DATA_NONE is the convention for disabling */
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	/* DATA_NONE and DATA_BOOL let userspace inject an interrupt */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);

	/* As with INTx, count == 0 with DATA_NONE disables the index */
	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	/* DATA_NONE and DATA_BOOL manually trigger already-enabled vectors */
	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;

			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}
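
/*
 * Shared plumbing for the single-eventfd error and request interrupts
 * below.  DATA_EVENTFD installs a new eventfd or, with fd == -1, tears
 * the existing one down; DATA_NONE with count == 0 also tears it down,
 * while DATA_NONE with a count, or DATA_BOOL with a true value, signals
 * the existing eventfd, giving userspace a loopback self-test.
 */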

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enable loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

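/*
 * Entry point for the VFIO_DEVICE_SET_IRQS ioctl; the ioctl handler has
 * already validated argsz/flags and copied in any data payload, so this
 * only dispatches on the irq index and requested action.  err_trigger is
 * signaled on AER error events and req_trigger when the host requests
 * that userspace release the device.
 */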
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
699