xref: /linux/drivers/vfio/pci/vfio_pci_intrs.c (revision c462a8c5d98877b76cf229d3d605d2a865aa9c9e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * VFIO PCI interrupt handling
4  *
5  * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
6  *     Author: Alex Williamson <alex.williamson@redhat.com>
7  *
8  * Derived from original vfio:
9  * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
10  * Author: Tom Lyon, pugs@cisco.com
11  */
12 
13 #include <linux/device.h>
14 #include <linux/interrupt.h>
15 #include <linux/eventfd.h>
16 #include <linux/msi.h>
17 #include <linux/pci.h>
18 #include <linux/file.h>
19 #include <linux/vfio.h>
20 #include <linux/wait.h>
21 #include <linux/slab.h>
22 
23 #include "vfio_pci_priv.h"
24 
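/*
 * Per-interrupt context.  @trigger is the eventfd signaled to userspace when
 * the interrupt fires.  @unmask and @mask are virqfd handles for
 * eventfd-driven unmask/mask requests (only unmask is currently reachable
 * via the ioctl path).  @name is the devname passed to request_irq() and
 * shown in /proc/interrupts.  @masked tracks the virtual INTx mask state.
 * @producer registers the interrupt with the IRQ bypass framework so a
 * consumer such as KVM can take delivery directly.
 */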
25 struct vfio_pci_irq_ctx {
26 	struct eventfd_ctx	*trigger;
27 	struct virqfd		*unmask;
28 	struct virqfd		*mask;
29 	char			*name;
30 	bool			masked;
31 	struct irq_bypass_producer	producer;
32 };
33 
34 static bool irq_is(struct vfio_pci_core_device *vdev, int type)
35 {
36 	return vdev->irq_type == type;
37 }
38 
39 static bool is_intx(struct vfio_pci_core_device *vdev)
40 {
41 	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
42 }
43 
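/* True when no interrupt mode (INTx, MSI or MSI-X) is currently enabled. */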
44 static bool is_irq_none(struct vfio_pci_core_device *vdev)
45 {
46 	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
47 		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
48 		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
49 }
50 
51 /*
52  * INTx
53  */
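/*
 * Signal the INTx trigger eventfd unless the user has virtually disabled
 * INTx (virq_disabled mirrors the user's view of the INTx Disable bit in
 * PCI_COMMAND).  Also serves as the virqfd "thread" callback when an
 * unmask eventfd is armed.
 */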
54 static void vfio_send_intx_eventfd(void *opaque, void *unused)
55 {
56 	struct vfio_pci_core_device *vdev = opaque;
57 
58 	if (likely(is_intx(vdev) && !vdev->virq_disabled))
59 		eventfd_signal(vdev->ctx[0].trigger, 1);
60 }
61 
62 void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
63 {
64 	struct pci_dev *pdev = vdev->pdev;
65 	unsigned long flags;
66 
67 	spin_lock_irqsave(&vdev->irqlock, flags);
68 
69 	/*
70 	 * Masking can come from interrupt, ioctl, or config space
71 	 * via INTx disable.  The latter means this can get called
72 	 * even when not using intx delivery.  In this case, just
73 	 * try to have the physical bit follow the virtual bit.
74 	 */
75 	if (unlikely(!is_intx(vdev))) {
76 		if (vdev->pci_2_3)
77 			pci_intx(pdev, 0);
78 	} else if (!vdev->ctx[0].masked) {
79 		/*
80 		 * Can't use check_and_mask here because we always want to
81 		 * mask, not just when something is pending.
82 		 */
83 		if (vdev->pci_2_3)
84 			pci_intx(pdev, 0);
85 		else
86 			disable_irq_nosync(pdev->irq);
87 
88 		vdev->ctx[0].masked = true;
89 	}
90 
91 	spin_unlock_irqrestore(&vdev->irqlock, flags);
92 }
93 
94 /*
95  * If this is triggered by an eventfd, we can't call eventfd_signal
96  * or else we'll deadlock on the eventfd wait queue.  Return >0 when
97  * a signal is necessary, which can then be handled via a work queue
98  * or directly depending on the caller.
99  */
100 static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
101 {
102 	struct vfio_pci_core_device *vdev = opaque;
103 	struct pci_dev *pdev = vdev->pdev;
104 	unsigned long flags;
105 	int ret = 0;
106 
107 	spin_lock_irqsave(&vdev->irqlock, flags);
108 
109 	/*
110 	 * Unmasking comes from ioctl or config, so again, have the
111 	 * physical bit follow the virtual even when not using INTx.
112 	 */
113 	if (unlikely(!is_intx(vdev))) {
114 		if (vdev->pci_2_3)
115 			pci_intx(pdev, 1);
116 	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
117 		/*
118 		 * A pending interrupt here would immediately trigger,
119 		 * but we can avoid that overhead by just re-sending
120 		 * the interrupt to the user.
121 		 */
122 		if (vdev->pci_2_3) {
123 			if (!pci_check_and_unmask_intx(pdev))
124 				ret = 1;
125 		} else
126 			enable_irq(pdev->irq);
127 
128 		vdev->ctx[0].masked = (ret > 0);
129 	}
130 
131 	spin_unlock_irqrestore(&vdev->irqlock, flags);
132 
133 	return ret;
134 }
135 
136 void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
137 {
138 	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
139 		vfio_send_intx_eventfd(vdev, NULL);
140 }
141 
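/*
 * Hard IRQ handler for user INTx.  Without DisINTx the line is requested
 * exclusively, so mask at the irqchip and claim the interrupt.  A
 * DisINTx-capable line may be shared, so only claim it if this device
 * asserted it and could be masked.  The eventfd is signaled outside the
 * spinlock.
 */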
142 static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
143 {
144 	struct vfio_pci_core_device *vdev = dev_id;
145 	unsigned long flags;
146 	int ret = IRQ_NONE;
147 
148 	spin_lock_irqsave(&vdev->irqlock, flags);
149 
150 	if (!vdev->pci_2_3) {
151 		disable_irq_nosync(vdev->pdev->irq);
152 		vdev->ctx[0].masked = true;
153 		ret = IRQ_HANDLED;
154 	} else if (!vdev->ctx[0].masked &&  /* may be shared */
155 		   pci_check_and_mask_intx(vdev->pdev)) {
156 		vdev->ctx[0].masked = true;
157 		ret = IRQ_HANDLED;
158 	}
159 
160 	spin_unlock_irqrestore(&vdev->irqlock, flags);
161 
162 	if (ret == IRQ_HANDLED)
163 		vfio_send_intx_eventfd(vdev, NULL);
164 
165 	return ret;
166 }
167 
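/*
 * Switch the device to INTx mode: allocate the single interrupt context,
 * seed the virtual mask state from virq_disabled and, for DisINTx-capable
 * devices, mirror it to the hardware.  request_irq() happens later in
 * vfio_intx_set_signal() once userspace supplies a trigger eventfd.
 */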
168 static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
169 {
170 	if (!is_irq_none(vdev))
171 		return -EINVAL;
172 
173 	if (!vdev->pdev->irq)
174 		return -ENODEV;
175 
176 	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
177 	if (!vdev->ctx)
178 		return -ENOMEM;
179 
180 	vdev->num_ctx = 1;
181 
182 	/*
183 	 * If the virtual interrupt is masked, restore it.  Devices
184 	 * supporting DisINTx can be masked at the hardware level
185 	 * here; non-PCI-2.3 devices will have to wait until the
186 	 * interrupt is enabled.
187 	 */
188 	vdev->ctx[0].masked = vdev->virq_disabled;
189 	if (vdev->pci_2_3)
190 		pci_intx(vdev->pdev, !vdev->ctx[0].masked);
191 
192 	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
193 
194 	return 0;
195 }
196 
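/*
 * Replace the INTx trigger eventfd: tear down any existing handler first,
 * then, for fd >= 0, install the new eventfd and request the (possibly
 * shared) interrupt line.
 */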
197 static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
198 {
199 	struct pci_dev *pdev = vdev->pdev;
200 	unsigned long irqflags = IRQF_SHARED;
201 	struct eventfd_ctx *trigger;
202 	unsigned long flags;
203 	int ret;
204 
205 	if (vdev->ctx[0].trigger) {
206 		free_irq(pdev->irq, vdev);
207 		kfree(vdev->ctx[0].name);
208 		eventfd_ctx_put(vdev->ctx[0].trigger);
209 		vdev->ctx[0].trigger = NULL;
210 	}
211 
212 	if (fd < 0) /* Disable only */
213 		return 0;
214 
215 	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
216 				      pci_name(pdev));
217 	if (!vdev->ctx[0].name)
218 		return -ENOMEM;
219 
220 	trigger = eventfd_ctx_fdget(fd);
221 	if (IS_ERR(trigger)) {
222 		kfree(vdev->ctx[0].name);
223 		return PTR_ERR(trigger);
224 	}
225 
226 	vdev->ctx[0].trigger = trigger;
227 
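	/*
	 * Without DisINTx the line cannot be masked at the device, so it is
	 * not safe to share it with other drivers; request it exclusively.
	 */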
228 	if (!vdev->pci_2_3)
229 		irqflags = 0;
230 
231 	ret = request_irq(pdev->irq, vfio_intx_handler,
232 			  irqflags, vdev->ctx[0].name, vdev);
233 	if (ret) {
234 		vdev->ctx[0].trigger = NULL;
235 		kfree(vdev->ctx[0].name);
236 		eventfd_ctx_put(trigger);
237 		return ret;
238 	}
239 
240 	/*
241 	 * INTx disable will stick across the new irq setup;
242 	 * disable_irq won't.
243 	 */
244 	spin_lock_irqsave(&vdev->irqlock, flags);
245 	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
246 		disable_irq_nosync(pdev->irq);
247 	spin_unlock_irqrestore(&vdev->irqlock, flags);
248 
249 	return 0;
250 }
251 
252 static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
253 {
254 	vfio_virqfd_disable(&vdev->ctx[0].unmask);
255 	vfio_virqfd_disable(&vdev->ctx[0].mask);
256 	vfio_intx_set_signal(vdev, -1);
257 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
258 	vdev->num_ctx = 0;
259 	kfree(vdev->ctx);
260 }
261 
262 /*
263  * MSI/MSI-X
264  */
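/*
 * Each MSI/MSI-X vector is requested with its eventfd context as dev_id,
 * so the handler simply forwards the interrupt to userspace.
 */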
265 static irqreturn_t vfio_msihandler(int irq, void *arg)
266 {
267 	struct eventfd_ctx *trigger = arg;
268 
269 	eventfd_signal(trigger, 1);
270 	return IRQ_HANDLED;
271 }
272 
273 static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
274 {
275 	struct pci_dev *pdev = vdev->pdev;
276 	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
277 	int ret;
278 	u16 cmd;
279 
280 	if (!is_irq_none(vdev))
281 		return -EINVAL;
282 
283 	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
284 	if (!vdev->ctx)
285 		return -ENOMEM;
286 
287 	/* return the number of supported vectors if we can't get all: */
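	/*
	 * The MSI-X vector table lives in device (BAR) memory, so memory
	 * decode must stay enabled while vectors are allocated; memory_lock
	 * also keeps userspace from toggling it underneath us.
	 */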
288 	cmd = vfio_pci_memory_lock_and_enable(vdev);
289 	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
290 	if (ret < nvec) {
291 		if (ret > 0)
292 			pci_free_irq_vectors(pdev);
293 		vfio_pci_memory_unlock_and_restore(vdev, cmd);
294 		kfree(vdev->ctx);
295 		return ret;
296 	}
297 	vfio_pci_memory_unlock_and_restore(vdev, cmd);
298 
299 	vdev->num_ctx = nvec;
300 	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
301 				VFIO_PCI_MSI_IRQ_INDEX;
302 
303 	if (!msix) {
304 		/*
305 		 * Compute the virtual hardware field for max MSI vectors -
306 		 * it is the log base 2 of the vector count, rounded up.
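		 * For example, nvec == 3 gives msi_qmax == 2, i.e. the virtual
		 * MSI capability advertises 2^2 = 4 vectors.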
307 		 */
308 		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
309 	}
310 
311 	return 0;
312 }
313 
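/*
 * Set (fd >= 0) or tear down (fd < 0) the trigger for a single vector:
 * free any existing handler, then install the new eventfd, request the
 * Linux irq with the eventfd as dev_id and register an IRQ bypass
 * producer for it.
 */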
314 static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
315 				      int vector, int fd, bool msix)
316 {
317 	struct pci_dev *pdev = vdev->pdev;
318 	struct eventfd_ctx *trigger;
319 	int irq, ret;
320 	u16 cmd;
321 
322 	if (vector < 0 || vector >= vdev->num_ctx)
323 		return -EINVAL;
324 
325 	irq = pci_irq_vector(pdev, vector);
326 
327 	if (vdev->ctx[vector].trigger) {
328 		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
329 
330 		cmd = vfio_pci_memory_lock_and_enable(vdev);
331 		free_irq(irq, vdev->ctx[vector].trigger);
332 		vfio_pci_memory_unlock_and_restore(vdev, cmd);
333 
334 		kfree(vdev->ctx[vector].name);
335 		eventfd_ctx_put(vdev->ctx[vector].trigger);
336 		vdev->ctx[vector].trigger = NULL;
337 	}
338 
339 	if (fd < 0)
340 		return 0;
341 
342 	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
343 					   msix ? "x" : "", vector,
344 					   pci_name(pdev));
345 	if (!vdev->ctx[vector].name)
346 		return -ENOMEM;
347 
348 	trigger = eventfd_ctx_fdget(fd);
349 	if (IS_ERR(trigger)) {
350 		kfree(vdev->ctx[vector].name);
351 		return PTR_ERR(trigger);
352 	}
353 
354 	/*
355 	 * The MSI-X vector table resides in device memory, which may be cleared
356 	 * via backdoor resets. We don't allow direct access to the vector
357 	 * table so even if a userspace driver attempts to save/restore around
358 	 * such a reset it would be unsuccessful. To avoid this, restore the
359 	 * cached value of the message prior to enabling.
360 	 */
361 	cmd = vfio_pci_memory_lock_and_enable(vdev);
362 	if (msix) {
363 		struct msi_msg msg;
364 
365 		get_cached_msi_msg(irq, &msg);
366 		pci_write_msi_msg(irq, &msg);
367 	}
368 
369 	ret = request_irq(irq, vfio_msihandler, 0,
370 			  vdev->ctx[vector].name, trigger);
371 	vfio_pci_memory_unlock_and_restore(vdev, cmd);
372 	if (ret) {
373 		kfree(vdev->ctx[vector].name);
374 		eventfd_ctx_put(trigger);
375 		return ret;
376 	}
377 
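	/*
	 * Register with the IRQ bypass framework so a consumer matching this
	 * eventfd token (e.g. KVM with posted interrupts) can take delivery
	 * without bouncing through the host handler.  Failure is not fatal;
	 * we only lose the bypass optimization.
	 */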
378 	vdev->ctx[vector].producer.token = trigger;
379 	vdev->ctx[vector].producer.irq = irq;
380 	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
381 	if (unlikely(ret)) {
382 		dev_info(&pdev->dev,
383 			 "irq bypass producer (token %p) registration fails: %d\n",
384 			 vdev->ctx[vector].producer.token, ret);
385 
386 		vdev->ctx[vector].producer.token = NULL;
387 	}
388 	vdev->ctx[vector].trigger = trigger;
389 
390 	return 0;
391 }
392 
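/*
 * Apply an array of eventfds (or -1 to disable) to the contiguous vector
 * range [start, start + count).  On failure, unwind the vectors configured
 * by this call so the operation is all-or-nothing.
 */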
393 static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
394 			      unsigned count, int32_t *fds, bool msix)
395 {
396 	int i, j, ret = 0;
397 
398 	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
399 		return -EINVAL;
400 
401 	for (i = 0, j = start; i < count && !ret; i++, j++) {
402 		int fd = fds ? fds[i] : -1;
403 		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
404 	}
405 
406 	if (ret) {
407 		for (--j; j >= (int)start; j--)
408 			vfio_msi_set_vector_signal(vdev, j, -1, msix);
409 	}
410 
411 	return ret;
412 }
413 
414 static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
415 {
416 	struct pci_dev *pdev = vdev->pdev;
417 	int i;
418 	u16 cmd;
419 
420 	for (i = 0; i < vdev->num_ctx; i++) {
421 		vfio_virqfd_disable(&vdev->ctx[i].unmask);
422 		vfio_virqfd_disable(&vdev->ctx[i].mask);
423 	}
424 
425 	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
426 
427 	cmd = vfio_pci_memory_lock_and_enable(vdev);
428 	pci_free_irq_vectors(pdev);
429 	vfio_pci_memory_unlock_and_restore(vdev, cmd);
430 
431 	/*
432 	 * pci_free_irq_vectors() clears DisINTx via pci_intx_for_msi() on both
433 	 * the MSI and MSI-X shutdown paths.  Re-assert it for NoINTx devices.
434 	 */
435 	if (vdev->nointx)
436 		pci_intx(pdev, 0);
437 
438 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
439 	vdev->num_ctx = 0;
440 	kfree(vdev->ctx);
441 }
442 
443 /*
444  * IOCTL support
445  */
446 static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
447 				    unsigned index, unsigned start,
448 				    unsigned count, uint32_t flags, void *data)
449 {
450 	if (!is_intx(vdev) || start != 0 || count != 1)
451 		return -EINVAL;
452 
453 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
454 		vfio_pci_intx_unmask(vdev);
455 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
456 		uint8_t unmask = *(uint8_t *)data;
457 		if (unmask)
458 			vfio_pci_intx_unmask(vdev);
459 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
460 		int32_t fd = *(int32_t *)data;
461 		if (fd >= 0)
462 			return vfio_virqfd_enable((void *) vdev,
463 						  vfio_pci_intx_unmask_handler,
464 						  vfio_send_intx_eventfd, NULL,
465 						  &vdev->ctx[0].unmask, fd);
466 
467 		vfio_virqfd_disable(&vdev->ctx[0].unmask);
468 	}
469 
470 	return 0;
471 }
472 
473 static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
474 				  unsigned index, unsigned start,
475 				  unsigned count, uint32_t flags, void *data)
476 {
477 	if (!is_intx(vdev) || start != 0 || count != 1)
478 		return -EINVAL;
479 
480 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
481 		vfio_pci_intx_mask(vdev);
482 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
483 		uint8_t mask = *(uint8_t *)data;
484 		if (mask)
485 			vfio_pci_intx_mask(vdev);
486 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
487 		return -ENOTTY; /* XXX implement me */
488 	}
489 
490 	return 0;
491 }
492 
493 static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
494 				     unsigned index, unsigned start,
495 				     unsigned count, uint32_t flags, void *data)
496 {
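	/* A zero count with DATA_NONE tears INTx down entirely. */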
497 	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
498 		vfio_intx_disable(vdev);
499 		return 0;
500 	}
501 
502 	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
503 		return -EINVAL;
504 
505 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
506 		int32_t fd = *(int32_t *)data;
507 		int ret;
508 
509 		if (is_intx(vdev))
510 			return vfio_intx_set_signal(vdev, fd);
511 
512 		ret = vfio_intx_enable(vdev);
513 		if (ret)
514 			return ret;
515 
516 		ret = vfio_intx_set_signal(vdev, fd);
517 		if (ret)
518 			vfio_intx_disable(vdev);
519 
520 		return ret;
521 	}
522 
523 	if (!is_intx(vdev))
524 		return -EINVAL;
525 
526 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
527 		vfio_send_intx_eventfd(vdev, NULL);
528 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
529 		uint8_t trigger = *(uint8_t *)data;
530 		if (trigger)
531 			vfio_send_intx_eventfd(vdev, NULL);
532 	}
533 	return 0;
534 }
535 
536 static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
537 				    unsigned index, unsigned start,
538 				    unsigned count, uint32_t flags, void *data)
539 {
540 	int i;
541 	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);
542 
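	/* As with INTx, a zero count with DATA_NONE disables MSI/MSI-X entirely. */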
543 	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
544 		vfio_msi_disable(vdev, msix);
545 		return 0;
546 	}
547 
548 	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
549 		return -EINVAL;
550 
551 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
552 		int32_t *fds = data;
553 		int ret;
554 
555 		if (vdev->irq_type == index)
556 			return vfio_msi_set_block(vdev, start, count,
557 						  fds, msix);
558 
559 		ret = vfio_msi_enable(vdev, start + count, msix);
560 		if (ret)
561 			return ret;
562 
563 		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
564 		if (ret)
565 			vfio_msi_disable(vdev, msix);
566 
567 		return ret;
568 	}
569 
570 	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
571 		return -EINVAL;
572 
573 	for (i = start; i < start + count; i++) {
574 		if (!vdev->ctx[i].trigger)
575 			continue;
576 		if (flags & VFIO_IRQ_SET_DATA_NONE) {
577 			eventfd_signal(vdev->ctx[i].trigger, 1);
578 		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
579 			uint8_t *bools = data;
580 			if (bools[i - start])
581 				eventfd_signal(vdev->ctx[i].trigger, 1);
582 		}
583 	}
584 	return 0;
585 }
586 
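/*
 * Manage a single eventfd-backed virtual interrupt (the error and request
 * IRQs below).  DATA_EVENTFD installs, replaces or (fd == -1) removes the
 * eventfd; DATA_NONE and DATA_BOOL signal the existing eventfd, giving
 * userspace a loopback test path.
 */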
587 static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
588 					   unsigned int count, uint32_t flags,
589 					   void *data)
590 {
591 	/* DATA_NONE/DATA_BOOL enables loopback testing */
592 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
593 		if (*ctx) {
594 			if (count) {
595 				eventfd_signal(*ctx, 1);
596 			} else {
597 				eventfd_ctx_put(*ctx);
598 				*ctx = NULL;
599 			}
600 			return 0;
601 		}
602 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
603 		uint8_t trigger;
604 
605 		if (!count)
606 			return -EINVAL;
607 
608 		trigger = *(uint8_t *)data;
609 		if (trigger && *ctx)
610 			eventfd_signal(*ctx, 1);
611 
612 		return 0;
613 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
614 		int32_t fd;
615 
616 		if (!count)
617 			return -EINVAL;
618 
619 		fd = *(int32_t *)data;
620 		if (fd == -1) {
621 			if (*ctx)
622 				eventfd_ctx_put(*ctx);
623 			*ctx = NULL;
624 		} else if (fd >= 0) {
625 			struct eventfd_ctx *efdctx;
626 
627 			efdctx = eventfd_ctx_fdget(fd);
628 			if (IS_ERR(efdctx))
629 				return PTR_ERR(efdctx);
630 
631 			if (*ctx)
632 				eventfd_ctx_put(*ctx);
633 
634 			*ctx = efdctx;
635 		}
636 		return 0;
637 	}
638 
639 	return -EINVAL;
640 }
641 
642 static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
643 				    unsigned index, unsigned start,
644 				    unsigned count, uint32_t flags, void *data)
645 {
646 	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
647 		return -EINVAL;
648 
649 	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
650 					       count, flags, data);
651 }
652 
653 static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
654 				    unsigned index, unsigned start,
655 				    unsigned count, uint32_t flags, void *data)
656 {
657 	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
658 		return -EINVAL;
659 
660 	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
661 					       count, flags, data);
662 }
663 
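/*
 * Entry point for the VFIO_DEVICE_SET_IRQS ioctl: dispatch on the IRQ index
 * and the requested action; combinations with no handler return -ENOTTY.
 *
 * Userspace arms a trigger eventfd with roughly the following (a sketch;
 * device_fd is the opened VFIO device file descriptor, error handling and
 * includes omitted):
 *
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(__s32)];
 *	struct vfio_irq_set *set = (void *)buf;
 *	__s32 fd = eventfd(0, EFD_CLOEXEC);
 *
 *	set->argsz = sizeof(buf);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &fd, sizeof(fd));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 */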
664 int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
665 			    unsigned index, unsigned start, unsigned count,
666 			    void *data)
667 {
668 	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
669 		    unsigned start, unsigned count, uint32_t flags,
670 		    void *data) = NULL;
671 
672 	switch (index) {
673 	case VFIO_PCI_INTX_IRQ_INDEX:
674 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
675 		case VFIO_IRQ_SET_ACTION_MASK:
676 			func = vfio_pci_set_intx_mask;
677 			break;
678 		case VFIO_IRQ_SET_ACTION_UNMASK:
679 			func = vfio_pci_set_intx_unmask;
680 			break;
681 		case VFIO_IRQ_SET_ACTION_TRIGGER:
682 			func = vfio_pci_set_intx_trigger;
683 			break;
684 		}
685 		break;
686 	case VFIO_PCI_MSI_IRQ_INDEX:
687 	case VFIO_PCI_MSIX_IRQ_INDEX:
688 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
689 		case VFIO_IRQ_SET_ACTION_MASK:
690 		case VFIO_IRQ_SET_ACTION_UNMASK:
691 			/* XXX Need masking support exported */
692 			break;
693 		case VFIO_IRQ_SET_ACTION_TRIGGER:
694 			func = vfio_pci_set_msi_trigger;
695 			break;
696 		}
697 		break;
698 	case VFIO_PCI_ERR_IRQ_INDEX:
699 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
700 		case VFIO_IRQ_SET_ACTION_TRIGGER:
701 			if (pci_is_pcie(vdev->pdev))
702 				func = vfio_pci_set_err_trigger;
703 			break;
704 		}
705 		break;
706 	case VFIO_PCI_REQ_IRQ_INDEX:
707 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
708 		case VFIO_IRQ_SET_ACTION_TRIGGER:
709 			func = vfio_pci_set_req_trigger;
710 			break;
711 		}
712 		break;
713 	}
714 
715 	if (!func)
716 		return -ENOTTY;
717 
718 	return func(vdev, index, start, count, flags, data);
719 }
720