xref: /linux/drivers/vfio/pci/vfio_pci_intrs.c (revision e34a0425b8ef524355811e7408dc1d53d08dc538)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX)
#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX)
#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev)))
#define irq_is(vdev, type) (vdev->irq_type == type)

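/*
 * Per-interrupt state.  INTx uses only ctx[0]; MSI/MSI-X allocate one
 * entry per vector.  @trigger is the eventfd signalled toward userspace
 * when the interrupt fires, @unmask is a virqfd allowing userspace to
 * drive INTx unmasking through an eventfd (@mask is its so far unwired
 * counterpart), @name is the string handed to request_irq(), @masked
 * tracks the virtual INTx mask state, and @producer registers the vector
 * with the IRQ bypass framework so that a consumer (e.g. KVM posted
 * interrupts) can take over delivery.
 */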
struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;
	struct virqfd		*unmask;
	struct virqfd		*mask;
	char			*name;
	bool			masked;
	struct irq_bypass_producer	producer;
};

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

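/*
 * Hard irq handler for INTx.  Without DisINTx support (!pci_2_3) the irq
 * was requested exclusively, so the line is ours: mask it at the irqchip
 * with disable_irq_nosync() and claim it.  DisINTx capable devices may
 * share the line, so only claim the interrupt if this device is actually
 * asserting it (pci_check_and_mask_intx()).  The eventfd is signalled
 * outside of irqlock in either case.
 */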
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

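/*
 * Wire (or tear down) the INTx trigger eventfd.  Any existing trigger and
 * irq are released first; fd < 0 stops there, leaving INTx enabled but
 * without a handler.  The irq is requested as shared only for DisINTx
 * capable devices, since DisINTx is what lets vfio_intx_handler()
 * disambiguate a shared line.
 */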
static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
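/*
 * MSI and MSI-X interrupts are edge triggered and never shared, so the
 * per-vector handler below does nothing but forward the interrupt to the
 * eventfd that userspace registered for that vector.
 */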
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	/* Return the number of supported vectors if we can't get them all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		kfree(vdev->ctx);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is ceil(log2(nvec)), i.e. the log base 2 of the number
		 * of vectors rounded up to the next power of two (e.g.
		 * nvec = 3 is reported as a maximum of 4 vectors, field
		 * value 2).
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;
	u16 cmd;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = pci_irq_vector(pdev, vector);

	if (vdev->ctx[vector].trigger) {
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, vdev->ctx[vector].trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSIx vector table resides in device memory which may be cleared
	 * via backdoor resets. We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful. To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
		"irq bypass producer (token %p) registration fails: %d\n",
		vdev->ctx[vector].producer.token, ret);

		vdev->ctx[vector].producer.token = NULL;
	}
	vdev->ctx[vector].trigger = trigger;

	return 0;
}

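/*
 * Program a contiguous range of MSI/MSI-X vectors from an array of
 * eventfds.  fds == NULL (or an fd of -1) tears the corresponding vectors
 * down; on failure, vectors already programmed by this call are unwound.
 */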
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	u16 cmd;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
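/*
 * Everything below is reached through the VFIO_DEVICE_SET_IRQS ioctl.  As
 * a rough illustration (userspace code, not part of this driver), a driver
 * might wire MSI-X vector 0 to an eventfd along these lines; the
 * authoritative structure and flag definitions live in
 * include/uapi/linux/vfio.h and "device_fd"/"efd" are placeholders:
 *
 *	struct vfio_irq_set *set;
 *	size_t sz = sizeof(*set) + sizeof(__s32);
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	set = malloc(sz);
 *	set->argsz = sz;
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &efd, sizeof(__s32));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 * Repeating the call with count = 0 and VFIO_IRQ_SET_DATA_NONE disables
 * the index again, as handled by the trigger functions below.
 */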
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

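/*
 * TRIGGER action for INTx: DATA_EVENTFD with a valid fd enables INTx if
 * necessary and installs the trigger, while fd = -1 removes the trigger;
 * count = 0 with DATA_NONE disables INTx entirely.  DATA_NONE/DATA_BOOL
 * with count = 1 fire the existing trigger from software.
 */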
static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

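/*
 * Common helper for the single-eventfd indexes (error and request
 * notification below).  DATA_EVENTFD installs, replaces or (fd == -1)
 * clears the stored eventfd; DATA_NONE/DATA_BOOL with a non-zero count
 * signal the existing one, which provides a loopback test path.
 */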
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

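/*
 * Dispatch for the VFIO_DEVICE_SET_IRQS ioctl.  The caller is expected to
 * have validated argsz/flags and copied in the data payload (typically via
 * vfio_set_irqs_validate_and_prepare()); index/action combinations without
 * a handler here return -ENOTTY.
 */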
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
708