// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

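/*
 * Per-vector interrupt context, kept in vdev->ctx (an xarray keyed by
 * vector).  @trigger is the eventfd signaled toward userspace when the
 * vector fires; @unmask is an optional user-supplied irqfd that unmasks
 * INTx (@mask is its unimplemented counterpart, see
 * vfio_pci_set_intx_mask()); @masked tracks the virtual INTx mask state
 * under vdev->irqlock; @producer registers @trigger with the IRQ bypass
 * framework (e.g. for KVM posted interrupts).
 */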
struct vfio_pci_irq_ctx {
	struct vfio_pci_core_device	*vdev;
	struct eventfd_ctx		*trigger;
	struct virqfd			*unmask;
	struct virqfd			*mask;
	char				*name;
	bool				masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

static
struct vfio_pci_irq_ctx *vfio_irq_ctx_get(struct vfio_pci_core_device *vdev,
					  unsigned long index)
{
	return xa_load(&vdev->ctx, index);
}

static void vfio_irq_ctx_free(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_irq_ctx *ctx, unsigned long index)
{
	xa_erase(&vdev->ctx, index);
	kfree(ctx);
}

static struct vfio_pci_irq_ctx *
vfio_irq_ctx_alloc(struct vfio_pci_core_device *vdev, unsigned long index)
{
	struct vfio_pci_irq_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return NULL;

	ret = xa_insert(&vdev->ctx, index, ctx, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *data)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct vfio_pci_irq_ctx *ctx = data;
		struct eventfd_ctx *trigger = READ_ONCE(ctx->trigger);

		if (likely(trigger))
			eventfd_signal(trigger);
	}
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	bool masked_changed = false;

	lockdep_assert_held(&vdev->igate);

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using INTx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (!ctx->masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		ctx->masked = true;
		masked_changed = true;
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}

bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	bool mask_changed;

	mutex_lock(&vdev->igate);
	mask_changed = __vfio_pci_intx_mask(vdev);
	mutex_unlock(&vdev->igate);

	return mask_changed;
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *data)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx = data;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
		goto out_unlock;
	}

	if (ctx->masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		ctx->masked = (ret > 0);
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);

	lockdep_assert_held(&vdev->igate);

	if (vfio_pci_intx_unmask_handler(vdev, ctx) > 0)
		vfio_send_intx_eventfd(vdev, ctx);
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	mutex_lock(&vdev->igate);
	__vfio_pci_intx_unmask(vdev);
	mutex_unlock(&vdev->igate);
}

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_irq_ctx *ctx = dev_id;
	struct vfio_pci_core_device *vdev = ctx->vdev;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		ret = IRQ_HANDLED;
	} else if (!ctx->masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		ctx->masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, ctx);

	return ret;
}
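
/*
 * Note on sharing: in the pci_2_3 case the handler above may run for a
 * shared INTx line; returning IRQ_NONE when pci_check_and_mask_intx()
 * shows this device did not assert the interrupt defers to the other
 * handlers registered on the same Linux IRQ.
 */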

static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
			    struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long irqflags;
	char *name;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED)
		return -ENODEV;

	name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	ctx = vfio_irq_ctx_alloc(vdev, 0);
	if (!ctx) {
		kfree(name);
		return -ENOMEM;
	}

	ctx->name = name;
	ctx->trigger = trigger;
	ctx->vdev = vdev;

	/*
	 * Fill the initial masked state based on virq_disabled.  After
	 * enable, changing the DisINTx bit in vconfig directly changes INTx
	 * masking.  igate prevents races during setup; once running, masked
	 * is protected via irqlock.
	 *
	 * Devices supporting DisINTx also reflect the current mask state in
	 * the physical DisINTx bit, which is not affected during IRQ setup.
	 *
	 * Devices without DisINTx support require an exclusive interrupt.
	 * IRQ masking is performed at the IRQ chip.  Again, igate protects
	 * against races during setup and IRQ handlers and irqfds are not
	 * yet active, therefore masked is stable and can be used to
	 * conditionally auto-enable the IRQ.
	 *
	 * irq_type must be stable while the IRQ handler is registered,
	 * therefore it must be set before request_irq().
	 */
	ctx->masked = vdev->virq_disabled;
	if (vdev->pci_2_3) {
		pci_intx(pdev, !ctx->masked);
		irqflags = IRQF_SHARED;
	} else {
		irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
	}

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, ctx->name, ctx);
	if (ret) {
		vdev->irq_type = VFIO_PCI_NUM_IRQS;
		kfree(name);
		vfio_irq_ctx_free(vdev, ctx, 0);
		return ret;
	}

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
				struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *old;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return -EINVAL;

	old = ctx->trigger;

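	/*
	 * Pairs with the READ_ONCE() in vfio_send_intx_eventfd(): a
	 * concurrent interrupt handler observes either the old or the new
	 * trigger, never a torn pointer.
	 */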
	WRITE_ONCE(ctx->trigger, trigger);

	/* Releasing an old ctx requires synchronizing in-flight users */
	if (old) {
		synchronize_irq(pdev->irq);
		vfio_virqfd_flush_thread(&ctx->unmask);
		eventfd_ctx_put(old);
	}

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;

	ctx = vfio_irq_ctx_get(vdev, 0);
	WARN_ON_ONCE(!ctx);
	if (ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		free_irq(pdev->irq, ctx);
		if (ctx->trigger)
			eventfd_ctx_put(ctx->trigger);
		kfree(ctx->name);
		vfio_irq_ctx_free(vdev, ctx, 0);
	}
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	/* Return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max MSI vectors -
		 * it is the ceiling of log2 of the allocated vector count.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
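		/*
		 * Worked example: nvec == 3 gives fls(5) - 1 == 2, so the
		 * virtual capability advertises 2^2 == 4 vectors, the
		 * smallest power of two covering the allocation.
		 */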
	}

	return 0;
}

/*
 * vfio_msi_alloc_irq() returns the Linux IRQ number of an MSI or MSI-X device
 * interrupt vector. If a Linux IRQ number is not available then a new
 * interrupt is allocated if dynamic MSI-X is supported.
 *
 * Where is vfio_msi_free_irq()? Allocated interrupts are maintained,
 * essentially forming a cache that subsequent allocations can draw from.
 * Interrupts are freed using pci_free_irq_vectors() when MSI/MSI-X is
 * disabled.
 */
static int vfio_msi_alloc_irq(struct vfio_pci_core_device *vdev,
			      unsigned int vector, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct msi_map map;
	int irq;
	u16 cmd;

	irq = pci_irq_vector(pdev, vector);
	if (WARN_ON_ONCE(irq == 0))
		return -EINVAL;
	if (irq > 0 || !msix || !vdev->has_dyn_msix)
		return irq;

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	map = pci_msix_alloc_irq_at(pdev, vector, NULL);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	return map.index < 0 ? map.index : map.virq;
}

static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      unsigned int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	int irq = -EINVAL, ret;
	u16 cmd;

	ctx = vfio_irq_ctx_get(vdev, vector);

	if (ctx) {
		irq_bypass_unregister_producer(&ctx->producer);
		irq = pci_irq_vector(pdev, vector);
		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, ctx->trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		vfio_irq_ctx_free(vdev, ctx, vector);
	}

	if (fd < 0)
		return 0;

	if (irq == -EINVAL) {
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		irq = vfio_msi_alloc_irq(vdev, vector, msix);
		if (irq < 0)
			return irq;
	}

	ctx = vfio_irq_ctx_alloc(vdev, vector);
	if (!ctx)
		return -ENOMEM;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
			      msix ? "x" : "", vector, pci_name(pdev));
	if (!ctx->name) {
		ret = -ENOMEM;
		goto out_free_ctx;
	}

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto out_free_name;
	}

	/*
	 * If the vector was previously allocated, refresh the on-device
	 * message data before enabling in case it had been cleared or
	 * corrupted (e.g. due to backdoor resets) since writing.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret)
		goto out_put_eventfd_ctx;

	ret = irq_bypass_register_producer(&ctx->producer, trigger, irq);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (eventfd %p) registration fails: %d\n",
			 trigger, ret);
	}
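	/*
	 * Bypass registration is best-effort: on failure the vector still
	 * reaches userspace through the eventfd, only the direct-injection
	 * fast path (e.g. KVM posted interrupts) is lost.
	 */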
	ctx->trigger = trigger;

	return 0;

out_put_eventfd_ctx:
	eventfd_ctx_put(trigger);
out_free_name:
	kfree(ctx->name);
out_free_ctx:
	vfio_irq_ctx_free(vdev, ctx, vector);
	return ret;
}

static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	unsigned int i, j;
	int ret = 0;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (i = start; i < j; i++)
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long i;
	u16 cmd;

	xa_for_each(&vdev->ctx, i, ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
		int32_t fd = *(int32_t *)data;

		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, ctx,
						  &ctx->unmask, fd);

		vfio_virqfd_disable(&ctx->unmask);
	}

	return 0;
}

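/*
 * The unmask eventfd above is typically how level-triggered KVM irqfds
 * are completed: KVM signals it as the resampler once the guest EOIs
 * the interrupt, unmasking INTx without a round trip through userspace.
 */
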
static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct eventfd_ctx *trigger = NULL;
		int32_t fd = *(int32_t *)data;
		int ret;

		if (fd >= 0) {
			trigger = eventfd_ctx_fdget(fd);
			if (IS_ERR(trigger))
				return PTR_ERR(trigger);
		}

		if (is_intx(vdev))
			ret = vfio_intx_set_signal(vdev, trigger);
		else
			ret = vfio_intx_enable(vdev, trigger);

		if (ret && trigger)
			eventfd_ctx_put(trigger);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, vfio_irq_ctx_get(vdev, 0));
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			vfio_send_intx_eventfd(vdev, vfio_irq_ctx_get(vdev, 0));
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index))
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (!ctx)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(ctx->trigger);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;

			if (bools[i - start])
				eventfd_signal(ctx->trigger);
		}
	}
	return 0;
}

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

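/*
 * Entry point for the VFIO_DEVICE_SET_IRQS ioctl.  A minimal sketch of
 * the userspace side (assuming "device_fd" is an already-opened VFIO
 * device fd; needs <linux/vfio.h>, <sys/eventfd.h> and <sys/ioctl.h>),
 * wiring one eventfd to MSI-X vector 0:
 *
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
 *	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
 *	int32_t *pfd = (int32_t *)&set->data;
 *
 *	set->argsz = sizeof(buf);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	*pfd = eventfd(0, EFD_CLOEXEC);
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 * Passing count == 0 with VFIO_IRQ_SET_DATA_NONE and ACTION_TRIGGER on
 * the same index tears the setup down again (vfio_msi_disable() above).
 */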
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}