// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

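/*
 * Per-interrupt state.  Contexts live in vdev->ctx, an xarray indexed by
 * vector: INTx uses a single entry at index 0, while MSI/MSI-X get one
 * entry per vector that has a trigger configured.
 */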
struct vfio_pci_irq_ctx {
	struct vfio_pci_core_device *vdev;
	struct eventfd_ctx *trigger;
	struct virqfd *unmask;
	struct virqfd *mask;
	char *name;
	bool masked;
	struct irq_bypass_producer producer;
};

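/*
 * vdev->irq_type records which interrupt index the user currently has
 * enabled; VFIO_PCI_NUM_IRQS means no interrupt is configured.
 */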
static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

static
struct vfio_pci_irq_ctx *vfio_irq_ctx_get(struct vfio_pci_core_device *vdev,
					  unsigned long index)
{
	return xa_load(&vdev->ctx, index);
}

static void vfio_irq_ctx_free(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_irq_ctx *ctx, unsigned long index)
{
	xa_erase(&vdev->ctx, index);
	kfree(ctx);
}

static struct vfio_pci_irq_ctx *
vfio_irq_ctx_alloc(struct vfio_pci_core_device *vdev, unsigned long index)
{
	struct vfio_pci_irq_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return NULL;

	ret = xa_insert(&vdev->ctx, index, ctx, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *data)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct vfio_pci_irq_ctx *ctx = data;
		struct eventfd_ctx *trigger = READ_ONCE(ctx->trigger);

		if (likely(trigger))
			eventfd_signal(trigger);
	}
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	bool masked_changed = false;

	lockdep_assert_held(&vdev->igate);

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (!ctx->masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		ctx->masked = true;
		masked_changed = true;
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}

bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	bool mask_changed;

	mutex_lock(&vdev->igate);
	mask_changed = __vfio_pci_intx_mask(vdev);
	mutex_unlock(&vdev->igate);

	return mask_changed;
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *data)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx = data;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
		goto out_unlock;
	}

	if (ctx->masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		ctx->masked = (ret > 0);
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);

	lockdep_assert_held(&vdev->igate);

	if (vfio_pci_intx_unmask_handler(vdev, ctx) > 0)
		vfio_send_intx_eventfd(vdev, ctx);
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	mutex_lock(&vdev->igate);
	__vfio_pci_intx_unmask(vdev);
	mutex_unlock(&vdev->igate);
}

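/*
 * INTx hard IRQ handler.  Without DisINTx (pci_2_3) support the IRQ is
 * requested exclusively, so the line is unconditionally ours: disable it
 * at the IRQ chip and forward it to the user.  With DisINTx the line may
 * be shared, so only claim it when the device reports INTx status and we
 * can mask it at the device.
 */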
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_irq_ctx *ctx = dev_id;
	struct vfio_pci_core_device *vdev = ctx->vdev;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		ret = IRQ_HANDLED;
	} else if (!ctx->masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		ctx->masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, ctx);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
			    struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long irqflags;
	char *name;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED)
		return -ENODEV;

	name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	ctx = vfio_irq_ctx_alloc(vdev, 0);
	if (!ctx) {
		kfree(name);
		return -ENOMEM;
	}

	ctx->name = name;
	ctx->trigger = trigger;
	ctx->vdev = vdev;

	/*
	 * Fill the initial masked state based on virq_disabled.  After
	 * enable, changing the DisINTx bit in vconfig directly changes INTx
	 * masking.  igate prevents races during setup, once running masked
	 * is protected via irqlock.
	 *
	 * Devices supporting DisINTx also reflect the current mask state in
	 * the physical DisINTx bit, which is not affected during IRQ setup.
	 *
	 * Devices without DisINTx support require an exclusive interrupt.
	 * IRQ masking is performed at the IRQ chip.  Again, igate protects
	 * against races during setup and IRQ handlers and irqfds are not
	 * yet active, therefore masked is stable and can be used to
	 * conditionally auto-enable the IRQ.
	 *
	 * irq_type must be stable while the IRQ handler is registered,
	 * therefore it must be set before request_irq().
	 */
	ctx->masked = vdev->virq_disabled;
	if (vdev->pci_2_3) {
		pci_intx(pdev, !ctx->masked);
		irqflags = IRQF_SHARED;
	} else {
		irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
	}

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	if (!vdev->pci_2_3)
		irq_set_status_flags(pdev->irq, IRQ_DISABLE_UNLAZY);

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, ctx->name, ctx);
	if (ret) {
		if (!vdev->pci_2_3)
			irq_clear_status_flags(pdev->irq, IRQ_DISABLE_UNLAZY);
		vdev->irq_type = VFIO_PCI_NUM_IRQS;
		kfree(name);
		vfio_irq_ctx_free(vdev, ctx, 0);
		return ret;
	}

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
				struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *old;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return -EINVAL;

	old = ctx->trigger;

	WRITE_ONCE(ctx->trigger, trigger);

	/* Releasing an old ctx requires synchronizing in-flight users */
	if (old) {
		synchronize_irq(pdev->irq);
		vfio_virqfd_flush_thread(&ctx->unmask);
		eventfd_ctx_put(old);
	}

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;

	ctx = vfio_irq_ctx_get(vdev, 0);
	WARN_ON_ONCE(!ctx);
	if (ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		free_irq(pdev->irq, ctx);
		if (!vdev->pci_2_3)
			irq_clear_status_flags(pdev->irq, IRQ_DISABLE_UNLAZY);
		if (ctx->trigger)
			eventfd_ctx_put(ctx->trigger);
		kfree(ctx->name);
		vfio_irq_ctx_free(vdev, ctx, 0);
	}
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

/*
 * vfio_msi_alloc_irq() returns the Linux IRQ number of an MSI or MSI-X device
 * interrupt vector.  If a Linux IRQ number is not available then a new
 * interrupt is allocated if dynamic MSI-X is supported.
 *
 * Where is vfio_msi_free_irq()?  Allocated interrupts are maintained,
 * essentially forming a cache that subsequent allocations can draw from.
 * Interrupts are freed using pci_free_irq_vectors() when MSI/MSI-X is
 * disabled.
 */
static int vfio_msi_alloc_irq(struct vfio_pci_core_device *vdev,
			      unsigned int vector, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct msi_map map;
	int irq;
	u16 cmd;

	irq = pci_irq_vector(pdev, vector);
	if (WARN_ON_ONCE(irq == 0))
		return -EINVAL;
	if (irq > 0 || !msix || !vdev->has_dyn_msix)
		return irq;

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	map = pci_msix_alloc_irq_at(pdev, vector, NULL);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	return map.index < 0 ? map.index : map.virq;
}

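/*
 * (Re)configure the trigger for a single MSI/MSI-X vector: tear down any
 * existing handler for the vector, then, if fd >= 0, look up (or
 * dynamically allocate) the Linux IRQ, attach the new eventfd trigger as
 * the handler, and register it as an irq bypass producer so a consumer
 * such as KVM can connect it for direct injection.
 */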
static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      unsigned int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	int irq = -EINVAL, ret;
	u16 cmd;

	ctx = vfio_irq_ctx_get(vdev, vector);

	if (ctx) {
		irq_bypass_unregister_producer(&ctx->producer);
		irq = pci_irq_vector(pdev, vector);
		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, ctx->trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		vfio_irq_ctx_free(vdev, ctx, vector);
	}

	if (fd < 0)
		return 0;

	if (irq == -EINVAL) {
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		irq = vfio_msi_alloc_irq(vdev, vector, msix);
		if (irq < 0)
			return irq;
	}

	ctx = vfio_irq_ctx_alloc(vdev, vector);
	if (!ctx)
		return -ENOMEM;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
			      msix ? "x" : "", vector, pci_name(pdev));
	if (!ctx->name) {
		ret = -ENOMEM;
		goto out_free_ctx;
	}

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto out_free_name;
	}

	/*
	 * If the vector was previously allocated, refresh the on-device
	 * message data before enabling in case it had been cleared or
	 * corrupted (e.g. due to backdoor resets) since writing.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret)
		goto out_put_eventfd_ctx;

	ret = irq_bypass_register_producer(&ctx->producer, trigger, irq);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (eventfd %p) registration fails: %d\n",
			 trigger, ret);
	}
	ctx->trigger = trigger;

	return 0;

out_put_eventfd_ctx:
	eventfd_ctx_put(trigger);
out_free_name:
	kfree(ctx->name);
out_free_ctx:
	vfio_irq_ctx_free(vdev, ctx, vector);
	return ret;
}

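/*
 * Apply an array of trigger eventfds to a contiguous block of vectors.
 * If any vector fails, unwind the vectors this call already configured.
 */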
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	unsigned int i, j;
	int ret = 0;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (i = start; i < j; i++)
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	return ret;
}

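/*
 * Tear down every configured vector, then release the IRQ vectors
 * themselves and return the device to the "no interrupt" state.
 */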
static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long i;
	u16 cmd;

	xa_for_each(&vdev->ctx, i, ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
		int32_t fd = *(int32_t *)data;

		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, ctx,
						  &ctx->unmask, fd);

		vfio_virqfd_disable(&ctx->unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct eventfd_ctx *trigger = NULL;
		int32_t fd = *(int32_t *)data;
		int ret;

		if (fd >= 0) {
			trigger = eventfd_ctx_fdget(fd);
			if (IS_ERR(trigger))
				return PTR_ERR(trigger);
		}

		if (is_intx(vdev))
			ret = vfio_intx_set_signal(vdev, trigger);
		else
			ret = vfio_intx_enable(vdev, trigger);

		if (ret && trigger)
			eventfd_ctx_put(trigger);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, vfio_irq_ctx_get(vdev, 0));
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, vfio_irq_ctx_get(vdev, 0));
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index))
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (!ctx)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(ctx->trigger);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(ctx->trigger);
		}
	}
	return 0;
}

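/*
 * Common handler for the single-eventfd indexes (error and request
 * notification).  DATA_NONE with count == 0, or DATA_EVENTFD with fd == -1,
 * tears the eventfd down; DATA_NONE/DATA_BOOL with count != 0 signals the
 * existing eventfd (loopback testing); DATA_EVENTFD with fd >= 0 installs
 * a new one.
 */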
static int vfio_pci_set_ctx_trigger_single(struct vfio_pci_core_device *vdev,
					   struct vfio_pci_eventfd __rcu **peventfd,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		struct vfio_pci_eventfd *eventfd;

		eventfd = rcu_dereference_protected(*peventfd,
					lockdep_is_held(&vdev->igate));

		if (!eventfd)
			return -EINVAL;

		if (count) {
			eventfd_signal(eventfd->ctx);
			return 0;
		}

		return vfio_pci_eventfd_replace_locked(vdev, peventfd, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;

		if (trigger) {
			struct vfio_pci_eventfd *eventfd =
				rcu_dereference_protected(*peventfd,
					lockdep_is_held(&vdev->igate));

			if (eventfd)
				eventfd_signal(eventfd->ctx);
		}

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			return vfio_pci_eventfd_replace_locked(vdev,
							       peventfd, NULL);
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;
			int ret;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			ret = vfio_pci_eventfd_replace_locked(vdev,
							      peventfd, efdctx);
			if (ret)
				eventfd_ctx_put(efdctx);

			return ret;
		}
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(vdev, &vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(vdev, &vdev->req_trigger,
					       count, flags, data);
}

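/*
 * Entry point for VFIO_DEVICE_SET_IRQS: select a handler based on the
 * interrupt index and the requested action, then dispatch with the
 * caller-validated arguments.
 */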
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}