/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <machine/resource.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include <dev/vmm/vmm_ktr.h>

#include "vmm_lapic.h"

#include "iommu.h"
#include "ppt.h"

/* XXX locking */

#define	MAX_MSIMSGS	32

/*
 * If the MSI-X table is located in the middle of a BAR then that MMIO
 * region gets split into two segments - one segment above the MSI-X table
 * and the other segment below the MSI-X table - with a hole in place of
 * the MSI-X table so accesses to it can be trapped and emulated.
 *
 * So, allocate an MMIO segment for each BAR register + 1 additional segment.
 */
#define	MAX_MMIOSEGS	((PCIR_MAX_BAR_0 + 1) + 1)

MALLOC_DEFINE(M_PPTMSIX, "pptmsix", "Passthru MSI-X resources");

struct pptintr_arg {			/* pptintr(pptintr_arg) */
	struct pptdev	*pptdev;
	uint64_t	addr;
	uint64_t	msg_data;
};

struct pptseg {
	vm_paddr_t	gpa;
	size_t		len;
	int		wired;
};

struct pptdev {
	device_t	dev;
	struct vm	*vm;		/* owner of this device */
	TAILQ_ENTRY(pptdev)	next;
	struct pptseg	mmio[MAX_MMIOSEGS];
	struct {
		int	num_msgs;	/* guest state */

		int	startrid;	/* host state */
		struct resource *res[MAX_MSIMSGS];
		void	*cookie[MAX_MSIMSGS];
		struct pptintr_arg arg[MAX_MSIMSGS];
	} msi;

	struct {
		int num_msgs;
		int startrid;
		int msix_table_rid;
		int msix_pba_rid;
		struct resource *msix_table_res;
		struct resource *msix_pba_res;
		struct resource **res;
		void **cookie;
		struct pptintr_arg *arg;
	} msix;
};

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, ppt, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "bhyve passthru devices");

static int num_pptdevs;
SYSCTL_INT(_hw_vmm_ppt, OID_AUTO, devices, CTLFLAG_RD, &num_pptdevs, 0,
    "number of pci passthru devices");

static TAILQ_HEAD(, pptdev) pptdev_list = TAILQ_HEAD_INITIALIZER(pptdev_list);

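/*
 * A device qualifies for passthrough if it is a type 0 (endpoint) device
 * and the administrator has designated it for passthrough use (e.g. via
 * the pptdevs tunable consulted by vmm_is_pptdev()).
 */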
static int
ppt_probe(device_t dev)
{
	int bus, slot, func;
	struct pci_devinfo *dinfo;

	dinfo = (struct pci_devinfo *)device_get_ivars(dev);

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);
	func = pci_get_function(dev);

	/*
	 * To qualify as a pci passthrough device a device must:
	 * - be allowed by administrator to be used in this role
	 * - be an endpoint device
	 */
	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
		return (ENXIO);
	else if (vmm_is_pptdev(bus, slot, func))
		return (0);
	else
		/*
		 * Returning BUS_PROBE_NOWILDCARD here matches devices that the
		 * SR-IOV infrastructure specified as "ppt" passthrough devices.
		 * All normal devices that did not have "ppt" specified as their
		 * driver will not be matched by this.
		 */
		return (BUS_PROBE_NOWILDCARD);
}

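/*
 * Take the device away from the host: clear the I/O, memory and busmaster
 * enable bits so it can neither decode accesses nor perform DMA, and
 * remove it from the host IOMMU domain.  The device then sits parked on
 * pptdev_list until it is assigned to a VM.
 */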
static int
ppt_attach(device_t dev)
{
	struct pptdev *ppt;
	uint16_t cmd, cmd1;
	int error;

	ppt = device_get_softc(dev);

	cmd1 = cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd &= ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
	error = iommu_remove_device(iommu_host_domain(), dev, pci_get_rid(dev));
	if (error != 0) {
		pci_write_config(dev, PCIR_COMMAND, cmd1, 2);
		return (error);
	}
	num_pptdevs++;
	TAILQ_INSERT_TAIL(&pptdev_list, ppt, next);
	ppt->dev = dev;

	if (bootverbose)
		device_printf(dev, "attached\n");

	return (0);
}

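/*
 * Return an unowned device to the host: re-add it to the host IOMMU
 * domain (if one exists) and drop it from the passthrough list.  Fails
 * with EBUSY while the device is still assigned to a VM.
 */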
static int
ppt_detach(device_t dev)
{
	struct pptdev *ppt;
	int error;

	ppt = device_get_softc(dev);

	if (ppt->vm != NULL)
		return (EBUSY);
	if (iommu_host_domain() != NULL) {
		error = iommu_add_device(iommu_host_domain(), dev,
		    pci_get_rid(dev));
	} else {
		error = 0;
	}
	if (error != 0)
		return (error);
	num_pptdevs--;
	TAILQ_REMOVE(&pptdev_list, ppt, next);

	return (0);
}

static device_method_t ppt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ppt_probe),
	DEVMETHOD(device_attach,	ppt_attach),
	DEVMETHOD(device_detach,	ppt_detach),
	{0, 0}
};

DEFINE_CLASS_0(ppt, ppt_driver, ppt_methods, sizeof(struct pptdev));
DRIVER_MODULE(ppt, pci, ppt_driver, NULL, NULL);

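/*
 * Look up a passthrough device by bus/slot/function and verify
 * ownership: 'vm' must match the device's current owner, with NULL
 * meaning the device must be unowned.
 */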
static int
ppt_find(struct vm *vm, int bus, int slot, int func, struct pptdev **pptp)
{
	device_t dev;
	struct pptdev *ppt;
	int b, s, f;

	TAILQ_FOREACH(ppt, &pptdev_list, next) {
		dev = ppt->dev;
		b = pci_get_bus(dev);
		s = pci_get_slot(dev);
		f = pci_get_function(dev);
		if (bus == b && slot == s && func == f)
			break;
	}

	if (ppt == NULL)
		return (ENOENT);
	if (ppt->vm != vm)		/* Make sure we own this device */
		return (EBUSY);
	*pptp = ppt;
	return (0);
}

static void
ppt_unmap_all_mmio(struct vm *vm, struct pptdev *ppt)
{
	int i;
	struct pptseg *seg;

	for (i = 0; i < MAX_MMIOSEGS; i++) {
		seg = &ppt->mmio[i];
		if (seg->len == 0)
			continue;
		(void)vm_unmap_mmio(vm, seg->gpa, seg->len);
		bzero(seg, sizeof(struct pptseg));
	}
}

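/*
 * Release all MSI (or legacy INTx) resources held on behalf of the
 * guest: tear down the interrupt handlers, release the IRQ resources
 * and, if MSI rather than INTx was in use (startrid == 1), release the
 * MSI allocation itself.
 */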
static void
ppt_teardown_msi(struct pptdev *ppt)
{
	int i, rid;
	void *cookie;
	struct resource *res;

	if (ppt->msi.num_msgs == 0)
		return;

	for (i = 0; i < ppt->msi.num_msgs; i++) {
		rid = ppt->msi.startrid + i;
		res = ppt->msi.res[i];
		cookie = ppt->msi.cookie[i];

		if (cookie != NULL)
			bus_teardown_intr(ppt->dev, res, cookie);

		if (res != NULL)
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);

		ppt->msi.res[i] = NULL;
		ppt->msi.cookie[i] = NULL;
	}

	if (ppt->msi.startrid == 1)
		pci_release_msi(ppt->dev);

	ppt->msi.num_msgs = 0;
}

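/*
 * Tear down a single MSI-X vector: remove its interrupt handler and
 * release the corresponding IRQ resource.
 */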
static void
ppt_teardown_msix_intr(struct pptdev *ppt, int idx)
{
	int rid;
	struct resource *res;
	void *cookie;

	rid = ppt->msix.startrid + idx;
	res = ppt->msix.res[idx];
	cookie = ppt->msix.cookie[idx];

	if (cookie != NULL)
		bus_teardown_intr(ppt->dev, res, cookie);

	if (res != NULL)
		bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);

	ppt->msix.res[idx] = NULL;
	ppt->msix.cookie[idx] = NULL;
}

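/*
 * Release all MSI-X state: every per-vector handler and IRQ resource,
 * the bookkeeping arrays, the MSI-X allocation, and the BAR resources
 * backing the MSI-X table and PBA.
 */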
static void
ppt_teardown_msix(struct pptdev *ppt)
{
	int i;

	if (ppt->msix.num_msgs == 0)
		return;

	for (i = 0; i < ppt->msix.num_msgs; i++)
		ppt_teardown_msix_intr(ppt, i);

	free(ppt->msix.res, M_PPTMSIX);
	free(ppt->msix.cookie, M_PPTMSIX);
	free(ppt->msix.arg, M_PPTMSIX);

	pci_release_msi(ppt->dev);

	if (ppt->msix.msix_table_res) {
		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
		    ppt->msix.msix_table_rid, ppt->msix.msix_table_res);
		ppt->msix.msix_table_res = NULL;
		ppt->msix.msix_table_rid = 0;
	}
	if (ppt->msix.msix_pba_res) {
		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
		    ppt->msix.msix_pba_rid, ppt->msix.msix_pba_res);
		ppt->msix.msix_pba_res = NULL;
		ppt->msix.msix_pba_rid = 0;
	}

	ppt->msix.num_msgs = 0;
}

int
ppt_assigned_devices(struct vm *vm)
{
	struct pptdev *ppt;
	int num;

	num = 0;
	TAILQ_FOREACH(ppt, &pptdev_list, next) {
		if (ppt->vm == vm)
			num++;
	}
	return (num);
}

bool
ppt_is_mmio(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	struct pptdev *ppt;
	struct pptseg *seg;

	TAILQ_FOREACH(ppt, &pptdev_list, next) {
		if (ppt->vm != vm)
			continue;

		for (i = 0; i < MAX_MMIOSEGS; i++) {
			seg = &ppt->mmio[i];
			if (seg->len == 0)
				continue;
			if (gpa >= seg->gpa && gpa < seg->gpa + seg->len)
				return (true);
		}
	}

	return (false);
}

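/*
 * Reset the device before (un)assignment: try a PCIe Function Level
 * Reset first and fall back to a PCI power reset if FLR is unavailable
 * or fails.
 */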
static void
ppt_pci_reset(device_t dev)
{

	if (pcie_flr(dev,
	    max(pcie_get_max_completion_timeout(dev) / 1000, 10), true))
		return;

	pci_power_reset(dev);
}

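/*
 * Compute which of PCIM_CMD_PORTEN/PCIM_CMD_MEMEN should be set in the
 * command register, based on the kinds of BARs the device implements.
 */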
static uint16_t
ppt_bar_enables(struct pptdev *ppt)
{
	struct pci_map *pm;
	uint16_t cmd;

	cmd = 0;
	for (pm = pci_first_bar(ppt->dev); pm != NULL; pm = pci_next_bar(pm)) {
		if (PCI_BAR_IO(pm->pm_value))
			cmd |= PCIM_CMD_PORTEN;
		if (PCI_BAR_MEM(pm->pm_value))
			cmd |= PCIM_CMD_MEMEN;
	}
	return (cmd);
}

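/*
 * Assign an unowned device to 'vm': reset it (preserving config state
 * across the reset), move it into the VM's IOMMU domain, and re-enable
 * busmastering plus decoding for whatever BARs it implements.
 */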
int
ppt_assign_device(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;
	uint16_t cmd;

	/* Passing NULL requires the device to be unowned. */
	error = ppt_find(NULL, bus, slot, func, &ppt);
	if (error)
		return (error);

	pci_save_state(ppt->dev);
	ppt_pci_reset(ppt->dev);
	pci_restore_state(ppt->dev);
	error = iommu_add_device(vm_iommu_domain(vm), ppt->dev,
	    pci_get_rid(ppt->dev));
	if (error != 0)
		return (error);
	ppt->vm = vm;
	cmd = pci_read_config(ppt->dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_BUSMASTEREN | ppt_bar_enables(ppt);
	pci_write_config(ppt->dev, PCIR_COMMAND, cmd, 2);
	return (0);
}

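/*
 * Give a device back to the host: disable decoding and busmastering,
 * reset the device, undo any guest MMIO mappings and MSI/MSI-X state,
 * and remove it from the VM's IOMMU domain.
 */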
int
ppt_unassign_device(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;
	uint16_t cmd;

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	cmd = pci_read_config(ppt->dev, PCIR_COMMAND, 2);
	cmd &= ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
	pci_write_config(ppt->dev, PCIR_COMMAND, cmd, 2);
	pci_save_state(ppt->dev);
	ppt_pci_reset(ppt->dev);
	pci_restore_state(ppt->dev);
	ppt_unmap_all_mmio(vm, ppt);
	ppt_teardown_msi(ppt);
	ppt_teardown_msix(ppt);
	error = iommu_remove_device(vm_iommu_domain(vm), ppt->dev,
	    pci_get_rid(ppt->dev));
	ppt->vm = NULL;
	return (error);
}

int
ppt_unassign_all(struct vm *vm)
{
	struct pptdev *ppt;
	int bus, slot, func;
	device_t dev;

	TAILQ_FOREACH(ppt, &pptdev_list, next) {
		if (ppt->vm == vm) {
			dev = ppt->dev;
			bus = pci_get_bus(dev);
			slot = pci_get_slot(dev);
			func = pci_get_function(dev);
			vm_unassign_pptdev(vm, bus, slot, func);
		}
	}

	return (0);
}

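/*
 * A passthrough MMIO mapping is only accepted if the host physical
 * range [hpa, hpa + len) falls entirely within one of the device's
 * memory BARs.
 */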
static bool
ppt_valid_bar_mapping(struct pptdev *ppt, vm_paddr_t hpa, size_t len)
{
	struct pci_map *pm;
	pci_addr_t base, size;

	for (pm = pci_first_bar(ppt->dev); pm != NULL; pm = pci_next_bar(pm)) {
		if (!PCI_BAR_MEM(pm->pm_value))
			continue;
		base = pm->pm_value & PCIM_BAR_MEM_BASE;
		size = (pci_addr_t)1 << pm->pm_size;
		if (hpa >= base && hpa + len <= base + size)
			return (true);
	}
	return (false);
}

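/*
 * Map a page-aligned chunk of a device BAR into the guest physical
 * address space, recording the mapping in a free pptseg slot so it can
 * be torn down later.
 */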
int
ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	int i, error;
	struct pptseg *seg;
	struct pptdev *ppt;

	if (len % PAGE_SIZE != 0 || len == 0 || gpa % PAGE_SIZE != 0 ||
	    hpa % PAGE_SIZE != 0 || gpa + len < gpa || hpa + len < hpa)
		return (EINVAL);

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	if (!ppt_valid_bar_mapping(ppt, hpa, len))
		return (EINVAL);

	for (i = 0; i < MAX_MMIOSEGS; i++) {
		seg = &ppt->mmio[i];
		if (seg->len == 0) {
			error = vm_map_mmio(vm, gpa, len, hpa);
			if (error == 0) {
				seg->gpa = gpa;
				seg->len = len;
			}
			return (error);
		}
	}
	return (ENOSPC);
}

int
ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len)
{
	int i, error;
	struct pptseg *seg;
	struct pptdev *ppt;

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	for (i = 0; i < MAX_MMIOSEGS; i++) {
		seg = &ppt->mmio[i];
		if (seg->gpa == gpa && seg->len == len) {
			error = vm_unmap_mmio(vm, seg->gpa, seg->len);
			if (error == 0) {
				seg->gpa = 0;
				seg->len = 0;
			}
			return (error);
		}
	}
	return (ENOENT);
}

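/*
 * Interrupt filter for passthrough devices: forward the interrupt to
 * the guest as an MSI message using the address/data pair that the
 * guest programmed.
 */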
static int
pptintr(void *arg)
{
	struct pptdev *ppt;
	struct pptintr_arg *pptarg;

	pptarg = arg;
	ppt = pptarg->pptdev;

	if (ppt->vm != NULL)
		lapic_intr_msi(ppt->vm, pptarg->addr, pptarg->msg_data);
	else {
		/*
		 * XXX
		 * This is not expected to happen - panic?
		 */
	}

	/*
	 * For legacy interrupts give other filters a chance in case
	 * the interrupt was not generated by the passthrough device.
	 */
	if (ppt->msi.startrid == 0)
		return (FILTER_STRAY);
	else
		return (FILTER_HANDLED);
}

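/*
 * Configure 'numvec' MSI vectors on behalf of the guest.  Any previous
 * allocation is torn down first, so numvec == 0 simply disables MSI.
 * If the device has no MSI capability the legacy INTx line (rid 0) is
 * used instead.
 */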
int
ppt_setup_msi(struct vm *vm, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	int i, rid, flags;
	int msi_count, startrid, error, tmp;
	struct pptdev *ppt;

	if (numvec < 0 || numvec > MAX_MSIMSGS)
		return (EINVAL);

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	/* Reject attempts to enable MSI while MSI-X is active. */
	if (ppt->msix.num_msgs != 0 && numvec != 0)
		return (EBUSY);

	/* Free any allocated resources */
	ppt_teardown_msi(ppt);

	if (numvec == 0)		/* nothing more to do */
		return (0);

	flags = RF_ACTIVE;
	msi_count = pci_msi_count(ppt->dev);
	if (msi_count == 0) {
		startrid = 0;		/* legacy interrupt */
		msi_count = 1;
		flags |= RF_SHAREABLE;
	} else
		startrid = 1;		/* MSI */

	/*
	 * The device must be capable of supporting the number of vectors
	 * the guest wants to allocate.
	 */
	if (numvec > msi_count)
		return (EINVAL);

	/*
	 * Make sure that we can allocate all the MSI vectors that are needed
	 * by the guest.
	 */
	if (startrid == 1) {
		tmp = numvec;
		error = pci_alloc_msi(ppt->dev, &tmp);
		if (error)
			return (error);
		else if (tmp != numvec) {
			pci_release_msi(ppt->dev);
			return (ENOSPC);
		} else {
			/* success */
		}
	}

	ppt->msi.startrid = startrid;

	/*
	 * Allocate the irq resource and attach it to the interrupt handler.
	 */
	for (i = 0; i < numvec; i++) {
		ppt->msi.num_msgs = i + 1;
		ppt->msi.cookie[i] = NULL;

		rid = startrid + i;
		ppt->msi.res[i] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ,
		    &rid, flags);
		if (ppt->msi.res[i] == NULL)
			break;

		ppt->msi.arg[i].pptdev = ppt;
		ppt->msi.arg[i].addr = addr;
		ppt->msi.arg[i].msg_data = msg + i;

		error = bus_setup_intr(ppt->dev, ppt->msi.res[i],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    pptintr, NULL, &ppt->msi.arg[i],
		    &ppt->msi.cookie[i]);
		if (error != 0)
			break;
	}

	if (i < numvec) {
		ppt_teardown_msi(ppt);
		return (ENXIO);
	}

	return (0);
}

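/*
 * Configure a single MSI-X vector for the guest.  The first call
 * allocates the per-vector arrays, the MSI-X table/PBA BAR resources
 * and the full complement of host vectors; subsequent calls program or
 * mask individual vectors as directed by 'vector_control'.
 */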
int
ppt_setup_msix(struct vm *vm, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct pptdev *ppt;
	struct pci_devinfo *dinfo;
	int numvec, alloced, rid, error;
	size_t res_size, cookie_size, arg_size;

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	/* Reject attempts to enable MSI-X while MSI is active. */
	if (ppt->msi.num_msgs != 0)
		return (EBUSY);

	dinfo = device_get_ivars(ppt->dev);
	if (!dinfo)
		return (ENXIO);

	/*
	 * First-time configuration:
	 *	Allocate the MSI-X table
	 *	Allocate the IRQ resources
	 *	Set up some variables in ppt->msix
	 */
	if (ppt->msix.num_msgs == 0) {
		numvec = pci_msix_count(ppt->dev);
		if (numvec <= 0)
			return (EINVAL);

		ppt->msix.startrid = 1;
		ppt->msix.num_msgs = numvec;

		res_size = numvec * sizeof(ppt->msix.res[0]);
		cookie_size = numvec * sizeof(ppt->msix.cookie[0]);
		arg_size = numvec * sizeof(ppt->msix.arg[0]);

		ppt->msix.res = malloc(res_size, M_PPTMSIX, M_WAITOK | M_ZERO);
		ppt->msix.cookie = malloc(cookie_size, M_PPTMSIX,
		    M_WAITOK | M_ZERO);
		ppt->msix.arg = malloc(arg_size, M_PPTMSIX, M_WAITOK | M_ZERO);

		rid = dinfo->cfg.msix.msix_table_bar;
		ppt->msix.msix_table_res = bus_alloc_resource_any(ppt->dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);

		if (ppt->msix.msix_table_res == NULL) {
			ppt_teardown_msix(ppt);
			return (ENOSPC);
		}
		ppt->msix.msix_table_rid = rid;

		if (dinfo->cfg.msix.msix_table_bar !=
		    dinfo->cfg.msix.msix_pba_bar) {
			rid = dinfo->cfg.msix.msix_pba_bar;
			ppt->msix.msix_pba_res = bus_alloc_resource_any(
			    ppt->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);

			if (ppt->msix.msix_pba_res == NULL) {
				ppt_teardown_msix(ppt);
				return (ENOSPC);
			}
			ppt->msix.msix_pba_rid = rid;
		}

		alloced = numvec;
		error = pci_alloc_msix(ppt->dev, &alloced);
		if (error || alloced != numvec) {
			ppt_teardown_msix(ppt);
			return (error == 0 ? ENOSPC : error);
		}
	}

	if (idx >= ppt->msix.num_msgs)
		return (EINVAL);

	if ((vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
		/* Tear down the IRQ if it's already set up */
		ppt_teardown_msix_intr(ppt, idx);

		/* Allocate the IRQ resource */
		ppt->msix.cookie[idx] = NULL;
		rid = ppt->msix.startrid + idx;
		ppt->msix.res[idx] = bus_alloc_resource_any(ppt->dev,
		    SYS_RES_IRQ, &rid, RF_ACTIVE);
		if (ppt->msix.res[idx] == NULL)
			return (ENXIO);

		ppt->msix.arg[idx].pptdev = ppt;
		ppt->msix.arg[idx].addr = addr;
		ppt->msix.arg[idx].msg_data = msg;

		/* Setup the MSI-X interrupt */
		error = bus_setup_intr(ppt->dev, ppt->msix.res[idx],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    pptintr, NULL, &ppt->msix.arg[idx],
		    &ppt->msix.cookie[idx]);

		if (error != 0) {
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid,
			    ppt->msix.res[idx]);
			ppt->msix.cookie[idx] = NULL;
			ppt->msix.res[idx] = NULL;
			return (ENXIO);
		}
	} else {
		/* Masked, tear it down if it's already been set up */
		ppt_teardown_msix_intr(ppt, idx);
	}

	return (0);
}

int
ppt_disable_msix(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	ppt_teardown_msix(ppt);
	return (0);
}