xref: /freebsd/sys/amd64/vmm/io/ppt.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/malloc.h>
34 #include <sys/module.h>
35 #include <sys/bus.h>
36 #include <sys/pciio.h>
37 #include <sys/rman.h>
38 #include <sys/smp.h>
39 #include <sys/sysctl.h>
40 
41 #include <dev/pci/pcivar.h>
42 #include <dev/pci/pcireg.h>
43 
44 #include <machine/resource.h>
45 
46 #include <machine/vmm.h>
47 #include <machine/vmm_dev.h>
48 
49 #include "vmm_lapic.h"
50 #include "vmm_ktr.h"
51 
52 #include "iommu.h"
53 #include "ppt.h"
54 
/* XXX locking */

/* Upper bound on MSI vectors that may be forwarded per device. */
#define	MAX_MSIMSGS	32

/*
 * If the MSI-X table is located in the middle of a BAR then that MMIO
 * region gets split into two segments - one segment above the MSI-X table
 * and the other segment below the MSI-X table - with a hole in place of
 * the MSI-X table so accesses to it can be trapped and emulated.
 *
 * So, allocate a MMIO segment for each BAR register + 1 additional segment.
 */
#define	MAX_MMIOSEGS	((PCIR_MAX_BAR_0 + 1) + 1)

/* Malloc type for the dynamically sized MSI-X bookkeeping arrays. */
MALLOC_DEFINE(M_PPTMSIX, "pptmsix", "Passthru MSI-X resources");
70 
/* Per-vector argument handed to pptintr() through bus_setup_intr(). */
struct pptintr_arg {				/* pptintr(pptintr_arg) */
	struct pptdev	*pptdev;	/* device the vector belongs to */
	uint64_t	addr;		/* guest MSI address */
	uint64_t	msg_data;	/* guest MSI data payload */
};
76 
/* One guest MMIO mapping; len == 0 marks the slot as free. */
struct pptseg {
	vm_paddr_t	gpa;		/* guest physical base address */
	size_t		len;		/* mapping length in bytes */
	int		wired;
};
82 
/* Per-device softc for a passthrough PCI function. */
struct pptdev {
	device_t	dev;
	struct vm	*vm;			/* owner of this device */
	TAILQ_ENTRY(pptdev)	next;		/* linkage on pptdev_list */
	struct pptseg mmio[MAX_MMIOSEGS];	/* guest MMIO mappings */
	struct {
		int	num_msgs;		/* guest state */

		int	startrid;		/* host state */
		struct resource *res[MAX_MSIMSGS];
		void	*cookie[MAX_MSIMSGS];
		struct pptintr_arg arg[MAX_MSIMSGS];
	} msi;

	struct {
		int num_msgs;		/* 0 when MSI-X is not configured */
		int startrid;
		int msix_table_rid;	/* rid of the BAR holding the table */
		int msix_pba_rid;	/* rid of the PBA BAR, if separate */
		struct resource *msix_table_res;
		struct resource *msix_pba_res;
		struct resource **res;	/* per-vector IRQ resources */
		void **cookie;		/* per-vector interrupt cookies */
		struct pptintr_arg *arg;	/* per-vector pptintr args */
	} msix;
};
109 
SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, ppt, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "bhyve passthru devices");

/* Count of attached passthrough devices, exported as hw.vmm.ppt.devices. */
static int num_pptdevs;
SYSCTL_INT(_hw_vmm_ppt, OID_AUTO, devices, CTLFLAG_RD, &num_pptdevs, 0,
    "number of pci passthru devices");

/* Global list of all attached passthrough devices.  XXX no locking. */
static TAILQ_HEAD(, pptdev) pptdev_list = TAILQ_HEAD_INITIALIZER(pptdev_list);
119 
120 static int
121 ppt_probe(device_t dev)
122 {
123 	int bus, slot, func;
124 	struct pci_devinfo *dinfo;
125 
126 	dinfo = (struct pci_devinfo *)device_get_ivars(dev);
127 
128 	bus = pci_get_bus(dev);
129 	slot = pci_get_slot(dev);
130 	func = pci_get_function(dev);
131 
132 	/*
133 	 * To qualify as a pci passthrough device a device must:
134 	 * - be allowed by administrator to be used in this role
135 	 * - be an endpoint device
136 	 */
137 	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
138 		return (ENXIO);
139 	else if (vmm_is_pptdev(bus, slot, func))
140 		return (0);
141 	else
142 		/*
143 		 * Returning BUS_PROBE_NOWILDCARD here matches devices that the
144 		 * SR-IOV infrastructure specified as "ppt" passthrough devices.
145 		 * All normal devices that did not have "ppt" specified as their
146 		 * driver will not be matched by this.
147 		 */
148 		return (BUS_PROBE_NOWILDCARD);
149 }
150 
151 static int
152 ppt_attach(device_t dev)
153 {
154 	struct pptdev *ppt;
155 
156 	ppt = device_get_softc(dev);
157 
158 	iommu_remove_device(iommu_host_domain(), pci_get_rid(dev));
159 	num_pptdevs++;
160 	TAILQ_INSERT_TAIL(&pptdev_list, ppt, next);
161 	ppt->dev = dev;
162 
163 	if (bootverbose)
164 		device_printf(dev, "attached\n");
165 
166 	return (0);
167 }
168 
169 static int
170 ppt_detach(device_t dev)
171 {
172 	struct pptdev *ppt;
173 
174 	ppt = device_get_softc(dev);
175 
176 	if (ppt->vm != NULL)
177 		return (EBUSY);
178 	num_pptdevs--;
179 	TAILQ_REMOVE(&pptdev_list, ppt, next);
180 	pci_disable_busmaster(dev);
181 
182 	if (iommu_host_domain() != NULL)
183 		iommu_add_device(iommu_host_domain(), pci_get_rid(dev));
184 
185 	return (0);
186 }
187 
/* Newbus glue: register "ppt" as a child driver of the pci bus. */
static device_method_t ppt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ppt_probe),
	DEVMETHOD(device_attach,	ppt_attach),
	DEVMETHOD(device_detach,	ppt_detach),
	{0, 0}
};

DEFINE_CLASS_0(ppt, ppt_driver, ppt_methods, sizeof(struct pptdev));
DRIVER_MODULE(ppt, pci, ppt_driver, NULL, NULL);
198 
199 static int
200 ppt_find(struct vm *vm, int bus, int slot, int func, struct pptdev **pptp)
201 {
202 	device_t dev;
203 	struct pptdev *ppt;
204 	int b, s, f;
205 
206 	TAILQ_FOREACH(ppt, &pptdev_list, next) {
207 		dev = ppt->dev;
208 		b = pci_get_bus(dev);
209 		s = pci_get_slot(dev);
210 		f = pci_get_function(dev);
211 		if (bus == b && slot == s && func == f)
212 			break;
213 	}
214 
215 	if (ppt == NULL)
216 		return (ENOENT);
217 	if (ppt->vm != vm)		/* Make sure we own this device */
218 		return (EBUSY);
219 	*pptp = ppt;
220 	return (0);
221 }
222 
223 static void
224 ppt_unmap_all_mmio(struct vm *vm, struct pptdev *ppt)
225 {
226 	int i;
227 	struct pptseg *seg;
228 
229 	for (i = 0; i < MAX_MMIOSEGS; i++) {
230 		seg = &ppt->mmio[i];
231 		if (seg->len == 0)
232 			continue;
233 		(void)vm_unmap_mmio(vm, seg->gpa, seg->len);
234 		bzero(seg, sizeof(struct pptseg));
235 	}
236 }
237 
238 static void
239 ppt_teardown_msi(struct pptdev *ppt)
240 {
241 	int i, rid;
242 	void *cookie;
243 	struct resource *res;
244 
245 	if (ppt->msi.num_msgs == 0)
246 		return;
247 
248 	for (i = 0; i < ppt->msi.num_msgs; i++) {
249 		rid = ppt->msi.startrid + i;
250 		res = ppt->msi.res[i];
251 		cookie = ppt->msi.cookie[i];
252 
253 		if (cookie != NULL)
254 			bus_teardown_intr(ppt->dev, res, cookie);
255 
256 		if (res != NULL)
257 			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);
258 
259 		ppt->msi.res[i] = NULL;
260 		ppt->msi.cookie[i] = NULL;
261 	}
262 
263 	if (ppt->msi.startrid == 1)
264 		pci_release_msi(ppt->dev);
265 
266 	ppt->msi.num_msgs = 0;
267 }
268 
269 static void
270 ppt_teardown_msix_intr(struct pptdev *ppt, int idx)
271 {
272 	int rid;
273 	struct resource *res;
274 	void *cookie;
275 
276 	rid = ppt->msix.startrid + idx;
277 	res = ppt->msix.res[idx];
278 	cookie = ppt->msix.cookie[idx];
279 
280 	if (cookie != NULL)
281 		bus_teardown_intr(ppt->dev, res, cookie);
282 
283 	if (res != NULL)
284 		bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);
285 
286 	ppt->msix.res[idx] = NULL;
287 	ppt->msix.cookie[idx] = NULL;
288 }
289 
290 static void
291 ppt_teardown_msix(struct pptdev *ppt)
292 {
293 	int i;
294 
295 	if (ppt->msix.num_msgs == 0)
296 		return;
297 
298 	for (i = 0; i < ppt->msix.num_msgs; i++)
299 		ppt_teardown_msix_intr(ppt, i);
300 
301 	free(ppt->msix.res, M_PPTMSIX);
302 	free(ppt->msix.cookie, M_PPTMSIX);
303 	free(ppt->msix.arg, M_PPTMSIX);
304 
305 	pci_release_msi(ppt->dev);
306 
307 	if (ppt->msix.msix_table_res) {
308 		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
309 				     ppt->msix.msix_table_rid,
310 				     ppt->msix.msix_table_res);
311 		ppt->msix.msix_table_res = NULL;
312 		ppt->msix.msix_table_rid = 0;
313 	}
314 	if (ppt->msix.msix_pba_res) {
315 		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
316 				     ppt->msix.msix_pba_rid,
317 				     ppt->msix.msix_pba_res);
318 		ppt->msix.msix_pba_res = NULL;
319 		ppt->msix.msix_pba_rid = 0;
320 	}
321 
322 	ppt->msix.num_msgs = 0;
323 }
324 
325 int
326 ppt_avail_devices(void)
327 {
328 
329 	return (num_pptdevs);
330 }
331 
332 int
333 ppt_assigned_devices(struct vm *vm)
334 {
335 	struct pptdev *ppt;
336 	int num;
337 
338 	num = 0;
339 	TAILQ_FOREACH(ppt, &pptdev_list, next) {
340 		if (ppt->vm == vm)
341 			num++;
342 	}
343 	return (num);
344 }
345 
346 bool
347 ppt_is_mmio(struct vm *vm, vm_paddr_t gpa)
348 {
349 	int i;
350 	struct pptdev *ppt;
351 	struct pptseg *seg;
352 
353 	TAILQ_FOREACH(ppt, &pptdev_list, next) {
354 		if (ppt->vm != vm)
355 			continue;
356 
357 		for (i = 0; i < MAX_MMIOSEGS; i++) {
358 			seg = &ppt->mmio[i];
359 			if (seg->len == 0)
360 				continue;
361 			if (gpa >= seg->gpa && gpa < seg->gpa + seg->len)
362 				return (true);
363 		}
364 	}
365 
366 	return (false);
367 }
368 
369 static void
370 ppt_pci_reset(device_t dev)
371 {
372 
373 	if (pcie_flr(dev,
374 	     max(pcie_get_max_completion_timeout(dev) / 1000, 10), true))
375 		return;
376 
377 	pci_power_reset(dev);
378 }
379 
/*
 * Assign an unowned passthrough device to 'vm'.
 *
 * The function is reset to quiesce it, with its config space saved and
 * restored around the reset, and is then added to the VM's IOMMU domain
 * so its DMA is translated through the guest address space.
 * Returns 0 on success or an errno value from ppt_find().
 */
int
ppt_assign_device(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;

	/* Passing NULL requires the device to be unowned. */
	error = ppt_find(NULL, bus, slot, func, &ppt);
	if (error)
		return (error);

	/* Save/restore config state around the reset, which clobbers it. */
	pci_save_state(ppt->dev);
	ppt_pci_reset(ppt->dev);
	pci_restore_state(ppt->dev);
	ppt->vm = vm;
	iommu_add_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev));
	return (0);
}
398 
/*
 * Remove a passthrough device from 'vm'.
 *
 * The function is reset, all guest MMIO mappings and MSI/MSI-X state
 * are torn down, and the device is removed from the VM's IOMMU domain
 * before being marked unowned.  Returns 0 on success or an errno value
 * from ppt_find().
 */
int
ppt_unassign_device(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	/* Quiesce the device before unwinding its guest-visible state. */
	pci_save_state(ppt->dev);
	ppt_pci_reset(ppt->dev);
	pci_restore_state(ppt->dev);
	ppt_unmap_all_mmio(vm, ppt);
	ppt_teardown_msi(ppt);
	ppt_teardown_msix(ppt);
	iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev));
	ppt->vm = NULL;
	return (0);
}
419 
420 int
421 ppt_unassign_all(struct vm *vm)
422 {
423 	struct pptdev *ppt;
424 	int bus, slot, func;
425 	device_t dev;
426 
427 	TAILQ_FOREACH(ppt, &pptdev_list, next) {
428 		if (ppt->vm == vm) {
429 			dev = ppt->dev;
430 			bus = pci_get_bus(dev);
431 			slot = pci_get_slot(dev);
432 			func = pci_get_function(dev);
433 			vm_unassign_pptdev(vm, bus, slot, func);
434 		}
435 	}
436 
437 	return (0);
438 }
439 
440 static bool
441 ppt_valid_bar_mapping(struct pptdev *ppt, vm_paddr_t hpa, size_t len)
442 {
443 	struct pci_map *pm;
444 	pci_addr_t base, size;
445 
446 	for (pm = pci_first_bar(ppt->dev); pm != NULL; pm = pci_next_bar(pm)) {
447 		if (!PCI_BAR_MEM(pm->pm_value))
448 			continue;
449 		base = pm->pm_value & PCIM_BAR_MEM_BASE;
450 		size = (pci_addr_t)1 << pm->pm_size;
451 		if (hpa >= base && hpa + len <= base + size)
452 			return (true);
453 	}
454 	return (false);
455 }
456 
457 int
458 ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
459 	     vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
460 {
461 	int i, error;
462 	struct pptseg *seg;
463 	struct pptdev *ppt;
464 
465 	if (len % PAGE_SIZE != 0 || len == 0 || gpa % PAGE_SIZE != 0 ||
466 	    hpa % PAGE_SIZE != 0 || gpa + len < gpa || hpa + len < hpa)
467 		return (EINVAL);
468 
469 	error = ppt_find(vm, bus, slot, func, &ppt);
470 	if (error)
471 		return (error);
472 
473 	if (!ppt_valid_bar_mapping(ppt, hpa, len))
474 		return (EINVAL);
475 
476 	for (i = 0; i < MAX_MMIOSEGS; i++) {
477 		seg = &ppt->mmio[i];
478 		if (seg->len == 0) {
479 			error = vm_map_mmio(vm, gpa, len, hpa);
480 			if (error == 0) {
481 				seg->gpa = gpa;
482 				seg->len = len;
483 			}
484 			return (error);
485 		}
486 	}
487 	return (ENOSPC);
488 }
489 
490 int
491 ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func,
492 	       vm_paddr_t gpa, size_t len)
493 {
494 	int i, error;
495 	struct pptseg *seg;
496 	struct pptdev *ppt;
497 
498 	error = ppt_find(vm, bus, slot, func, &ppt);
499 	if (error)
500 		return (error);
501 
502 	for (i = 0; i < MAX_MMIOSEGS; i++) {
503 		seg = &ppt->mmio[i];
504 		if (seg->gpa == gpa && seg->len == len) {
505 			error = vm_unmap_mmio(vm, seg->gpa, seg->len);
506 			if (error == 0) {
507 				seg->gpa = 0;
508 				seg->len = 0;
509 			}
510 			return (error);
511 		}
512 	}
513 	return (ENOENT);
514 }
515 
516 static int
517 pptintr(void *arg)
518 {
519 	struct pptdev *ppt;
520 	struct pptintr_arg *pptarg;
521 
522 	pptarg = arg;
523 	ppt = pptarg->pptdev;
524 
525 	if (ppt->vm != NULL)
526 		lapic_intr_msi(ppt->vm, pptarg->addr, pptarg->msg_data);
527 	else {
528 		/*
529 		 * XXX
530 		 * This is not expected to happen - panic?
531 		 */
532 	}
533 
534 	/*
535 	 * For legacy interrupts give other filters a chance in case
536 	 * the interrupt was not generated by the passthrough device.
537 	 */
538 	if (ppt->msi.startrid == 0)
539 		return (FILTER_STRAY);
540 	else
541 		return (FILTER_HANDLED);
542 }
543 
/*
 * Configure MSI interrupt forwarding for a passthrough device.
 *
 * 'numvec' host vectors are allocated and each is wired to pptintr(),
 * which injects the interrupt into the guest as an MSI write of
 * 'msg + i' to 'addr'.  numvec == 0 tears down any existing MSI state.
 * If the device has no MSI capability a single shared legacy interrupt
 * is used instead.  Returns 0 on success or an errno value.
 */
int
ppt_setup_msi(struct vm *vm, int bus, int slot, int func,
	      uint64_t addr, uint64_t msg, int numvec)
{
	int i, rid, flags;
	int msi_count, startrid, error, tmp;
	struct pptdev *ppt;

	if (numvec < 0 || numvec > MAX_MSIMSGS)
		return (EINVAL);

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	/* Reject attempts to enable MSI while MSI-X is active. */
	if (ppt->msix.num_msgs != 0 && numvec != 0)
		return (EBUSY);

	/* Free any allocated resources */
	ppt_teardown_msi(ppt);

	if (numvec == 0)		/* nothing more to do */
		return (0);

	flags = RF_ACTIVE;
	msi_count = pci_msi_count(ppt->dev);
	if (msi_count == 0) {
		startrid = 0;		/* legacy interrupt */
		msi_count = 1;
		flags |= RF_SHAREABLE;	/* legacy INTx lines may be shared */
	} else
		startrid = 1;		/* MSI */

	/*
	 * The device must be capable of supporting the number of vectors
	 * the guest wants to allocate.
	 */
	if (numvec > msi_count)
		return (EINVAL);

	/*
	 * Make sure that we can allocate all the MSI vectors that are needed
	 * by the guest.
	 */
	if (startrid == 1) {
		tmp = numvec;
		error = pci_alloc_msi(ppt->dev, &tmp);
		if (error)
			return (error);
		else if (tmp != numvec) {
			/* Fewer vectors than requested: give them all back. */
			pci_release_msi(ppt->dev);
			return (ENOSPC);
		} else {
			/* success */
		}
	}

	ppt->msi.startrid = startrid;

	/*
	 * Allocate the irq resource and attach it to the interrupt handler.
	 */
	for (i = 0; i < numvec; i++) {
		/*
		 * num_msgs is bumped before each allocation so that
		 * ppt_teardown_msi() releases everything acquired so far
		 * if we bail out partway through.
		 */
		ppt->msi.num_msgs = i + 1;
		ppt->msi.cookie[i] = NULL;

		rid = startrid + i;
		ppt->msi.res[i] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ,
							 &rid, flags);
		if (ppt->msi.res[i] == NULL)
			break;

		ppt->msi.arg[i].pptdev = ppt;
		ppt->msi.arg[i].addr = addr;
		ppt->msi.arg[i].msg_data = msg + i;	/* consecutive data values */

		/* pptintr() runs as a filter (no ithread handler). */
		error = bus_setup_intr(ppt->dev, ppt->msi.res[i],
				       INTR_TYPE_NET | INTR_MPSAFE,
				       pptintr, NULL, &ppt->msi.arg[i],
				       &ppt->msi.cookie[i]);
		if (error != 0)
			break;
	}

	if (i < numvec) {
		/* Partial failure: undo everything set up above. */
		ppt_teardown_msi(ppt);
		return (ENXIO);
	}

	return (0);
}
636 
/*
 * Configure one MSI-X vector ('idx') for a passthrough device.
 *
 * The first call for a device also maps the MSI-X table BAR (and the
 * PBA BAR if it is separate), allocates the per-vector bookkeeping
 * arrays and all host MSI-X vectors.  If the vector is unmasked in
 * 'vector_control', its host IRQ is wired to pptintr() to inject
 * 'msg'/'addr' into the guest; if masked, any existing handler for the
 * vector is torn down.  Returns 0 on success or an errno value.
 */
int
ppt_setup_msix(struct vm *vm, int bus, int slot, int func,
	       int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct pptdev *ppt;
	struct pci_devinfo *dinfo;
	int numvec, alloced, rid, error;
	size_t res_size, cookie_size, arg_size;

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		return (error);

	/* Reject attempts to enable MSI-X while MSI is active. */
	if (ppt->msi.num_msgs != 0)
		return (EBUSY);

	dinfo = device_get_ivars(ppt->dev);
	if (!dinfo)
		return (ENXIO);

	/*
	 * First-time configuration:
	 * 	Allocate the MSI-X table
	 *	Allocate the IRQ resources
	 *	Set up some variables in ppt->msix
	 */
	if (ppt->msix.num_msgs == 0) {
		numvec = pci_msix_count(ppt->dev);
		if (numvec <= 0)
			return (EINVAL);

		ppt->msix.startrid = 1;
		ppt->msix.num_msgs = numvec;

		res_size = numvec * sizeof(ppt->msix.res[0]);
		cookie_size = numvec * sizeof(ppt->msix.cookie[0]);
		arg_size = numvec * sizeof(ppt->msix.arg[0]);

		ppt->msix.res = malloc(res_size, M_PPTMSIX, M_WAITOK | M_ZERO);
		ppt->msix.cookie = malloc(cookie_size, M_PPTMSIX,
					  M_WAITOK | M_ZERO);
		ppt->msix.arg = malloc(arg_size, M_PPTMSIX, M_WAITOK | M_ZERO);

		/* Map the BAR that contains the MSI-X table. */
		rid = dinfo->cfg.msix.msix_table_bar;
		ppt->msix.msix_table_res = bus_alloc_resource_any(ppt->dev,
					       SYS_RES_MEMORY, &rid, RF_ACTIVE);

		if (ppt->msix.msix_table_res == NULL) {
			ppt_teardown_msix(ppt);
			return (ENOSPC);
		}
		ppt->msix.msix_table_rid = rid;

		/* Map the PBA separately only if it lives in another BAR. */
		if (dinfo->cfg.msix.msix_table_bar !=
		    dinfo->cfg.msix.msix_pba_bar) {
			rid = dinfo->cfg.msix.msix_pba_bar;
			ppt->msix.msix_pba_res = bus_alloc_resource_any(
			    ppt->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);

			if (ppt->msix.msix_pba_res == NULL) {
				ppt_teardown_msix(ppt);
				return (ENOSPC);
			}
			ppt->msix.msix_pba_rid = rid;
		}

		/* All vectors must be allocated or the whole setup fails. */
		alloced = numvec;
		error = pci_alloc_msix(ppt->dev, &alloced);
		if (error || alloced != numvec) {
			ppt_teardown_msix(ppt);
			return (error == 0 ? ENOSPC: error);
		}
	}

	if ((vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
		/* Tear down the IRQ if it's already set up */
		ppt_teardown_msix_intr(ppt, idx);

		/* Allocate the IRQ resource */
		ppt->msix.cookie[idx] = NULL;
		rid = ppt->msix.startrid + idx;
		ppt->msix.res[idx] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ,
							    &rid, RF_ACTIVE);
		if (ppt->msix.res[idx] == NULL)
			return (ENXIO);

		ppt->msix.arg[idx].pptdev = ppt;
		ppt->msix.arg[idx].addr = addr;
		ppt->msix.arg[idx].msg_data = msg;

		/* Setup the MSI-X interrupt */
		error = bus_setup_intr(ppt->dev, ppt->msix.res[idx],
				       INTR_TYPE_NET | INTR_MPSAFE,
				       pptintr, NULL, &ppt->msix.arg[idx],
				       &ppt->msix.cookie[idx]);

		if (error != 0) {
			/* Undo the partially configured vector. */
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, ppt->msix.res[idx]);
			ppt->msix.cookie[idx] = NULL;
			ppt->msix.res[idx] = NULL;
			return (ENXIO);
		}
	} else {
		/* Masked, tear it down if it's already been set up */
		ppt_teardown_msix_intr(ppt, idx);
	}

	return (0);
}
747 
/*
 * Tear down all MSI-X state for the passthrough device at
 * bus/slot/func owned by 'vm'.  Returns 0 or an errno from ppt_find().
 */
int
ppt_disable_msix(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;

	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error == 0)
		ppt_teardown_msix(ppt);
	return (error);
}
761