xref: /freebsd/sys/dev/vmd/vmd.c (revision c8e7f78a3d28ff6e6223ed136ada8e1e2f34965e)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Alexander Motin <mav@FreeBSD.org>
 * Copyright 2019 Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/pciio.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pcib_private.h>

#include <dev/vmd/vmd.h>

#include "pcib_if.h"

struct vmd_type {
	u_int16_t	vmd_vid;
	u_int16_t	vmd_did;
	char		*vmd_name;
	int		flags;
#define BUS_RESTRICT	1
#define VECTOR_OFFSET	2
#define CAN_BYPASS_MSI	4
};

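/*
 * Flag semantics, as used below: BUS_RESTRICT means the device may restrict
 * the starting child bus number via VMD_CONFIG, VECTOR_OFFSET means MSI-X
 * vector 0 must be skipped for children (they start at vector 1), and
 * CAN_BYPASS_MSI means child MSI/MSI-X may be passed through unremapped.
 *
 * The offsets below are registers in the VMD endpoint's own PCI
 * configuration space.
 */
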
#define VMD_CAP		0x40
#define VMD_BUS_RESTRICT	0x1

#define VMD_CONFIG	0x44
#define VMD_BYPASS_MSI		0x2
#define VMD_BUS_START(x)	((x >> 8) & 0x3)

#define VMD_LOCK	0x70

SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Intel Volume Management Device tuning parameters");

/*
 * By default all VMD devices remap their children's MSI/MSI-X interrupts
 * into their own.  This adds isolation, but also complicates things due
 * to vector sharing, etc.  Fortunately, some VMD devices can bypass the
 * remapping.
 */
static int vmd_bypass_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, bypass_msi, CTLFLAG_RWTUN, &vmd_bypass_msi, 0,
    "Bypass MSI remapping on capable hardware");

/*
 * All MSIs within a group share the same address, so VMD can't distinguish
 * them.  Using more than one per device makes no sense unless a specific
 * device driver requires it.
 */
static int vmd_max_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0,
    "Maximum number of MSI vectors per device");

/*
 * MSI-X can use different addresses, but we have only a limited number of
 * MSI-X vectors to route them to, so use a conservative default to try to
 * avoid sharing.
 */
static int vmd_max_msix = 3;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0,
    "Maximum number of MSI-X vectors per device");

static struct vmd_type vmd_devs[] = {
        { 0x8086, 0x201d, "Intel Volume Management Device", 0 },
        { 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT | CAN_BYPASS_MSI },
        { 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0x7d0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0xa77f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0xad0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0, 0, NULL, 0 }
};

static int
vmd_probe(device_t dev)
{
	struct vmd_type *t;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did) {
			device_set_desc(dev, t->vmd_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static void
vmd_free(struct vmd_softc *sc)
{
	struct vmd_irq *vi;
	struct vmd_irq_user *u;
	int i;

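	/* A nonzero rm_end means the rman was successfully initialized. */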
	if (sc->psc.bus.rman.rm_end != 0)
		rman_fini(&sc->psc.bus.rman);
	if (sc->psc.mem.rman.rm_end != 0)
		rman_fini(&sc->psc.mem.rman);
	while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) {
		LIST_REMOVE(u, viu_link);
		free(u, M_DEVBUF);
	}
	if (sc->vmd_irq != NULL) {
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			if (vi->vi_res == NULL)
				continue;
			bus_teardown_intr(sc->psc.dev, vi->vi_res,
			    vi->vi_handle);
			bus_release_resource(sc->psc.dev, SYS_RES_IRQ,
			    vi->vi_rid, vi->vi_res);
		}
	}
	free(sc->vmd_irq, M_DEVBUF);
	sc->vmd_irq = NULL;
	pci_release_msi(sc->psc.dev);
	for (i = 0; i < VMD_MAX_BAR; i++) {
		if (sc->vmd_regs_res[i] != NULL)
			bus_release_resource(sc->psc.dev, SYS_RES_MEMORY,
			    sc->vmd_regs_rid[i], sc->vmd_regs_res[i]);
	}
}

/* Config space of the hidden PCI roots is memory-mapped through BAR(0). */

static uint32_t
vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return (0xffffffff);

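	/*
	 * ECAM-style layout behind BAR(0): 1 MB (1 << 20) of config space
	 * per bus, 32 kB (1 << 15) per slot and 4 kB (1 << 12) per function.
	 */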
	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle,
		    offset));
	case 2:
		return (bus_space_read_2(sc->vmd_btag, sc->vmd_bhandle,
		    offset));
	case 1:
		return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle,
		    offset));
	default:
		__assert_unreachable();
		return (0xffffffff);
	}
}

static void
vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
    uint32_t val, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return;

	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle,
		    offset, val));
	case 2:
		return (bus_space_write_2(sc->vmd_btag, sc->vmd_bhandle,
		    offset, val));
	case 1:
		return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle,
		    offset, val));
	default:
		__assert_unreachable();
	}
}

static void
vmd_set_msi_bypass(device_t dev, bool enable)
{
	uint16_t val;

	val = pci_read_config(dev, VMD_CONFIG, 2);
	if (enable)
		val |= VMD_BYPASS_MSI;
	else
		val &= ~VMD_BYPASS_MSI;
	pci_write_config(dev, VMD_CONFIG, val, 2);
}

static int
vmd_intr(void *arg)
{
	/*
	 * We have nothing to do here, but we have to register some interrupt
	 * handler to make the PCI code set up and enable the MSI-X vector.
	 */
	return (FILTER_STRAY);
}

static int
vmd_attach(device_t dev)
{
	struct vmd_softc *sc;
	struct pcib_secbus *bus;
	struct pcib_window *w;
	struct vmd_type *t;
	struct vmd_irq *vi;
	uint16_t vid, did;
	uint32_t bar;
	int i, j, error;
	char buf[64];

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->psc.dev = dev;
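	/*
	 * Each VMD instance exposes its hidden buses in a private PCI
	 * domain, allocated from the top of the domain space downward.
	 */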
	sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev);

	pci_enable_busmaster(dev);

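	/*
	 * Map the three memory BARs: BAR(0) holds child config space, while
	 * the other two provide memory windows for the children.  A 64-bit
	 * BAR consumes two registers, hence the extra increment of j.
	 */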
	for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) {
		sc->vmd_regs_rid[i] = PCIR_BAR(j);
		bar = pci_read_config(dev, PCIR_BAR(j), 4);
		if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
		    PCIM_BAR_MEM_64)
			j++;
		if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) {
			device_printf(dev, "Cannot allocate resources\n");
			goto fail;
		}
	}

	sc->vmd_btag = rman_get_bustag(sc->vmd_regs_res[0]);
	sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_res[0]);

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did)
			break;
	}

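	/*
	 * If the hardware restricts the bus range, the two VMD_BUS_START
	 * bits of VMD_CONFIG encode where the child bus numbers begin.
	 */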
	sc->vmd_bus_start = 0;
	if ((t->flags & BUS_RESTRICT) &&
	    (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) {
		switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) {
		case 0:
			sc->vmd_bus_start = 0;
			break;
		case 1:
			sc->vmd_bus_start = 128;
			break;
		case 2:
			sc->vmd_bus_start = 224;
			break;
		default:
			device_printf(dev, "Unknown bus offset\n");
			goto fail;
		}
	}
	sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start +
	    (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1);

	bus = &sc->psc.bus;
	bus->sec = sc->vmd_bus_start;
	bus->sub = sc->vmd_bus_end;
	bus->dev = dev;
	bus->rman.rm_start = 0;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
	if (error) {
		device_printf(dev, "Failed to initialize bus rman\n");
		bus->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&bus->rman, sc->vmd_bus_start,
	    sc->vmd_bus_end);
	if (error) {
		device_printf(dev, "Failed to add resource to bus rman\n");
		goto fail;
	}

	w = &sc->psc.mem;
	w->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev));
	w->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&w->rman);
	if (error) {
		device_printf(dev, "Failed to initialize memory rman\n");
		w->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[1]),
	    rman_get_end(sc->vmd_regs_res[1]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}
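	/*
	 * The first 0x2000 bytes of the second memory window are skipped;
	 * they are assumed reserved for the VMD's own use (e.g. its MSI-X
	 * tables), so children must not be placed there.
	 */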
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[2]) + 0x2000,
	    rman_get_end(sc->vmd_regs_res[2]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}

	LIST_INIT(&sc->vmd_users);
	sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 1 : 0;
	sc->vmd_msix_count = pci_msix_count(dev);
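	/*
	 * Either pass child MSI/MSI-X through unremapped on capable
	 * hardware, or allocate every MSI-X vector of the VMD itself and
	 * register a dummy filter on each so the PCI code programs and
	 * enables them; children are then steered onto these vectors in
	 * vmd_map_msi().
	 */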
	if (vmd_bypass_msi && (t->flags & CAN_BYPASS_MSI)) {
		sc->vmd_msix_count = 0;
		vmd_set_msi_bypass(dev, true);
	} else if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
		sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
		    sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			vi->vi_rid = i + 1;
			vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
			    &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE);
			if (vi->vi_res == NULL) {
				device_printf(dev, "Failed to allocate irq\n");
				goto fail;
			}
			vi->vi_irq = rman_get_start(vi->vi_res);
			if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC |
			    INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) {
				device_printf(dev, "Can't set up interrupt\n");
				bus_release_resource(dev, SYS_RES_IRQ,
				    vi->vi_rid, vi->vi_res);
				vi->vi_res = NULL;
				goto fail;
			}
		}
		vmd_set_msi_bypass(dev, false);
	}

	sc->vmd_dma_tag = bus_get_dma_tag(dev);

	sc->psc.child = device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));

fail:
	vmd_free(sc);
	return (ENXIO);
}

static int
vmd_detach(device_t dev)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int error;

	error = bus_generic_detach(dev);
	if (error)
		return (error);
	error = device_delete_children(dev);
	if (error)
		return (error);
	if (sc->vmd_msix_count == 0)
		vmd_set_msi_bypass(dev, false);
	vmd_free(sc);
	return (0);
}

static bus_dma_tag_t
vmd_get_dma_tag(device_t dev, device_t child)
{
	struct vmd_softc *sc = device_get_softc(dev);

	return (sc->vmd_dma_tag);
}

static struct resource *
vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct resource *res;

	switch (type) {
	case SYS_RES_IRQ:
		/* VMD hardware does not support legacy interrupts. */
		if (*rid == 0)
			return (NULL);
		return (bus_generic_alloc_resource(dev, child, type, rid,
		    start, end, count, flags | RF_SHAREABLE));
	case SYS_RES_MEMORY:
		res = rman_reserve_resource(&sc->psc.mem.rman, start, end,
		    count, flags, child);
		if (res == NULL)
			return (NULL);
		if (bootverbose)
			device_printf(dev,
			    "allocated memory range (%#jx-%#jx) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
		break;
	case PCI_RES_BUS:
		res = rman_reserve_resource(&sc->psc.bus.rman, start, end,
		    count, flags, child);
		if (res == NULL)
			return (NULL);
		if (bootverbose)
			device_printf(dev,
			    "allocated bus range (%ju-%ju) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
		break;
	default:
		/* VMD hardware does not support I/O ports. */
		return (NULL);
	}
	rman_set_rid(res, *rid);
	return (res);
}

static int
vmd_adjust_resource(device_t dev, device_t child, int type,
    struct resource *r, rman_res_t start, rman_res_t end)
{

	if (type == SYS_RES_IRQ) {
		return (bus_generic_adjust_resource(dev, child, type, r,
		    start, end));
	}
	return (rman_adjust_resource(r, start, end));
}

static int
vmd_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{

	if (type == SYS_RES_IRQ) {
		return (bus_generic_release_resource(dev, child, type, rid,
		    r));
	}
	return (rman_release_resource(r));
}

static int
vmd_route_interrupt(device_t dev, device_t child, int pin)
{

	/* VMD hardware does not support legacy interrupts. */
	return (PCI_INVALID_IRQ);
}

static int
vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

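	/*
	 * With MSI remapping bypassed there is nothing to allocate here:
	 * forward the request to the bridge above the VMD device itself
	 * (hence the double device_get_parent()).
	 */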
	if (sc->vmd_msix_count == 0) {
		return (PCIB_ALLOC_MSI(device_get_parent(device_get_parent(dev)),
		    child, count, maxcount, irqs));
	}

	if (count > vmd_max_msi)
		return (ENOSPC);
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			return (EBUSY);
	}

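	/* Pick the least-loaded VMD vector to spread children evenly. */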
	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers += count;

	for (i = 0; i < count; i++)
		irqs[i] = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msi(device_t dev, device_t child, int count, int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)),
		    child, count, irqs));
	}

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child) {
			sc->vmd_irq[u->viu_vector].vi_nusers -= count;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

static int
vmd_alloc_msix(device_t dev, device_t child, int *irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_ALLOC_MSIX(device_get_parent(device_get_parent(dev)),
		    child, irq));
	}

	i = 0;
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			i++;
	}
	if (i >= vmd_max_msix)
		return (ENOSPC);

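	/* As in vmd_alloc_msi(), pick the least-loaded VMD vector. */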
	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers++;

	*irq = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msix(device_t dev, device_t child, int irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
		    child, irq));
	}

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child &&
		    sc->vmd_irq[u->viu_vector].vi_irq == irq) {
			sc->vmd_irq[u->viu_vector].vi_nusers--;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

static int
vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int i;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)),
		    child, irq, addr, data));
	}

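	/*
	 * Find the VMD vector backing this IRQ and hand the child an MSI
	 * address that steers into it; the vector index lands in the
	 * address bits that normally carry the APIC destination ID, which
	 * the VMD hardware presumably uses to pick its own MSI-X vector,
	 * and the data word is left zero.
	 */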
	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (sc->vmd_irq[i].vi_irq == irq)
			break;
	}
	if (i >= sc->vmd_msix_count)
		return (EINVAL);
	*addr = MSI_INTEL_ADDR_BASE | (i << 12);
	*data = 0;
	return (0);
}

static device_method_t vmd_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			vmd_probe),
	DEVMETHOD(device_attach,		vmd_attach),
	DEVMETHOD(device_detach,		vmd_detach),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,		vmd_get_dma_tag),
	DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,		vmd_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		vmd_adjust_resource),
	DEVMETHOD(bus_release_resource,		vmd_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		pcib_maxslots),
	DEVMETHOD(pcib_read_config,		vmd_read_config),
	DEVMETHOD(pcib_write_config,		vmd_write_config),
	DEVMETHOD(pcib_route_interrupt,		vmd_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,		vmd_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmd_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmd_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmd_release_msix),
	DEVMETHOD(pcib_map_msi,			vmd_map_msi),
	DEVMETHOD(pcib_request_feature,		pcib_request_feature_allow),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, NULL, NULL);
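/*
 * Export the ID table for PNP matching (e.g. devmatch(8)); the terminating
 * all-zero entry is excluded from the count.
 */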
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
    vmd_devs, nitems(vmd_devs) - 1);