/* xref: /freebsd/sys/dev/vmd/vmd.c (revision c66ec88fed842fbaad62c30d510644ceb7bd2d71) */
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright 2019 Cisco Systems, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
40 
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/rman.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/taskqueue.h>
47 
48 #include <sys/pciio.h>
49 #include <dev/pci/pcivar.h>
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pci_private.h>
52 #include <dev/pci/pcib_private.h>
53 
54 #define TASK_QUEUE_INTR 1
55 #include <dev/vmd/vmd.h>
56 
57 #include "pcib_if.h"
58 #include "pci_if.h"
59 
/*
 * Probe-table entry: a PCI vendor/device ID pair and the description
 * string that vmd_probe() installs on a match.
 */
struct vmd_type {
	u_int16_t	vmd_vid;	/* PCI vendor ID */
	u_int16_t	vmd_did;	/* PCI device ID */
	char		*vmd_name;	/* device description */
};

#define INTEL_VENDOR_ID		0x8086
#define INTEL_DEVICE_ID_VMD	0x201d
#define INTEL_DEVICE_ID_VMD2	0x28c0

/* Supported devices; the all-zero entry terminates the list. */
static struct vmd_type vmd_devs[] = {
        { INTEL_VENDOR_ID, INTEL_DEVICE_ID_VMD,  "Intel Volume Management Device" },
        { INTEL_VENDOR_ID, INTEL_DEVICE_ID_VMD2, "Intel Volume Management Device" },
        { 0, 0, NULL }
};
75 
76 static int
77 vmd_probe(device_t dev)
78 {
79 	struct vmd_type *t;
80 	uint16_t vid, did;
81 
82 	t = vmd_devs;
83 	vid = pci_get_vendor(dev);
84 	did = pci_get_device(dev);
85 
86 	while (t->vmd_name != NULL) {
87 		if (vid == t->vmd_vid &&
88 			did == t->vmd_did) {
89 			device_set_desc(dev, t->vmd_name);
90 			return (BUS_PROBE_DEFAULT);
91 		}
92 		t++;
93 	}
94 
95 return (ENXIO);
96 }
97 
/*
 * Release everything vmd_attach() may have acquired.  Safe to call on a
 * partially initialized softc (attach failure path): every teardown step
 * is guarded by a check that the corresponding resource exists.
 */
static void
vmd_free(struct vmd_softc *sc)
{
	int i;
	struct vmd_irq_handler *elm, *tmp;

	/* rm_end != 0 indicates rman_init() succeeded in vmd_attach(). */
	if (sc->vmd_bus.rman.rm_end != 0)
		rman_fini(&sc->vmd_bus.rman);

#ifdef TASK_QUEUE_INTR
	if (sc->vmd_irq_tq != NULL) {
		/* Wait out any in-flight deferred interrupt work first. */
		taskqueue_drain(sc->vmd_irq_tq, &sc->vmd_irq_task);
		taskqueue_free(sc->vmd_irq_tq);
		sc->vmd_irq_tq = NULL;
	}
#endif
	if (sc->vmd_irq != NULL) {
		for (i = 0; i < sc->vmd_msix_count; i++) {
			if (sc->vmd_irq[i].vmd_res != NULL) {
				bus_teardown_intr(sc->vmd_dev,
				    sc->vmd_irq[i].vmd_res,
				    sc->vmd_irq[i].vmd_handle);
				bus_release_resource(sc->vmd_dev, SYS_RES_IRQ,
				    sc->vmd_irq[i].vmd_rid,
				    sc->vmd_irq[i].vmd_res);
			}
		}
		/* All child handlers are chained on vector 0's list. */
		TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list ,vmd_link,
		    tmp) {
			TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
			free(elm, M_DEVBUF);
		}
	}
	free(sc->vmd_irq, M_DEVBUF);
	sc->vmd_irq = NULL;
	pci_release_msi(sc->vmd_dev);
	for (i = 0; i < VMD_MAX_BAR; i++) {
		if (sc->vmd_regs_resource[i] != NULL)
			bus_release_resource(sc->vmd_dev, SYS_RES_MEMORY,
			    sc->vmd_regs_rid[i],
			    sc->vmd_regs_resource[i]);
	}
	if (sc->vmd_io_resource)
		bus_release_resource(device_get_parent(sc->vmd_dev),
		    SYS_RES_IOPORT, sc->vmd_io_rid, sc->vmd_io_resource);

#ifndef TASK_QUEUE_INTR
	if (mtx_initialized(&sc->vmd_irq_lock)) {
		mtx_destroy(&sc->vmd_irq_lock);
	}
#endif
}
150 
151 /* Hidden PCI Roots are hidden in BAR(0). */
152 
153 static uint32_t
154 vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
155 {
156 
157 	struct vmd_softc *sc;
158 	bus_addr_t offset;
159 
160 	offset = (b << 20) + (s << 15) + (f << 12) + reg;
161 	sc = device_get_softc(dev);
162 	switch(width) {
163 	case 4:
164 		return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle,
165 		    offset));
166 	case 2:
167 		return (bus_space_read_2(sc->vmd_btag, sc->vmd_bhandle,
168 		    offset));
169 	case 1:
170 		return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle,
171 		    offset));
172 	default:
173 		KASSERT(1, ("Invalid width requested"));
174 		return (0xffffffff);
175 	}
176 }
177 
178 static void
179 vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
180     uint32_t val, int width)
181 {
182 
183 	struct vmd_softc *sc;
184 	bus_addr_t offset;
185 
186 	offset = (b << 20) + (s << 15) + (f << 12) + reg;
187 	sc = device_get_softc(dev);
188 
189 	switch(width) {
190 	case 4:
191 		return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle,
192 		    offset, val));
193 	case 2:
194 		return (bus_space_write_2(sc->vmd_btag, sc->vmd_bhandle,
195 		    offset, val));
196 	case 1:
197 		return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle,
198 		    offset, val));
199 	default:
200 		panic("Failed to specific width");
201 	}
202 }
203 
204 static uint32_t
205 vmd_pci_read_config(device_t dev, device_t child, int reg, int width)
206 {
207 	struct pci_devinfo *dinfo = device_get_ivars(child);
208 	pcicfgregs *cfg = &dinfo->cfg;
209 
210 	return vmd_read_config(dev, cfg->bus, cfg->slot, cfg->func, reg, width);
211 }
212 
213 static void
214 vmd_pci_write_config(device_t dev, device_t child, int reg, uint32_t val,
215     int width)
216 {
217 	struct pci_devinfo *dinfo = device_get_ivars(child);
218 	pcicfgregs *cfg = &dinfo->cfg;
219 
220 	vmd_write_config(dev, cfg->bus, cfg->slot, cfg->func, reg, val, width);
221 }
222 
223 static struct pci_devinfo *
224 vmd_alloc_devinfo(device_t dev)
225 {
226 	struct pci_devinfo *dinfo;
227 
228 	dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO);
229 	return (dinfo);
230 }
231 
/*
 * Hardware interrupt handler for a VMD MSI-X vector.  With
 * TASK_QUEUE_INTR the child dispatch is deferred to a taskqueue;
 * otherwise the chained child handlers are invoked directly here under
 * the IRQ lock.
 */
static void
vmd_intr(void *arg)
{
	struct vmd_irq  *irq;
	struct vmd_softc *sc;
#ifndef TASK_QUEUE_INTR
	struct vmd_irq_handler *elm, *tmp_elm;
#endif

	irq = (struct vmd_irq *)arg;
	sc = irq->vmd_sc;
#ifdef TASK_QUEUE_INTR
	taskqueue_enqueue(sc->vmd_irq_tq, &sc->vmd_irq_task);
#else
	mtx_lock(&sc->vmd_irq_lock);
	/* All child handlers hang off vector 0's list (see vmd_setup_intr). */
	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
		(elm->vmd_intr)(elm->vmd_arg);
	}
	mtx_unlock(&sc->vmd_irq_lock);
#endif
}
253 
#ifdef TASK_QUEUE_INTR
/*
 * Taskqueue callback: run every registered child interrupt handler.
 * Executes in thread context, so handlers may sleep.
 * NOTE(review): unlike the direct-dispatch path, the list walk here is
 * not serialized by vmd_irq_lock — presumably the single taskqueue
 * thread provides the serialization; confirm against vmd_setup_intr().
 */
static void
vmd_handle_irq(void *context, int pending)
{
	struct vmd_irq_handler *elm, *tmp_elm;
	struct vmd_softc *sc;

	sc = context;

	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
		(elm->vmd_intr)(elm->vmd_arg);
	}
}
#endif
268 
269 static int
270 vmd_attach(device_t dev)
271 {
272 	struct vmd_softc *sc;
273 	struct pcib_secbus *bus;
274 	uint32_t bar;
275 	int i, j, error;
276 	int rid, sec_reg;
277 	static int b;
278 	static int s;
279 	static int f;
280 	int min_count = 1;
281 	char buf[64];
282 
283 	sc = device_get_softc(dev);
284 	bzero(sc, sizeof(*sc));
285 	sc->vmd_dev = dev;
286 	b = s = f = 0;
287 
288 	pci_enable_busmaster(dev);
289 
290 #ifdef TASK_QUEUE_INTR
291 	sc->vmd_irq_tq = taskqueue_create_fast("vmd_taskq", M_NOWAIT,
292 	    taskqueue_thread_enqueue, &sc->vmd_irq_tq);
293 	taskqueue_start_threads(&sc->vmd_irq_tq, 1, PI_DISK, "%s taskq",
294             device_get_nameunit(sc->vmd_dev));
295 	TASK_INIT(&sc->vmd_irq_task, 0, vmd_handle_irq, sc);
296 #else
297 	mtx_init(&sc->vmd_irq_lock, "VMD IRQ lock", NULL, MTX_DEF);
298 #endif
299 	for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++ ) {
300 		sc->vmd_regs_rid[i] = PCIR_BAR(j);
301 		bar = pci_read_config(dev, PCIR_BAR(0), 4);
302 		if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
303 		    PCIM_BAR_MEM_64)
304 			j++;
305 		if ((sc->vmd_regs_resource[i] = bus_alloc_resource_any(
306 		    sc->vmd_dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i],
307 		    RF_ACTIVE)) == NULL) {
308 			device_printf(dev, "Cannot allocate resources\n");
309 			goto fail;
310 		}
311 	}
312 
313 	sc->vmd_io_rid = PCIR_IOBASEL_1;
314 	sc->vmd_io_resource = bus_alloc_resource_any(
315 	    device_get_parent(sc->vmd_dev), SYS_RES_IOPORT, &sc->vmd_io_rid,
316 	    RF_ACTIVE);
317 	if (sc->vmd_io_resource == NULL) {
318 		device_printf(dev, "Cannot allocate IO\n");
319 		goto fail;
320 	}
321 
322 	sc->vmd_btag = rman_get_bustag(sc->vmd_regs_resource[0]);
323 	sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_resource[0]);
324 
325 	pci_write_config(dev, PCIR_PRIBUS_2,
326 	    pcib_get_bus(device_get_parent(dev)), 1);
327 
328 	sec_reg = PCIR_SECBUS_1;
329 	bus = &sc->vmd_bus;
330 	bus->sub_reg = PCIR_SUBBUS_1;
331 	bus->sec = vmd_read_config(dev, b, s, f, sec_reg, 1);
332 	bus->sub = vmd_read_config(dev, b, s, f, bus->sub_reg, 1);
333 	bus->dev = dev;
334 	bus->rman.rm_start = 0;
335 	bus->rman.rm_end = PCI_BUSMAX;
336 	bus->rman.rm_type = RMAN_ARRAY;
337 	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
338 	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
339 	error = rman_init(&bus->rman);
340 	if (error) {
341 		device_printf(dev, "Failed to initialize %s bus number rman\n",
342 		    device_get_nameunit(dev));
343 		bus->rman.rm_end = 0;
344 		goto fail;
345 	}
346 
347 	/*
348 	 * Allocate a bus range.  This will return an existing bus range
349 	 * if one exists, or a new bus range if one does not.
350 	 */
351 	rid = 0;
352 	bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
353 	    min_count, 0);
354 	if (bus->res == NULL) {
355 		/*
356 		 * Fall back to just allocating a range of a single bus
357 		 * number.
358 		 */
359 		bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
360 		    1, 0);
361 	} else if (rman_get_size(bus->res) < min_count) {
362 		/*
363 		 * Attempt to grow the existing range to satisfy the
364 		 * minimum desired count.
365 		 */
366 		(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
367 		    rman_get_start(bus->res), rman_get_start(bus->res) +
368 		    min_count - 1);
369 	}
370 
371 	/*
372 	 * Add the initial resource to the rman.
373 	 */
374 	if (bus->res != NULL) {
375 		error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
376 		    rman_get_end(bus->res));
377 		if (error) {
378 			device_printf(dev, "Failed to add resource to rman\n");
379 			goto fail;
380 		}
381 		bus->sec = rman_get_start(bus->res);
382 		bus->sub = rman_get_end(bus->res);
383 	}
384 
385 	sc->vmd_msix_count = pci_msix_count(dev);
386 	if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
387 		sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
388 		    sc->vmd_msix_count,
389 		    M_DEVBUF, M_WAITOK | M_ZERO);
390 
391 		for (i = 0; i < sc->vmd_msix_count; i++) {
392 			sc->vmd_irq[i].vmd_rid = i + 1;
393 			sc->vmd_irq[i].vmd_sc = sc;
394 			sc->vmd_irq[i].vmd_instance = i;
395 			sc->vmd_irq[i].vmd_res = bus_alloc_resource_any(dev,
396 			    SYS_RES_IRQ, &sc->vmd_irq[i].vmd_rid,
397 			    RF_ACTIVE);
398 			if (sc->vmd_irq[i].vmd_res == NULL) {
399 				device_printf(dev,"Failed to alloc irq\n");
400 				goto fail;
401 			}
402 
403 			TAILQ_INIT(&sc->vmd_irq[i].vmd_list);
404 			if (bus_setup_intr(dev, sc->vmd_irq[i].vmd_res,
405 			    INTR_TYPE_MISC | INTR_MPSAFE, NULL, vmd_intr,
406 			    &sc->vmd_irq[i], &sc->vmd_irq[i].vmd_handle)) {
407 				device_printf(sc->vmd_dev,
408 				    "Cannot set up interrupt\n");
409 				sc->vmd_irq[i].vmd_res = NULL;
410 				goto fail;
411 			}
412 		}
413 	}
414 
415 	sc->vmd_child = device_add_child(dev, NULL, -1);
416 	if (sc->vmd_child == NULL) {
417 		device_printf(dev, "Failed to attach child\n");
418 		goto fail;
419 	}
420 
421 	error = device_probe_and_attach(sc->vmd_child);
422 	if (error) {
423 		device_printf(dev, "Failed to add probe child: %d\n", error);
424 		(void)device_delete_child(dev, sc->vmd_child);
425 		goto fail;
426 	}
427 
428 	return (0);
429 
430 fail:
431 	vmd_free(sc);
432 	return (ENXIO);
433 }
434 
435 static int
436 vmd_detach(device_t dev)
437 {
438 	struct vmd_softc *sc;
439 	int err;
440 
441 	sc = device_get_softc(dev);
442 	if (sc->vmd_child != NULL) {
443 		err = bus_generic_detach(sc->vmd_child);
444 		if (err)
445 			return (err);
446 		err = device_delete_child(dev, sc->vmd_child);
447 		if (err)
448 			return (err);
449 	}
450 	vmd_free(sc);
451 	return (0);
452 }
453 
454 /* Pass request to alloc an MSI-X message up to the parent bridge. */
455 static int
456 vmd_alloc_msix(device_t pcib, device_t dev, int *irq)
457 {
458 	struct vmd_softc *sc = device_get_softc(pcib);
459 	device_t bus;
460 	int ret;
461 
462 	if (sc->vmd_flags & PCIB_DISABLE_MSIX)
463 		return (ENXIO);
464 	bus = device_get_parent(pcib);
465 	ret = PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq);
466         return (ret);
467 }
468 
469 static struct resource *
470 vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
471     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
472 {
473 	/* Start at max PCI vmd_domain and work down */
474 	if (type == PCI_RES_BUS) {
475 		return (pci_domain_alloc_bus(PCI_DOMAINMAX -
476 		    device_get_unit(dev), child, rid, start, end,
477 		    count, flags));
478 	}
479 
480 	return (pcib_alloc_resource(dev, child, type, rid, start, end,
481 				    count, flags));
482 }
483 
484 static int
485 vmd_adjust_resource(device_t dev, device_t child, int type,
486     struct resource *r, rman_res_t start, rman_res_t end)
487 {
488 	struct resource *res = r;
489 
490 	if (type == PCI_RES_BUS)
491 		return (pci_domain_adjust_bus(PCI_DOMAINMAX -
492 			device_get_unit(dev), child, res, start, end));
493 	return (pcib_adjust_resource(dev, child, type, res, start, end));
494 }
495 
496 static int
497 vmd_release_resource(device_t dev, device_t child, int type, int rid,
498     struct resource *r)
499 {
500 	if (type == PCI_RES_BUS)
501 		return (pci_domain_release_bus(PCI_DOMAINMAX -
502 		    device_get_unit(dev), child, rid, r));
503 	return (pcib_release_resource(dev, child, type, rid, r));
504 }
505 
506 static int
507 vmd_shutdown(device_t dev)
508 {
509 	return (0);
510 }
511 
512 static int
513 vmd_pcib_route_interrupt(device_t pcib, device_t dev, int pin)
514 {
515 	return (pcib_route_interrupt(pcib, dev, pin));
516 }
517 
518 static int
519 vmd_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount,
520     int *irqs)
521 {
522 	return (pcib_alloc_msi(pcib, dev, count, maxcount, irqs));
523 }
524 
525 static int
526 vmd_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
527 {
528 
529 	return (pcib_release_msi(pcib, dev, count, irqs));
530 }
531 
532 static int
533 vmd_pcib_release_msix(device_t pcib, device_t dev, int irq) {
534 	return	pcib_release_msix(pcib, dev, irq);
535 }
536 
537 static int
538 vmd_setup_intr(device_t dev, device_t child, struct resource *irq,
539     int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
540     void **cookiep)
541 {
542 	struct vmd_irq_handler *elm;
543 	struct vmd_softc *sc;
544 	int i;
545 
546 	sc = device_get_softc(dev);
547 
548 	/*
549 	 * There appears to be no steering of VMD interrupts from device
550 	 * to VMD interrupt
551 	 */
552 
553 	i = 0;
554 	elm = malloc(sizeof(*elm), M_DEVBUF, M_NOWAIT|M_ZERO);
555 	elm->vmd_child = child;
556 	elm->vmd_intr = intr;
557 	elm->vmd_rid = rman_get_rid(irq);
558 	elm->vmd_arg = arg;
559 	TAILQ_INSERT_TAIL(&sc->vmd_irq[i].vmd_list, elm, vmd_link);
560 
561 	return (bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
562 	    arg, cookiep));
563 }
564 
565 static int
566 vmd_teardown_intr(device_t dev, device_t child, struct resource *irq,
567     void *cookie)
568 {
569 	struct vmd_irq_handler *elm, *tmp;;
570 	struct vmd_softc *sc;
571 
572 	sc = device_get_softc(dev);
573 	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp) {
574 		if (elm->vmd_child == child &&
575 		    elm->vmd_rid == rman_get_rid(irq)) {
576 			TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
577 			free(elm, M_DEVBUF);
578 		}
579 	}
580 
581 	return (bus_generic_teardown_intr(dev, child, irq, cookie));
582 }
583 
/*
 * Method table: the VMD device acts as both a PCI child (pci/bus
 * methods) and a PCI-PCI bridge (pcib methods) for the hidden hierarchy
 * behind it.
 */
static device_method_t vmd_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			vmd_probe),
	DEVMETHOD(device_attach,		vmd_attach),
	DEVMETHOD(device_detach,		vmd_detach),
	DEVMETHOD(device_shutdown,		vmd_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,		vmd_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		vmd_adjust_resource),
	DEVMETHOD(bus_release_resource,		vmd_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		vmd_setup_intr),
	DEVMETHOD(bus_teardown_intr,		vmd_teardown_intr),

	/* pci interface */
	DEVMETHOD(pci_read_config,		vmd_pci_read_config),
	DEVMETHOD(pci_write_config,		vmd_pci_write_config),
	DEVMETHOD(pci_alloc_devinfo,		vmd_alloc_devinfo),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		pcib_maxslots),
	DEVMETHOD(pcib_read_config,		vmd_read_config),
	DEVMETHOD(pcib_write_config,		vmd_write_config),
	DEVMETHOD(pcib_route_interrupt,		vmd_pcib_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,		vmd_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmd_pcib_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmd_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmd_pcib_release_msix),
	DEVMETHOD(pcib_map_msi,			pcib_map_msi),

	DEVMETHOD_END
};
620 
static devclass_t vmd_devclass;

DEFINE_CLASS_0(vmd, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, vmd_devclass, NULL, NULL);
/*
 * Export the PNP ID table (minus the NULL terminator) so devmatch(8)
 * can autoload this module for matching vendor/device pairs.
 */
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
    vmd_devs, nitems(vmd_devs) - 1);
MODULE_DEPEND(vmd, vmd_bus, 1, 1, 1);
628