xref: /freebsd/sys/dev/virtio/pci/virtio_pci.c (revision b0d29bc47dba79f6f38e67eabadfb4b32ffd9390)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* Driver for the VirtIO PCI interface. */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/bus.h>
37 #include <sys/kernel.h>
38 #include <sys/module.h>
39 #include <sys/malloc.h>
40 #include <sys/endian.h>
41 
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus.h>
45 #include <sys/rman.h>
46 
47 #include <dev/pci/pcivar.h>
48 #include <dev/pci/pcireg.h>
49 
50 #include <dev/virtio/virtio.h>
51 #include <dev/virtio/virtqueue.h>
52 #include <dev/virtio/pci/virtio_pci.h>
53 
54 #include "virtio_bus_if.h"
55 #include "virtio_if.h"
56 
57 struct vtpci_interrupt {
58 	struct resource		*vti_irq;
59 	int			 vti_rid;
60 	void			*vti_handler;
61 };
62 
63 struct vtpci_virtqueue {
64 	struct virtqueue	*vtv_vq;
65 	int			 vtv_no_intr;
66 };
67 
68 struct vtpci_softc {
69 	device_t			 vtpci_dev;
70 	struct resource			*vtpci_res;
71 	struct resource			*vtpci_msix_res;
72 	uint64_t			 vtpci_features;
73 	uint32_t			 vtpci_flags;
74 #define VTPCI_FLAG_NO_MSI		0x0001
75 #define VTPCI_FLAG_NO_MSIX		0x0002
76 #define VTPCI_FLAG_LEGACY		0x1000
77 #define VTPCI_FLAG_MSI			0x2000
78 #define VTPCI_FLAG_MSIX			0x4000
79 #define VTPCI_FLAG_SHARED_MSIX		0x8000
80 #define VTPCI_FLAG_ITYPE_MASK		0xF000
81 
82 	/* This "bus" will only ever have one child. */
83 	device_t			 vtpci_child_dev;
84 	struct virtio_feature_desc	*vtpci_child_feat_desc;
85 
86 	int				 vtpci_nvqs;
87 	struct vtpci_virtqueue		*vtpci_vqs;
88 
89 	/*
90 	 * Ideally, each virtqueue that the driver provides a callback for will
91 	 * receive its own MSIX vector. If there are not sufficient vectors
92 	 * available, then attempt to have all the VQs share one vector. For
93 	 * MSIX, the configuration change notifications must be on their own
94 	 * vector.
95 	 *
96 	 * If MSIX is not available, we will attempt to have the whole device
97 	 * share one MSI vector, and then, finally, one legacy interrupt.
98 	 */
99 	struct vtpci_interrupt		 vtpci_device_interrupt;
100 	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
101 	int				 vtpci_nmsix_resources;
102 };
103 
104 static int	vtpci_probe(device_t);
105 static int	vtpci_attach(device_t);
106 static int	vtpci_detach(device_t);
107 static int	vtpci_suspend(device_t);
108 static int	vtpci_resume(device_t);
109 static int	vtpci_shutdown(device_t);
110 static void	vtpci_driver_added(device_t, driver_t *);
111 static void	vtpci_child_detached(device_t, device_t);
112 static int	vtpci_read_ivar(device_t, device_t, int, uintptr_t *);
113 static int	vtpci_write_ivar(device_t, device_t, int, uintptr_t);
114 
115 static uint64_t	vtpci_negotiate_features(device_t, uint64_t);
116 static int	vtpci_with_feature(device_t, uint64_t);
117 static int	vtpci_alloc_virtqueues(device_t, int, int,
118 		    struct vq_alloc_info *);
119 static int	vtpci_setup_intr(device_t, enum intr_type);
120 static void	vtpci_stop(device_t);
121 static int	vtpci_reinit(device_t, uint64_t);
122 static void	vtpci_reinit_complete(device_t);
123 static void	vtpci_notify_virtqueue(device_t, uint16_t);
124 static uint8_t	vtpci_get_status(device_t);
125 static void	vtpci_set_status(device_t, uint8_t);
126 static void	vtpci_read_dev_config(device_t, bus_size_t, void *, int);
127 static void	vtpci_write_dev_config(device_t, bus_size_t, void *, int);
128 
129 static void	vtpci_describe_features(struct vtpci_softc *, const char *,
130 		    uint64_t);
131 static void	vtpci_probe_and_attach_child(struct vtpci_softc *);
132 
133 static int	vtpci_alloc_msix(struct vtpci_softc *, int);
134 static int	vtpci_alloc_msi(struct vtpci_softc *);
135 static int	vtpci_alloc_intr_msix_pervq(struct vtpci_softc *);
136 static int	vtpci_alloc_intr_msix_shared(struct vtpci_softc *);
137 static int	vtpci_alloc_intr_msi(struct vtpci_softc *);
138 static int	vtpci_alloc_intr_legacy(struct vtpci_softc *);
139 static int	vtpci_alloc_interrupt(struct vtpci_softc *, int, int,
140 		    struct vtpci_interrupt *);
141 static int	vtpci_alloc_intr_resources(struct vtpci_softc *);
142 
143 static int	vtpci_setup_legacy_interrupt(struct vtpci_softc *,
144 		    enum intr_type);
145 static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *,
146 		    enum intr_type);
147 static int	vtpci_setup_msix_interrupts(struct vtpci_softc *,
148 		    enum intr_type);
149 static int	vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type);
150 
151 static int	vtpci_register_msix_vector(struct vtpci_softc *, int,
152 		    struct vtpci_interrupt *);
153 static int	vtpci_set_host_msix_vectors(struct vtpci_softc *);
154 static int	vtpci_reinit_virtqueue(struct vtpci_softc *, int);
155 
156 static void	vtpci_free_interrupt(struct vtpci_softc *,
157 		    struct vtpci_interrupt *);
158 static void	vtpci_free_interrupts(struct vtpci_softc *);
159 static void	vtpci_free_virtqueues(struct vtpci_softc *);
160 static void	vtpci_release_child_resources(struct vtpci_softc *);
161 static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *);
162 static void	vtpci_reset(struct vtpci_softc *);
163 
164 static void	vtpci_select_virtqueue(struct vtpci_softc *, int);
165 
166 static void	vtpci_legacy_intr(void *);
167 static int	vtpci_vq_shared_intr_filter(void *);
168 static void	vtpci_vq_shared_intr(void *);
169 static int	vtpci_vq_intr_filter(void *);
170 static void	vtpci_vq_intr(void *);
171 static void	vtpci_config_intr(void *);
172 
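/*
 * With MSI the whole device shares a single vector, just like a legacy
 * interrupt, so the same setup path (and vtpci_legacy_intr()) is reused.
 */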
173 #define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt
174 
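/*
 * Start of the device-specific configuration within BAR0. When MSIX is
 * enabled, the MSIX vector registers are interposed in the legacy header,
 * so the device-specific region begins at a larger offset;
 * VIRTIO_PCI_CONFIG_OFF() accounts for this.
 */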
175 #define VIRTIO_PCI_CONFIG(_sc) \
176     VIRTIO_PCI_CONFIG_OFF((((_sc)->vtpci_flags & VTPCI_FLAG_MSIX)) != 0)
177 
178 /*
179  * I/O port read/write wrappers.
180  */
181 #define vtpci_read_config_1(sc, o)	bus_read_1((sc)->vtpci_res, (o))
182 #define vtpci_read_config_2(sc, o)	bus_read_2((sc)->vtpci_res, (o))
183 #define vtpci_read_config_4(sc, o)	bus_read_4((sc)->vtpci_res, (o))
184 #define vtpci_write_config_1(sc, o, v)	bus_write_1((sc)->vtpci_res, (o), (v))
185 #define vtpci_write_config_2(sc, o, v)	bus_write_2((sc)->vtpci_res, (o), (v))
186 #define vtpci_write_config_4(sc, o, v)	bus_write_4((sc)->vtpci_res, (o), (v))
187 
188 /*
189  * The legacy VirtIO header is always little endian (PCI byte order), so
190  * on a big-endian machine we must swap from LE to host order when reading
191  * and from host order back to LE when writing.
192  * On a little-endian machine these conversions are no-ops.
193  */
194 #define vtpci_read_header_2(sc, o)	le16toh(vtpci_read_config_2(sc, o))
195 #define vtpci_read_header_4(sc, o)	le32toh(vtpci_read_config_4(sc, o))
196 #define vtpci_write_header_2(sc, o, v)	vtpci_write_config_2(sc, o, (htole16(v)))
197 #define vtpci_write_header_4(sc, o, v)	vtpci_write_config_4(sc, o, (htole32(v)))
198 
199 /* Tunables. */
200 static int vtpci_disable_msix = 0;
201 TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
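/*
 * Setting the loader tunable hw.virtio.pci.disable_msix=1 (e.g. in
 * loader.conf) makes the MSIX allocation paths below fail with ENOTSUP,
 * forcing a fall back to MSI or a legacy interrupt.
 */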
202 
203 static device_method_t vtpci_methods[] = {
204 	/* Device interface. */
205 	DEVMETHOD(device_probe,			  vtpci_probe),
206 	DEVMETHOD(device_attach,		  vtpci_attach),
207 	DEVMETHOD(device_detach,		  vtpci_detach),
208 	DEVMETHOD(device_suspend,		  vtpci_suspend),
209 	DEVMETHOD(device_resume,		  vtpci_resume),
210 	DEVMETHOD(device_shutdown,		  vtpci_shutdown),
211 
212 	/* Bus interface. */
213 	DEVMETHOD(bus_driver_added,		  vtpci_driver_added),
214 	DEVMETHOD(bus_child_detached,		  vtpci_child_detached),
215 	DEVMETHOD(bus_child_pnpinfo_str,	  virtio_child_pnpinfo_str),
216 	DEVMETHOD(bus_read_ivar,		  vtpci_read_ivar),
217 	DEVMETHOD(bus_write_ivar,		  vtpci_write_ivar),
218 
219 	/* VirtIO bus interface. */
220 	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_negotiate_features),
221 	DEVMETHOD(virtio_bus_with_feature,	  vtpci_with_feature),
222 	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_alloc_virtqueues),
223 	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_setup_intr),
224 	DEVMETHOD(virtio_bus_stop,		  vtpci_stop),
225 	DEVMETHOD(virtio_bus_reinit,		  vtpci_reinit),
226 	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_reinit_complete),
227 	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_notify_virtqueue),
228 	DEVMETHOD(virtio_bus_read_device_config,  vtpci_read_dev_config),
229 	DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config),
230 
231 	DEVMETHOD_END
232 };
233 
234 static driver_t vtpci_driver = {
235 	"virtio_pci",
236 	vtpci_methods,
237 	sizeof(struct vtpci_softc)
238 };
239 
240 devclass_t vtpci_devclass;
241 
242 DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0);
243 MODULE_VERSION(virtio_pci, 1);
244 MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
245 MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
246 
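/*
 * Match only legacy (pre-1.0) VirtIO PCI devices: the VirtIO vendor ID, a
 * device ID within the legacy range, and the legacy ABI revision.
 */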
247 static int
248 vtpci_probe(device_t dev)
249 {
250 	char desc[36];
251 	const char *name;
252 
253 	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
254 		return (ENXIO);
255 
256 	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
257 	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX)
258 		return (ENXIO);
259 
260 	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
261 		return (ENXIO);
262 
263 	name = virtio_device_name(pci_get_subdevice(dev));
264 	if (name == NULL)
265 		name = "Unknown";
266 
267 	snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name);
268 	device_set_desc_copy(dev, desc);
269 
270 	return (BUS_PROBE_DEFAULT);
271 }
272 
273 static int
274 vtpci_attach(device_t dev)
275 {
276 	struct vtpci_softc *sc;
277 	device_t child;
278 	int rid;
279 
280 	sc = device_get_softc(dev);
281 	sc->vtpci_dev = dev;
282 
283 	pci_enable_busmaster(dev);
284 
285 	rid = PCIR_BAR(0);
286 	sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
287 	    RF_ACTIVE);
288 	if (sc->vtpci_res == NULL) {
289 		device_printf(dev, "cannot map I/O space\n");
290 		return (ENXIO);
291 	}
292 
293 	/*
294 	 * For legacy VirtIO, the device-specific configuration is guest
295 	 * endian, while the common configuration header is always
296 	 * PCI (little) endian. The header is accessed through the
297 	 * vtpci_[read|write]_header_[2|4]() wrappers defined above, which
298 	 * handle any byte swapping.
299 	 */
300 #if _BYTE_ORDER == _BIG_ENDIAN
301 	rman_set_bustag(sc->vtpci_res, &bs_be_tag);
302 #endif
303 
304 	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
305 		sc->vtpci_flags |= VTPCI_FLAG_NO_MSI;
306 
307 	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
308 		rid = PCIR_BAR(1);
309 		sc->vtpci_msix_res = bus_alloc_resource_any(dev,
310 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
311 	}
312 
313 	if (sc->vtpci_msix_res == NULL)
314 		sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
315 
316 	vtpci_reset(sc);
317 
318 	/* Tell the host we've noticed this device. */
319 	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
320 
321 	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
322 		device_printf(dev, "cannot create child device\n");
323 		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
324 		vtpci_detach(dev);
325 		return (ENOMEM);
326 	}
327 
328 	sc->vtpci_child_dev = child;
329 	vtpci_probe_and_attach_child(sc);
330 
331 	return (0);
332 }
333 
334 static int
335 vtpci_detach(device_t dev)
336 {
337 	struct vtpci_softc *sc;
338 	device_t child;
339 	int error;
340 
341 	sc = device_get_softc(dev);
342 
343 	if ((child = sc->vtpci_child_dev) != NULL) {
344 		error = device_delete_child(dev, child);
345 		if (error)
346 			return (error);
347 		sc->vtpci_child_dev = NULL;
348 	}
349 
350 	vtpci_reset(sc);
351 
352 	if (sc->vtpci_msix_res != NULL) {
353 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
354 		    sc->vtpci_msix_res);
355 		sc->vtpci_msix_res = NULL;
356 	}
357 
358 	if (sc->vtpci_res != NULL) {
359 		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
360 		    sc->vtpci_res);
361 		sc->vtpci_res = NULL;
362 	}
363 
364 	return (0);
365 }
366 
367 static int
368 vtpci_suspend(device_t dev)
369 {
370 
371 	return (bus_generic_suspend(dev));
372 }
373 
374 static int
375 vtpci_resume(device_t dev)
376 {
377 
378 	return (bus_generic_resume(dev));
379 }
380 
381 static int
382 vtpci_shutdown(device_t dev)
383 {
384 
385 	(void) bus_generic_shutdown(dev);
386 	/* Forcibly stop the host device. */
387 	vtpci_stop(dev);
388 
389 	return (0);
390 }
391 
392 static void
393 vtpci_driver_added(device_t dev, driver_t *driver)
394 {
395 	struct vtpci_softc *sc;
396 
397 	sc = device_get_softc(dev);
398 
399 	vtpci_probe_and_attach_child(sc);
400 }
401 
402 static void
403 vtpci_child_detached(device_t dev, device_t child)
404 {
405 	struct vtpci_softc *sc;
406 
407 	sc = device_get_softc(dev);
408 
409 	vtpci_reset(sc);
410 	vtpci_release_child_resources(sc);
411 }
412 
413 static int
414 vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
415 {
416 	struct vtpci_softc *sc;
417 
418 	sc = device_get_softc(dev);
419 
420 	if (sc->vtpci_child_dev != child)
421 		return (ENOENT);
422 
423 	switch (index) {
424 	case VIRTIO_IVAR_DEVTYPE:
425 	case VIRTIO_IVAR_SUBDEVICE:
426 		*result = pci_get_subdevice(dev);
427 		break;
428 	case VIRTIO_IVAR_VENDOR:
429 		*result = pci_get_vendor(dev);
430 		break;
431 	case VIRTIO_IVAR_DEVICE:
432 		*result = pci_get_device(dev);
433 		break;
434 	case VIRTIO_IVAR_SUBVENDOR:
435 		*result = pci_get_subvendor(dev);
436 		break;
437 	default:
438 		return (ENOENT);
439 	}
440 
441 	return (0);
442 }
443 
444 static int
445 vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
446 {
447 	struct vtpci_softc *sc;
448 
449 	sc = device_get_softc(dev);
450 
451 	if (sc->vtpci_child_dev != child)
452 		return (ENOENT);
453 
454 	switch (index) {
455 	case VIRTIO_IVAR_FEATURE_DESC:
456 		sc->vtpci_child_feat_desc = (void *) value;
457 		break;
458 	default:
459 		return (ENOENT);
460 	}
461 
462 	return (0);
463 }
464 
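/*
 * Note: the legacy VirtIO PCI feature registers are only 32 bits wide, so
 * only the low 32 bits of the 64-bit feature mask can be offered by the
 * host or acknowledged by the guest here.
 */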
465 static uint64_t
466 vtpci_negotiate_features(device_t dev, uint64_t child_features)
467 {
468 	struct vtpci_softc *sc;
469 	uint64_t host_features, features;
470 
471 	sc = device_get_softc(dev);
472 
473 	host_features = vtpci_read_header_4(sc, VIRTIO_PCI_HOST_FEATURES);
474 	vtpci_describe_features(sc, "host", host_features);
475 
476 	/*
477 	 * Limit negotiated features to what the driver, virtqueue, and
478 	 * host all support.
479 	 */
480 	features = host_features & child_features;
481 	features = virtqueue_filter_features(features);
482 	sc->vtpci_features = features;
483 
484 	vtpci_describe_features(sc, "negotiated", features);
485 	vtpci_write_header_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
486 
487 	return (features);
488 }
489 
490 static int
491 vtpci_with_feature(device_t dev, uint64_t feature)
492 {
493 	struct vtpci_softc *sc;
494 
495 	sc = device_get_softc(dev);
496 
497 	return ((sc->vtpci_features & feature) != 0);
498 }
499 
500 static int
501 vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
502     struct vq_alloc_info *vq_info)
503 {
504 	struct vtpci_softc *sc;
505 	struct virtqueue *vq;
506 	struct vtpci_virtqueue *vqx;
507 	struct vq_alloc_info *info;
508 	int idx, error;
509 	uint16_t size;
510 
511 	sc = device_get_softc(dev);
512 
513 	if (sc->vtpci_nvqs != 0)
514 		return (EALREADY);
515 	if (nvqs <= 0)
516 		return (EINVAL);
517 
518 	sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
519 	    M_DEVBUF, M_NOWAIT | M_ZERO);
520 	if (sc->vtpci_vqs == NULL)
521 		return (ENOMEM);
522 
523 	for (idx = 0; idx < nvqs; idx++) {
524 		vqx = &sc->vtpci_vqs[idx];
525 		info = &vq_info[idx];
526 
527 		vtpci_select_virtqueue(sc, idx);
528 		size = vtpci_read_header_2(sc, VIRTIO_PCI_QUEUE_NUM);
529 
530 		error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN,
531 		    ~(vm_paddr_t)0, info, &vq);
532 		if (error) {
533 			device_printf(dev,
534 			    "cannot allocate virtqueue %d: %d\n", idx, error);
535 			break;
536 		}
537 
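		/*
		 * Hand the host the ring's physical address as a page frame
		 * number; legacy VirtIO requires page-aligned rings.
		 */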
538 		vtpci_write_header_4(sc, VIRTIO_PCI_QUEUE_PFN,
539 		    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
540 
541 		vqx->vtv_vq = *info->vqai_vq = vq;
542 		vqx->vtv_no_intr = info->vqai_intr == NULL;
543 
544 		sc->vtpci_nvqs++;
545 	}
546 
547 	if (error)
548 		vtpci_free_virtqueues(sc);
549 
550 	return (error);
551 }
552 
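/*
 * Invoked through the VirtIO bus interface once the child driver has
 * allocated its virtqueues. Try each interrupt configuration in decreasing
 * order of preference: per-virtqueue MSIX, shared MSIX, MSI, and finally a
 * legacy interrupt.
 */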
553 static int
554 vtpci_setup_intr(device_t dev, enum intr_type type)
555 {
556 	struct vtpci_softc *sc;
557 	int attempt, error;
558 
559 	sc = device_get_softc(dev);
560 
561 	for (attempt = 0; attempt < 5; attempt++) {
562 		/*
563 		 * Start with the most desirable interrupt configuration and
564 		 * fall back to less desirable ones.
565 		 */
566 		switch (attempt) {
567 		case 0:
568 			error = vtpci_alloc_intr_msix_pervq(sc);
569 			break;
570 		case 1:
571 			error = vtpci_alloc_intr_msix_shared(sc);
572 			break;
573 		case 2:
574 			error = vtpci_alloc_intr_msi(sc);
575 			break;
576 		case 3:
577 			error = vtpci_alloc_intr_legacy(sc);
578 			break;
579 		default:
580 			device_printf(dev,
581 			    "exhausted all interrupt allocation attempts\n");
582 			return (ENXIO);
583 		}
584 
585 		if (error == 0 && vtpci_setup_interrupts(sc, type) == 0)
586 			break;
587 
588 		vtpci_cleanup_setup_intr_attempt(sc);
589 	}
590 
591 	if (bootverbose) {
592 		if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
593 			device_printf(dev, "using legacy interrupt\n");
594 		else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
595 			device_printf(dev, "using MSI interrupt\n");
596 		else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
597 			device_printf(dev, "using shared MSIX interrupts\n");
598 		else
599 			device_printf(dev, "using per VQ MSIX interrupts\n");
600 	}
601 
602 	return (0);
603 }
604 
605 static void
606 vtpci_stop(device_t dev)
607 {
608 
609 	vtpci_reset(device_get_softc(dev));
610 }
611 
612 static int
613 vtpci_reinit(device_t dev, uint64_t features)
614 {
615 	struct vtpci_softc *sc;
616 	int idx, error;
617 
618 	sc = device_get_softc(dev);
619 
620 	/*
621 	 * Redrive the device initialization. This is a bit of an abuse of
622 	 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
623 	 * play nice.
624 	 *
625 	 * We do not allow the host device to change from what was originally
626 	 * negotiated beyond what the guest driver changed. MSIX state should
627 	 * not change, number of virtqueues and their size remain the same, etc.
628 	 * This will need to be rethought when we want to support migration.
629 	 */
630 
631 	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
632 		vtpci_stop(dev);
633 
634 	/*
635 	 * Quickly drive the status through ACK and DRIVER. The device
636 	 * does not become usable again until vtpci_reinit_complete().
637 	 */
638 	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
639 	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
640 
641 	vtpci_negotiate_features(dev, features);
642 
643 	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
644 		error = vtpci_reinit_virtqueue(sc, idx);
645 		if (error)
646 			return (error);
647 	}
648 
649 	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
650 		error = vtpci_set_host_msix_vectors(sc);
651 		if (error)
652 			return (error);
653 	}
654 
655 	return (0);
656 }
657 
658 static void
659 vtpci_reinit_complete(device_t dev)
660 {
661 
662 	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
663 }
664 
665 static void
666 vtpci_notify_virtqueue(device_t dev, uint16_t queue)
667 {
668 	struct vtpci_softc *sc;
669 
670 	sc = device_get_softc(dev);
671 
672 	vtpci_write_header_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
673 }
674 
675 static uint8_t
676 vtpci_get_status(device_t dev)
677 {
678 	struct vtpci_softc *sc;
679 
680 	sc = device_get_softc(dev);
681 
682 	return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS));
683 }
684 
685 static void
686 vtpci_set_status(device_t dev, uint8_t status)
687 {
688 	struct vtpci_softc *sc;
689 
690 	sc = device_get_softc(dev);
691 
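	/*
	 * Status bits are cumulative: except for a full reset (writing zero),
	 * new bits are OR'd into the device's current status.
	 */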
692 	if (status != VIRTIO_CONFIG_STATUS_RESET)
693 		status |= vtpci_get_status(dev);
694 
695 	vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
696 }
697 
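/*
 * Copy the device-specific configuration out in 4-, 2-, and 1-byte chunks.
 * For legacy VirtIO this region is guest endian, so no byte swapping is
 * done here (see the note in vtpci_attach()).
 */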
698 static void
699 vtpci_read_dev_config(device_t dev, bus_size_t offset,
700     void *dst, int length)
701 {
702 	struct vtpci_softc *sc;
703 	bus_size_t off;
704 	uint8_t *d;
705 	int size;
706 
707 	sc = device_get_softc(dev);
708 	off = VIRTIO_PCI_CONFIG(sc) + offset;
709 
710 	for (d = dst; length > 0; d += size, off += size, length -= size) {
711 		if (length >= 4) {
712 			size = 4;
713 			*(uint32_t *)d = vtpci_read_config_4(sc, off);
714 		} else if (length >= 2) {
715 			size = 2;
716 			*(uint16_t *)d = vtpci_read_config_2(sc, off);
717 		} else {
718 			size = 1;
719 			*d = vtpci_read_config_1(sc, off);
720 		}
721 	}
722 }
723 
724 static void
725 vtpci_write_dev_config(device_t dev, bus_size_t offset,
726     void *src, int length)
727 {
728 	struct vtpci_softc *sc;
729 	bus_size_t off;
730 	uint8_t *s;
731 	int size;
732 
733 	sc = device_get_softc(dev);
734 	off = VIRTIO_PCI_CONFIG(sc) + offset;
735 
736 	for (s = src; length > 0; s += size, off += size, length -= size) {
737 		if (length >= 4) {
738 			size = 4;
739 			vtpci_write_config_4(sc, off, *(uint32_t *)s);
740 		} else if (length >= 2) {
741 			size = 2;
742 			vtpci_write_config_2(sc, off, *(uint16_t *)s);
743 		} else {
744 			size = 1;
745 			vtpci_write_config_1(sc, off, *s);
746 		}
747 	}
748 }
749 
750 static void
751 vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
752     uint64_t features)
753 {
754 	device_t dev, child;
755 
756 	dev = sc->vtpci_dev;
757 	child = sc->vtpci_child_dev;
758 
759 	if (device_is_attached(child) || bootverbose == 0)
760 		return;
761 
762 	virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc);
763 }
764 
765 static void
766 vtpci_probe_and_attach_child(struct vtpci_softc *sc)
767 {
768 	device_t dev, child;
769 
770 	dev = sc->vtpci_dev;
771 	child = sc->vtpci_child_dev;
772 
773 	if (child == NULL)
774 		return;
775 
776 	if (device_get_state(child) != DS_NOTPRESENT)
777 		return;
778 
779 	if (device_probe(child) != 0)
780 		return;
781 
782 	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
783 	if (device_attach(child) != 0) {
784 		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
785 		vtpci_reset(sc);
786 		vtpci_release_child_resources(sc);
787 		/* Reset status for future attempt. */
788 		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
789 	} else {
790 		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
791 		VIRTIO_ATTACH_COMPLETED(child);
792 	}
793 }
794 
795 static int
796 vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
797 {
798 	device_t dev;
799 	int nmsix, cnt, required;
800 
801 	dev = sc->vtpci_dev;
802 
803 	/* Allocate an additional vector for the config changes. */
804 	required = nvectors + 1;
805 
806 	nmsix = pci_msix_count(dev);
807 	if (nmsix < required)
808 		return (1);
809 
810 	cnt = required;
811 	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
812 		sc->vtpci_nmsix_resources = required;
813 		return (0);
814 	}
815 
816 	pci_release_msi(dev);
817 
818 	return (1);
819 }
820 
821 static int
822 vtpci_alloc_msi(struct vtpci_softc *sc)
823 {
824 	device_t dev;
825 	int nmsi, cnt, required;
826 
827 	dev = sc->vtpci_dev;
828 	required = 1;
829 
830 	nmsi = pci_msi_count(dev);
831 	if (nmsi < required)
832 		return (1);
833 
834 	cnt = required;
835 	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required)
836 		return (0);
837 
838 	pci_release_msi(dev);
839 
840 	return (1);
841 }
842 
843 static int
844 vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc)
845 {
846 	int i, nvectors, error;
847 
848 	if (vtpci_disable_msix != 0 ||
849 	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
850 		return (ENOTSUP);
851 
852 	for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) {
853 		if (sc->vtpci_vqs[i].vtv_no_intr == 0)
854 			nvectors++;
855 	}
856 
857 	error = vtpci_alloc_msix(sc, nvectors);
858 	if (error)
859 		return (error);
860 
861 	sc->vtpci_flags |= VTPCI_FLAG_MSIX;
862 
863 	return (0);
864 }
865 
866 static int
867 vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc)
868 {
869 	int error;
870 
871 	if (vtpci_disable_msix != 0 ||
872 	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
873 		return (ENOTSUP);
874 
875 	error = vtpci_alloc_msix(sc, 1);
876 	if (error)
877 		return (error);
878 
879 	sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
880 
881 	return (0);
882 }
883 
884 static int
885 vtpci_alloc_intr_msi(struct vtpci_softc *sc)
886 {
887 	int error;
888 
889 	/* Only BHyVe supports MSI. */
890 	if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI)
891 		return (ENOTSUP);
892 
893 	error = vtpci_alloc_msi(sc);
894 	if (error)
895 		return (error);
896 
897 	sc->vtpci_flags |= VTPCI_FLAG_MSI;
898 
899 	return (0);
900 }
901 
902 static int
903 vtpci_alloc_intr_legacy(struct vtpci_softc *sc)
904 {
905 
906 	sc->vtpci_flags |= VTPCI_FLAG_LEGACY;
907 
908 	return (0);
909 }
910 
911 static int
912 vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags,
913     struct vtpci_interrupt *intr)
914 {
915 	struct resource *irq;
916 
917 	irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags);
918 	if (irq == NULL)
919 		return (ENXIO);
920 
921 	intr->vti_irq = irq;
922 	intr->vti_rid = rid;
923 
924 	return (0);
925 }
926 
927 static int
928 vtpci_alloc_intr_resources(struct vtpci_softc *sc)
929 {
930 	struct vtpci_interrupt *intr;
931 	int i, rid, flags, nvq_intrs, error;
932 
933 	rid = 0;
934 	flags = RF_ACTIVE;
935 
936 	if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
937 		flags |= RF_SHAREABLE;
938 	else
939 		rid = 1;
940 
941 	/*
942 	 * For legacy and MSI interrupts, this single resource handles all
943 	 * interrupts. For MSIX, this resource is used for the configuration
944 	 * changed interrupt.
945 	 */
946 	intr = &sc->vtpci_device_interrupt;
947 	error = vtpci_alloc_interrupt(sc, rid, flags, intr);
948 	if (error || sc->vtpci_flags & (VTPCI_FLAG_LEGACY | VTPCI_FLAG_MSI))
949 		return (error);
950 
951 	/* Subtract one for the configuration changed interrupt. */
952 	nvq_intrs = sc->vtpci_nmsix_resources - 1;
953 
954 	intr = sc->vtpci_msix_vq_interrupts = malloc(nvq_intrs *
955 	    sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO);
956 	if (sc->vtpci_msix_vq_interrupts == NULL)
957 		return (ENOMEM);
958 
959 	for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) {
960 		error = vtpci_alloc_interrupt(sc, rid, flags, intr);
961 		if (error)
962 			return (error);
963 	}
964 
965 	return (0);
966 }
967 
968 static int
969 vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type)
970 {
971 	struct vtpci_interrupt *intr;
972 	int error;
973 
974 	intr = &sc->vtpci_device_interrupt;
975 	error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, NULL,
976 	    vtpci_legacy_intr, sc, &intr->vti_handler);
977 
978 	return (error);
979 }
980 
981 static int
982 vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
983 {
984 	struct vtpci_virtqueue *vqx;
985 	struct vtpci_interrupt *intr;
986 	int i, error;
987 
988 	intr = sc->vtpci_msix_vq_interrupts;
989 
990 	for (i = 0; i < sc->vtpci_nvqs; i++) {
991 		vqx = &sc->vtpci_vqs[i];
992 
993 		if (vqx->vtv_no_intr)
994 			continue;
995 
996 		error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type,
997 		    vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq,
998 		    &intr->vti_handler);
999 		if (error)
1000 			return (error);
1001 
1002 		intr++;
1003 	}
1004 
1005 	return (0);
1006 }
1007 
1008 static int
1009 vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
1010 {
1011 	device_t dev;
1012 	struct vtpci_interrupt *intr;
1013 	int error;
1014 
1015 	dev = sc->vtpci_dev;
1016 	intr = &sc->vtpci_device_interrupt;
1017 
1018 	error = bus_setup_intr(dev, intr->vti_irq, type, NULL,
1019 	    vtpci_config_intr, sc, &intr->vti_handler);
1020 	if (error)
1021 		return (error);
1022 
1023 	if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) {
1024 		intr = sc->vtpci_msix_vq_interrupts;
1025 		error = bus_setup_intr(dev, intr->vti_irq, type,
1026 		    vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc,
1027 		    &intr->vti_handler);
1028 	} else
1029 		error = vtpci_setup_pervq_msix_interrupts(sc, type);
1030 
1031 	return (error ? error : vtpci_set_host_msix_vectors(sc));
1032 }
1033 
1034 static int
1035 vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type)
1036 {
1037 	int error;
1038 
1039 	type |= INTR_MPSAFE;
1040 	KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK,
1041 	    ("%s: no interrupt type selected %#x", __func__, sc->vtpci_flags));
1042 
1043 	error = vtpci_alloc_intr_resources(sc);
1044 	if (error)
1045 		return (error);
1046 
1047 	if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
1048 		error = vtpci_setup_legacy_interrupt(sc, type);
1049 	else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
1050 		error = vtpci_setup_msi_interrupt(sc, type);
1051 	else
1052 		error = vtpci_setup_msix_interrupts(sc, type);
1053 
1054 	return (error);
1055 }
1056 
1057 static int
1058 vtpci_register_msix_vector(struct vtpci_softc *sc, int offset,
1059     struct vtpci_interrupt *intr)
1060 {
1061 	device_t dev;
1062 	uint16_t vector;
1063 
1064 	dev = sc->vtpci_dev;
1065 
1066 	if (intr != NULL) {
1067 		/* Map from guest rid to host vector. */
1068 		vector = intr->vti_rid - 1;
1069 	} else
1070 		vector = VIRTIO_MSI_NO_VECTOR;
1071 
1072 	vtpci_write_header_2(sc, offset, vector);
1073 
1074 	/* Read vector to determine if the host had sufficient resources. */
1075 	if (vtpci_read_header_2(sc, offset) != vector) {
1076 		device_printf(dev,
1077 		    "insufficient host resources for MSIX interrupts\n");
1078 		return (ENODEV);
1079 	}
1080 
1081 	return (0);
1082 }
1083 
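/*
 * Route the configuration-change interrupt and each virtqueue to its MSIX
 * vector on the host side. Virtqueues without a callback are assigned
 * VIRTIO_MSI_NO_VECTOR.
 */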
1084 static int
1085 vtpci_set_host_msix_vectors(struct vtpci_softc *sc)
1086 {
1087 	struct vtpci_interrupt *intr, *tintr;
1088 	int idx, offset, error;
1089 
1090 	intr = &sc->vtpci_device_interrupt;
1091 	offset = VIRTIO_MSI_CONFIG_VECTOR;
1092 
1093 	error = vtpci_register_msix_vector(sc, offset, intr);
1094 	if (error)
1095 		return (error);
1096 
1097 	intr = sc->vtpci_msix_vq_interrupts;
1098 	offset = VIRTIO_MSI_QUEUE_VECTOR;
1099 
1100 	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
1101 		vtpci_select_virtqueue(sc, idx);
1102 
1103 		if (sc->vtpci_vqs[idx].vtv_no_intr)
1104 			tintr = NULL;
1105 		else
1106 			tintr = intr;
1107 
1108 		error = vtpci_register_msix_vector(sc, offset, tintr);
1109 		if (error)
1110 			break;
1111 
1112 		/*
1113 		 * For shared MSIX, all the virtqueues share the first
1114 		 * interrupt.
1115 		 */
1116 		if (!sc->vtpci_vqs[idx].vtv_no_intr &&
1117 		    (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0)
1118 			intr++;
1119 	}
1120 
1121 	return (error);
1122 }
1123 
1124 static int
1125 vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx)
1126 {
1127 	struct vtpci_virtqueue *vqx;
1128 	struct virtqueue *vq;
1129 	int error;
1130 	uint16_t size;
1131 
1132 	vqx = &sc->vtpci_vqs[idx];
1133 	vq = vqx->vtv_vq;
1134 
1135 	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
1136 
1137 	vtpci_select_virtqueue(sc, idx);
1138 	size = vtpci_read_header_2(sc, VIRTIO_PCI_QUEUE_NUM);
1139 
1140 	error = virtqueue_reinit(vq, size);
1141 	if (error)
1142 		return (error);
1143 
1144 	vtpci_write_header_4(sc, VIRTIO_PCI_QUEUE_PFN,
1145 	    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
1146 
1147 	return (0);
1148 }
1149 
1150 static void
1151 vtpci_free_interrupt(struct vtpci_softc *sc, struct vtpci_interrupt *intr)
1152 {
1153 	device_t dev;
1154 
1155 	dev = sc->vtpci_dev;
1156 
1157 	if (intr->vti_handler != NULL) {
1158 		bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
1159 		intr->vti_handler = NULL;
1160 	}
1161 
1162 	if (intr->vti_irq != NULL) {
1163 		bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
1164 		    intr->vti_irq);
1165 		intr->vti_irq = NULL;
1166 		intr->vti_rid = -1;
1167 	}
1168 }
1169 
1170 static void
1171 vtpci_free_interrupts(struct vtpci_softc *sc)
1172 {
1173 	struct vtpci_interrupt *intr;
1174 	int i, nvq_intrs;
1175 
1176 	vtpci_free_interrupt(sc, &sc->vtpci_device_interrupt);
1177 
1178 	if (sc->vtpci_nmsix_resources != 0) {
1179 		nvq_intrs = sc->vtpci_nmsix_resources - 1;
1180 		sc->vtpci_nmsix_resources = 0;
1181 
1182 		intr = sc->vtpci_msix_vq_interrupts;
1183 		if (intr != NULL) {
1184 			for (i = 0; i < nvq_intrs; i++, intr++)
1185 				vtpci_free_interrupt(sc, intr);
1186 
1187 			free(sc->vtpci_msix_vq_interrupts, M_DEVBUF);
1188 			sc->vtpci_msix_vq_interrupts = NULL;
1189 		}
1190 	}
1191 
1192 	if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
1193 		pci_release_msi(sc->vtpci_dev);
1194 
1195 	sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
1196 }
1197 
1198 static void
1199 vtpci_free_virtqueues(struct vtpci_softc *sc)
1200 {
1201 	struct vtpci_virtqueue *vqx;
1202 	int idx;
1203 
1204 	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
1205 		vqx = &sc->vtpci_vqs[idx];
1206 
1207 		vtpci_select_virtqueue(sc, idx);
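		/* A zero PFN tells the host this queue is no longer in use. */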
1208 		vtpci_write_header_4(sc, VIRTIO_PCI_QUEUE_PFN, 0);
1209 
1210 		virtqueue_free(vqx->vtv_vq);
1211 		vqx->vtv_vq = NULL;
1212 	}
1213 
1214 	free(sc->vtpci_vqs, M_DEVBUF);
1215 	sc->vtpci_vqs = NULL;
1216 	sc->vtpci_nvqs = 0;
1217 }
1218 
1219 static void
1220 vtpci_release_child_resources(struct vtpci_softc *sc)
1221 {
1222 
1223 	vtpci_free_interrupts(sc);
1224 	vtpci_free_virtqueues(sc);
1225 }
1226 
1227 static void
1228 vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc)
1229 {
1230 	int idx;
1231 
1232 	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
1233 		vtpci_write_header_2(sc, VIRTIO_MSI_CONFIG_VECTOR,
1234 		    VIRTIO_MSI_NO_VECTOR);
1235 
1236 		for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
1237 			vtpci_select_virtqueue(sc, idx);
1238 			vtpci_write_header_2(sc, VIRTIO_MSI_QUEUE_VECTOR,
1239 			    VIRTIO_MSI_NO_VECTOR);
1240 		}
1241 	}
1242 
1243 	vtpci_free_interrupts(sc);
1244 }
1245 
1246 static void
1247 vtpci_reset(struct vtpci_softc *sc)
1248 {
1249 
1250 	/*
1251 	 * Setting the status to RESET sets the host device to
1252 	 * the original, uninitialized state.
1253 	 */
1254 	vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET);
1255 }
1256 
1257 static void
1258 vtpci_select_virtqueue(struct vtpci_softc *sc, int idx)
1259 {
1260 
1261 	vtpci_write_header_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
1262 }
1263 
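/*
 * Handler for legacy INTx and MSI configurations: a single interrupt covers
 * both configuration changes and all virtqueues, so the ISR register is read
 * to determine what needs servicing.
 */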
1264 static void
1265 vtpci_legacy_intr(void *xsc)
1266 {
1267 	struct vtpci_softc *sc;
1268 	struct vtpci_virtqueue *vqx;
1269 	int i;
1270 	uint8_t isr;
1271 
1272 	sc = xsc;
1273 	vqx = &sc->vtpci_vqs[0];
1274 
1275 	/* Reading the ISR also clears it. */
1276 	isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR);
1277 
1278 	if (isr & VIRTIO_PCI_ISR_CONFIG)
1279 		vtpci_config_intr(sc);
1280 
1281 	if (isr & VIRTIO_PCI_ISR_INTR) {
1282 		for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
1283 			if (vqx->vtv_no_intr == 0)
1284 				virtqueue_intr(vqx->vtv_vq);
1285 		}
1286 	}
1287 }
1288 
1289 static int
1290 vtpci_vq_shared_intr_filter(void *xsc)
1291 {
1292 	struct vtpci_softc *sc;
1293 	struct vtpci_virtqueue *vqx;
1294 	int i, rc;
1295 
1296 	rc = 0;
1297 	sc = xsc;
1298 	vqx = &sc->vtpci_vqs[0];
1299 
1300 	for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
1301 		if (vqx->vtv_no_intr == 0)
1302 			rc |= virtqueue_intr_filter(vqx->vtv_vq);
1303 	}
1304 
1305 	return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
1306 }
1307 
1308 static void
1309 vtpci_vq_shared_intr(void *xsc)
1310 {
1311 	struct vtpci_softc *sc;
1312 	struct vtpci_virtqueue *vqx;
1313 	int i;
1314 
1315 	sc = xsc;
1316 	vqx = &sc->vtpci_vqs[0];
1317 
1318 	for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
1319 		if (vqx->vtv_no_intr == 0)
1320 			virtqueue_intr(vqx->vtv_vq);
1321 	}
1322 }
1323 
1324 static int
1325 vtpci_vq_intr_filter(void *xvq)
1326 {
1327 	struct virtqueue *vq;
1328 	int rc;
1329 
1330 	vq = xvq;
1331 	rc = virtqueue_intr_filter(vq);
1332 
1333 	return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
1334 }
1335 
1336 static void
1337 vtpci_vq_intr(void *xvq)
1338 {
1339 	struct virtqueue *vq;
1340 
1341 	vq = xvq;
1342 	virtqueue_intr(vq);
1343 }
1344 
1345 static void
1346 vtpci_config_intr(void *xsc)
1347 {
1348 	struct vtpci_softc *sc;
1349 	device_t child;
1350 
1351 	sc = xsc;
1352 	child = sc->vtpci_child_dev;
1353 
1354 	if (child != NULL)
1355 		VIRTIO_CONFIG_CHANGE(child);
1356 }
1357