/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2012 Nexenta Systems, Inc.
 * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
 */

/* Based on the NetBSD virtio driver by Minoura Makoto. */
/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/avintr.h>
#include <sys/spl.h>
#include <sys/promif.h>
#include <sys/list.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/sysmacros.h>
#include <sys/pci.h>

#include "virtiovar.h"
#include "virtioreg.h"

#define	NDEVNAMES	(sizeof (virtio_device_name) / sizeof (char *))
#define	MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */
#define	VIRTQUEUE_ALIGN(n)	(((n) + (VIRTIO_PAGE_SIZE - 1)) & \
	    ~(VIRTIO_PAGE_SIZE - 1))
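
/*
 * Example, assuming the legacy VIRTIO_PAGE_SIZE of 4096:
 * VIRTQUEUE_ALIGN(5000) == 8192 and VIRTQUEUE_ALIGN(4096) == 4096,
 * i.e. a size is rounded up to the next virtio page boundary.
 */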

void
virtio_set_status(struct virtio_softc *sc, unsigned int status)
{
	int old = 0;

	if (status != 0)
		old = ddi_get8(sc->sc_ioh,
		    (uint8_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_DEVICE_STATUS));

	ddi_put8(sc->sc_ioh,
	    (uint8_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS),
	    status | old);
}

/*
 * Negotiate features, save the result in sc->sc_features.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t host_features;
	uint32_t features;

	host_features = ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

	dev_debug(sc->sc_dev, CE_NOTE,
	    "host features: %x, guest features: %x",
	    host_features, guest_features);

	features = host_features & guest_features;
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
	    features);

	sc->sc_features = features;

	return (host_features);
}
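
/*
 * Usage sketch (hypothetical child driver code; the VIRTIO_BLK_F_BLK_SIZE
 * flag is shown purely for illustration):
 *
 *	host_features = virtio_negotiate_features(sc,
 *	    VIRTIO_F_RING_INDIRECT_DESC | VIRTIO_BLK_F_BLK_SIZE);
 *	if (virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC))
 *		... indirect descriptors may be used for the vqs ...
 */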

size_t
virtio_show_features(uint32_t features,
    char *buf, size_t len)
{
	char *orig_buf = buf;
	char *bufend = buf + len;

	/* LINTED E_PTRDIFF_OVERFLOW */
	buf += snprintf(buf, bufend - buf, "Generic ( ");
	if (features & VIRTIO_F_RING_INDIRECT_DESC)
		/* LINTED E_PTRDIFF_OVERFLOW */
		buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");

	/* LINTED E_PTRDIFF_OVERFLOW */
	buf += snprintf(buf, bufend - buf, ") ");

	/* LINTED E_PTRDIFF_OVERFLOW */
	return (buf - orig_buf);
}

boolean_t
virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
{
	/* Compare explicitly so a high feature bit yields a clean boolean. */
	return ((sc->sc_features & feature) != 0);
}

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return (ddi_get8(sc->sc_ioh,
	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return (ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return (ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}

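/*
 * Note that the host exposes 64-bit config fields as two 32-bit halves,
 * so the read below is not atomic with respect to device updates; callers
 * that care may re-read until two consecutive values agree.
 */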
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
{
	uint64_t r;

	ASSERT(sc->sc_config_offset);
	r = ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
	    index + sizeof (uint32_t)));

	r <<= 32;

	r += ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
	return (r);
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    unsigned int index, uint8_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put8(sc->sc_ioh,
	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    unsigned int index, uint16_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    unsigned int index, uint32_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    unsigned int index, uint64_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
	    value & 0xFFFFFFFF);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
	    index + sizeof (uint32_t)), value >> 32);
}

/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

void
virtio_start_vq_intr(struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}
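
/*
 * Note that VRING_AVAIL_F_NO_INTERRUPT is only advisory, hence the
 * "no guarantee" above: the host may still deliver an interrupt that
 * was already in flight when the flag was set.
 */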

static ddi_dma_attr_t virtio_vq_dma_attr = {
	DMA_ATTR_V0,	/* Version number */
	0,		/* low address */
	/*
	 * high address. Has to fit into 32 bits
	 * after page-shifting
	 */
	0x00000FFFFFFFFFFF,
	0xFFFFFFFF,	/* counter register max */
	VIRTIO_PAGE_SIZE, /* page alignment required */
	0x3F,		/* burst sizes: 1 - 32 */
	0x1,		/* minimum transfer size */
	0xFFFFFFFF,	/* max transfer size */
	0xFFFFFFFF,	/* address register max */
	1,		/* no scatter-gather */
	1,		/* device operates on bytes */
	0,		/* attr flag: set to 0 */
};

static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
	DMA_ATTR_V0,	/* Version number */
	0,		/* low address */
	0xFFFFFFFFFFFFFFFF, /* high address */
	0xFFFFFFFF,	/* counter register max */
	1,		/* No specific alignment */
	0x3F,		/* burst sizes: 1 - 32 */
	0x1,		/* minimum transfer size */
	0xFFFFFFFF,	/* max transfer size */
	0xFFFFFFFF,	/* address register max */
	1,		/* no scatter-gather */
	1,		/* device operates on bytes */
	0,		/* attr flag: set to 0 */
};

/* Same for direct and indirect descriptors. */
static ddi_device_acc_attr_t virtio_vq_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STORECACHING_OK_ACC,
	DDI_DEFAULT_ACC
};

static void
virtio_free_indirect(struct vq_entry *entry)
{
	(void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);

	entry->qe_indirect_descs = NULL;
}

static int
virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
{
	int allocsize, num;
	size_t len;
	unsigned int ncookies;
	int ret;

	num = entry->qe_queue->vq_indirect_num;
	ASSERT(num > 1);

	allocsize = sizeof (struct vring_desc) * num;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for indirect descriptors,"
		    " entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle,
	    allocsize, &virtio_vq_devattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&entry->qe_indirect_descs, &len,
	    &entry->qe_indirect_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for indirect descriptors,"
		    " entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc;
	}

	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);

	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
	    (caddr_t)entry->qe_indirect_descs, len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for indirect descriptors,"
		    " entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);

	return (0);

out_bind:
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
out_alloc:
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
out_alloc_handle:

	return (ret);
}

/*
 * Initialize the vq structure.
 */
static int
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	int ret;
	uint16_t i;
	int vq_size = vq->vq_num;
	int indirect_num = vq->vq_indirect_num;

	/* free slot management */
	list_create(&vq->vq_freelist, sizeof (struct vq_entry),
	    offsetof(struct vq_entry, qe_list));

	for (i = 0; i < vq_size; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		list_insert_tail(&vq->vq_freelist, entry);
		entry->qe_index = i;
		entry->qe_desc = &vq->vq_descs[i];
		entry->qe_queue = vq;

		if (indirect_num) {
			ret = virtio_alloc_indirect(sc, entry);
			if (ret)
				goto out_indirect;
		}
	}

	mutex_init(&vq->vq_freelist_lock, "virtio-freelist",
	    MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
	mutex_init(&vq->vq_avail_lock, "virtio-avail",
	    MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
	mutex_init(&vq->vq_used_lock, "virtio-used",
	    MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));

	return (0);

out_indirect:
	for (i = 0; i < vq_size; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	return (ret);
}

/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc,
    unsigned int index,
    unsigned int size,
    unsigned int indirect_num,
    const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue does not exist, index %d for %s", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) +
	    sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used)
	    + sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
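
/*
 * Allocation sketch (hypothetical driver code): a receive queue usually
 * passes size 0 to accept the device's native ring size.
 *
 *	sc->sc_rx_vq = virtio_alloc_vq(&sc->sc_virtio, 0, 0,
 *	    MY_RX_INDIRECT_SEGS, "rx");
 *	if (sc->sc_rx_vq == NULL)
 *		... fail the attach ...
 */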

void
virtio_free_vq(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int i;

	/* tell device that there's no virtqueue any longer */
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
	    vq->vq_index);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

	/* Free the indirect descriptors, if any. */
	for (i = 0; i < vq->vq_num; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
	ddi_dma_mem_free(&vq->vq_dma_acch);
	ddi_dma_free_handle(&vq->vq_dma_handle);

	mutex_destroy(&vq->vq_used_lock);
	mutex_destroy(&vq->vq_avail_lock);
	mutex_destroy(&vq->vq_freelist_lock);

	kmem_free(vq, sizeof (struct virtqueue));
}

/*
 * Free descriptor management.
 */
struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (list_is_empty(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return (NULL);
	}
	qe = list_remove_head(&vq->vq_freelist);

	ASSERT(vq->vq_used_entries >= 0);
	vq->vq_used_entries++;

	mutex_exit(&vq->vq_freelist_lock);

	qe->qe_next = NULL;
	qe->qe_indirect_next = 0;
	(void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));

	return (qe);
}

void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);

	list_insert_head(&vq->vq_freelist, qe);
	vq->vq_used_entries--;
	ASSERT(vq->vq_used_entries >= 0);
	mutex_exit(&vq->vq_freelist_lock);
}

/*
 * We (intentionally) don't have a global vq mutex, so you are
 * responsible for external locking to avoid allocating/freeing any
 * entries before using the returned value. Have fun.
 */
uint_t
vq_num_used(struct virtqueue *vq)
{
	/* vq->vq_freelist_lock would not help here. */
	return (vq->vq_used_entries);
}

static inline void
virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	desc->addr = paddr;
	desc->len = len;
	desc->next = 0;
	desc->flags = 0;

	/* 'write' - from the driver's point of view */
	if (!write)
		desc->flags = VRING_DESC_F_WRITE;
}

void
virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
}

void
virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	struct vring_desc *indirect_desc;

	ASSERT(qe->qe_queue->vq_indirect_num);
	ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);

	indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
	virtio_ve_set_desc(indirect_desc, paddr, len, write);
	qe->qe_indirect_next++;
}

void
virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
    ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
{
	int i;

	for (i = 0; i < ncookies; i++) {
		virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
		    dma_cookie.dmac_size, write);
		ddi_dma_nextcookie(dma_handle, &dma_cookie);
	}
}

void
virtio_sync_vq(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;

	/* Make sure the avail ring update hit the buffer */
	membar_producer();

	vq->vq_avail->idx = vq->vq_avail_idx;

	/* Make sure the avail idx update hits the buffer */
	membar_producer();

	/* Make sure we see the flags update */
	membar_consumer();

	if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
		ddi_put16(vsc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(vsc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_NOTIFY),
		    vq->vq_index);
}

void
virtio_push_chain(struct vq_entry *qe, boolean_t sync)
{
	struct virtqueue *vq = qe->qe_queue;
	struct vq_entry *head = qe;
	struct vring_desc *desc;
	int idx;

	ASSERT(qe);

	/*
	 * Bind the descs together; paddr and len should already be
	 * set with virtio_ve_set().
	 */
	do {
		/* Bind the indirect descriptors */
		if (qe->qe_indirect_next > 1) {
			uint16_t i = 0;

			/*
			 * Set the pointer/flags to the
			 * first indirect descriptor
			 */
			virtio_ve_set_desc(qe->qe_desc,
			    qe->qe_indirect_dma_cookie.dmac_laddress,
			    sizeof (struct vring_desc) * qe->qe_indirect_next,
			    B_FALSE);
			qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;

			/* For all but the last one, add the next index/flag */
			do {
				desc = &qe->qe_indirect_descs[i];
				i++;

				desc->flags |= VRING_DESC_F_NEXT;
				desc->next = i;
			} while (i < qe->qe_indirect_next - 1);
		}

		if (qe->qe_next) {
			qe->qe_desc->flags |= VRING_DESC_F_NEXT;
			qe->qe_desc->next = qe->qe_next->qe_index;
		}

		qe = qe->qe_next;
	} while (qe);

	mutex_enter(&vq->vq_avail_lock);
	idx = vq->vq_avail_idx;
	vq->vq_avail_idx++;

	/* Make sure the bits hit the descriptor(s) */
	membar_producer();
	vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;

	/* Notify the device, if needed. */
	if (sync)
		virtio_sync_vq(vq);

	mutex_exit(&vq->vq_avail_lock);
}
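
/*
 * Submission sketch (hypothetical driver code): allocate an entry, point
 * it at a DMA cookie, then push it and notify the host in one call.
 *
 *	struct vq_entry *ve = vq_alloc_entry(vq);
 *	if (ve == NULL)
 *		... ring is full, retry later ...
 *	virtio_ve_set(ve, cookie.dmac_laddress, len, B_TRUE);
 *	virtio_push_chain(ve, B_TRUE);
 */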

/* Get a chain of descriptors from the used ring, if one is available. */
struct vq_entry *
virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
{
	struct vq_entry *head;
	int slot;
	int usedidx;

	mutex_enter(&vq->vq_used_lock);

	/* No used entries? Bye. */
	if (vq->vq_used_idx == vq->vq_used->idx) {
		mutex_exit(&vq->vq_used_lock);
		return (NULL);
	}

	usedidx = vq->vq_used_idx;
	vq->vq_used_idx++;
	mutex_exit(&vq->vq_used_lock);

	usedidx %= vq->vq_num;

	/* Make sure we do the next step _after_ checking the idx. */
	membar_consumer();

	slot = vq->vq_used->ring[usedidx].id;
	*len = vq->vq_used->ring[usedidx].len;

	head = &vq->vq_entries[slot];

	return (head);
}
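
/*
 * Completion sketch (hypothetical driver code), typically run from a vq
 * interrupt handler:
 *
 *	uint32_t len;
 *	struct vq_entry *ve;
 *
 *	while ((ve = virtio_pull_chain(vq, &len)) != NULL) {
 *		... consume the buffer; the device wrote len bytes ...
 *		virtio_free_chain(ve);
 *	}
 */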

void
virtio_free_chain(struct vq_entry *qe)
{
	struct vq_entry *tmp;
	struct virtqueue *vq = qe->qe_queue;

	ASSERT(qe);

	do {
		ASSERT(qe->qe_queue == vq);
		tmp = qe->qe_next;
		vq_free_entry(vq, qe);
		qe = tmp;
	} while (tmp);
}

void
virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
{
	first->qe_next = second;
}

static int
virtio_register_msi(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[],
    int intr_types)
{
	int count, actual;
	int int_type;
	int i;
	int handler_count;
	int ret;

	/* If both MSI and MSI-x are reported, prefer MSI-x. */
	int_type = DDI_INTR_TYPE_MSI;
	if (intr_types & DDI_INTR_TYPE_MSIX)
		int_type = DDI_INTR_TYPE_MSIX;

	/* Walk the handler table to get the number of handlers. */
	for (handler_count = 0;
	    vq_handlers && vq_handlers[handler_count].vh_func;
	    handler_count++)
		;

	/* +1 if there is a config change handler. */
	if (config_handler)
		handler_count++;

	/* Number of MSIs supported by the device. */
	ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
		return (ret);
	}

	/*
	 * Those who try to register more handlers than the device
	 * supports shall suffer.
	 */
	ASSERT(handler_count <= count);

	sc->sc_intr_htable = kmem_zalloc(
	    sizeof (ddi_intr_handle_t) * handler_count, KM_SLEEP);

	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
	    handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
		goto out_msi_alloc;
	}

	if (actual != handler_count) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Not enough MSI available: need %d, available %d",
		    handler_count, actual);
		ret = DDI_FAILURE;
		goto out_msi_available;
	}

	sc->sc_intr_num = handler_count;
	sc->sc_intr_config = B_FALSE;
	if (config_handler) {
		sc->sc_intr_config = B_TRUE;
	}

	/* Assume they are all same priority */
	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
		goto out_msi_prio;
	}

	/* Add the vq handlers */
	for (i = 0; vq_handlers[i].vh_func; i++) {
		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
		    vq_handlers[i].vh_func,
		    sc, vq_handlers[i].vh_priv);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "ddi_intr_add_handler failed");
			/* Remove the handlers that succeeded. */
			while (--i >= 0) {
				(void) ddi_intr_remove_handler(
				    sc->sc_intr_htable[i]);
			}
			goto out_add_handlers;
		}
	}

	/* Don't forget the config handler */
	if (config_handler) {
		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
		    config_handler->vh_func,
		    sc, config_handler->vh_priv);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "ddi_intr_add_handler failed");
			/* Remove the handlers that succeeded. */
			while (--i >= 0) {
				(void) ddi_intr_remove_handler(
				    sc->sc_intr_htable[i]);
			}
			goto out_add_handlers;
		}
	}

	/* We know we are using MSI, so set the config offset. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;

	ret = ddi_intr_get_cap(sc->sc_intr_htable[0],
	    &sc->sc_intr_cap);
	/* Just in case. */
	if (ret != DDI_SUCCESS)
		sc->sc_intr_cap = 0;

	/* Success; the labels below are reached only on failure. */
	return (DDI_SUCCESS);

out_add_handlers:
out_msi_prio:
out_msi_available:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(sc->sc_intr_htable[i]);
out_msi_alloc:
	/* Free with the same size used for the allocation above. */
	kmem_free(sc->sc_intr_htable,
	    sizeof (ddi_intr_handle_t) * handler_count);

	return (ret);
}
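
/*
 * Both registration paths take a table of handlers terminated by an
 * entry with a NULL vh_func, e.g. (hypothetical functions):
 *
 *	struct virtio_int_handler handlers[] = {
 *		{ .vh_func = mydrv_rx_intr, .vh_priv = sc },
 *		{ .vh_func = mydrv_tx_intr, .vh_priv = sc },
 *		{ NULL },
 *	};
 */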

struct virtio_handler_container {
	int nhandlers;
	struct virtio_int_handler config_handler;
	struct virtio_int_handler vq_handlers[];
};

uint_t
virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
{
	struct virtio_softc *sc = (void *)arg1;
	struct virtio_handler_container *vhc = (void *)arg2;
	uint8_t isr_status;
	int i;

	isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_ISR_STATUS));

	if (!isr_status)
		return (DDI_INTR_UNCLAIMED);

	if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    vhc->config_handler.vh_func) {
		vhc->config_handler.vh_func((void *)sc,
		    vhc->config_handler.vh_priv);
	}

	/* Notify all handlers */
	for (i = 0; i < vhc->nhandlers; i++) {
		vhc->vq_handlers[i].vh_func((void *)sc,
		    vhc->vq_handlers[i].vh_priv);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * config_handler and vq_handlers may be allocated on the stack, so
 * they are copied into a heap-allocated container that outlives this
 * call.
 */
static int
virtio_register_intx(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
	int vq_handler_count;
	int actual;
	struct virtio_handler_container *vhc;
	int ret = DDI_FAILURE;

	/* Walk the handler table to get the number of handlers. */
	for (vq_handler_count = 0;
	    vq_handlers && vq_handlers[vq_handler_count].vh_func;
	    vq_handler_count++)
		;

1014*e0724c53SAlexey Zaytsev 	vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1015*e0724c53SAlexey Zaytsev 	    sizeof (struct virtio_int_handler) * vq_handler_count,
1016*e0724c53SAlexey Zaytsev 	    KM_SLEEP);
1017*e0724c53SAlexey Zaytsev 
1018*e0724c53SAlexey Zaytsev 	vhc->nhandlers = vq_handler_count;
1019*e0724c53SAlexey Zaytsev 	(void) memcpy(vhc->vq_handlers, vq_handlers,
1020*e0724c53SAlexey Zaytsev 	    sizeof (struct virtio_int_handler) * vq_handler_count);
1021*e0724c53SAlexey Zaytsev 
1022*e0724c53SAlexey Zaytsev 	if (config_handler) {
1023*e0724c53SAlexey Zaytsev 		(void) memcpy(&vhc->config_handler, config_handler,
1024*e0724c53SAlexey Zaytsev 		    sizeof (struct virtio_int_handler));
1025*e0724c53SAlexey Zaytsev 	}
1026*e0724c53SAlexey Zaytsev 
1027*e0724c53SAlexey Zaytsev 	/* Just a single entry for a single interrupt. */
1028*e0724c53SAlexey Zaytsev 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1029*e0724c53SAlexey Zaytsev 
1030*e0724c53SAlexey Zaytsev 	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1031*e0724c53SAlexey Zaytsev 	    DDI_INTR_TYPE_FIXED, 0, 1, &actual,
1032*e0724c53SAlexey Zaytsev 	    DDI_INTR_ALLOC_NORMAL);
1033*e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
1034*e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
1035*e0724c53SAlexey Zaytsev 		    "Failed to allocate a fixed interrupt: %d", ret);
1036*e0724c53SAlexey Zaytsev 		goto out_int_alloc;
1037*e0724c53SAlexey Zaytsev 	}
1038*e0724c53SAlexey Zaytsev 
1039*e0724c53SAlexey Zaytsev 	ASSERT(actual == 1);
1040*e0724c53SAlexey Zaytsev 	sc->sc_intr_num = 1;
1041*e0724c53SAlexey Zaytsev 
1042*e0724c53SAlexey Zaytsev 	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1043*e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
1044*e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1045*e0724c53SAlexey Zaytsev 		goto out_prio;
1046*e0724c53SAlexey Zaytsev 	}
1047*e0724c53SAlexey Zaytsev 
1048*e0724c53SAlexey Zaytsev 	ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1049*e0724c53SAlexey Zaytsev 	    virtio_intx_dispatch, sc, vhc);
1050*e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
1051*e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1052*e0724c53SAlexey Zaytsev 		goto out_add_handlers;
1053*e0724c53SAlexey Zaytsev 	}
1054*e0724c53SAlexey Zaytsev 
1055*e0724c53SAlexey Zaytsev 	/* We know we are not using MSI, so set the config offset. */
1056*e0724c53SAlexey Zaytsev 	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1057*e0724c53SAlexey Zaytsev 
1058*e0724c53SAlexey Zaytsev 	return (DDI_SUCCESS);
1059*e0724c53SAlexey Zaytsev 
1060*e0724c53SAlexey Zaytsev out_add_handlers:
1061*e0724c53SAlexey Zaytsev out_prio:
1062*e0724c53SAlexey Zaytsev 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
1063*e0724c53SAlexey Zaytsev out_int_alloc:
1064*e0724c53SAlexey Zaytsev 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1065*e0724c53SAlexey Zaytsev 	kmem_free(vhc, sizeof (struct virtio_handler_container) +
1066*e0724c53SAlexey Zaytsev 	    sizeof (struct virtio_int_handler) * vq_handler_count);
1067*e0724c53SAlexey Zaytsev 	return (ret);
1068*e0724c53SAlexey Zaytsev }
1069*e0724c53SAlexey Zaytsev 
1070*e0724c53SAlexey Zaytsev /*
1071*e0724c53SAlexey Zaytsev  * We discover whether MSI is supported while registering, and the
1072*e0724c53SAlexey Zaytsev  * register layout depends on it. Don't access the device-specific
1073*e0724c53SAlexey Zaytsev  * bits in BAR 0 before calling this function!
1074*e0724c53SAlexey Zaytsev  */
1075*e0724c53SAlexey Zaytsev int
1076*e0724c53SAlexey Zaytsev virtio_register_ints(struct virtio_softc *sc,
1077*e0724c53SAlexey Zaytsev     struct virtio_int_handler *config_handler,
1078*e0724c53SAlexey Zaytsev     struct virtio_int_handler vq_handlers[])
1079*e0724c53SAlexey Zaytsev {
1080*e0724c53SAlexey Zaytsev 	int ret;
1081*e0724c53SAlexey Zaytsev 	int intr_types;
1082*e0724c53SAlexey Zaytsev 
1083*e0724c53SAlexey Zaytsev 	/* Determine which types of interrupts are supported */
1084*e0724c53SAlexey Zaytsev 	ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
1085*e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
1086*e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
1087*e0724c53SAlexey Zaytsev 		goto out_inttype;
1088*e0724c53SAlexey Zaytsev 	}
1089*e0724c53SAlexey Zaytsev 
1090*e0724c53SAlexey Zaytsev 	/* If we have msi, let's use them. */
1091*e0724c53SAlexey Zaytsev 	if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
1092*e0724c53SAlexey Zaytsev 		ret = virtio_register_msi(sc, config_handler,
1093*e0724c53SAlexey Zaytsev 		    vq_handlers, intr_types);
1094*e0724c53SAlexey Zaytsev 		if (ret == DDI_SUCCESS)
1095*e0724c53SAlexey Zaytsev 			return (DDI_SUCCESS);
1096*e0724c53SAlexey Zaytsev 	}
1097*e0724c53SAlexey Zaytsev 
1098*e0724c53SAlexey Zaytsev 	/* Fall back to old-fashioned interrupts. */
1099*e0724c53SAlexey Zaytsev 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1100*e0724c53SAlexey Zaytsev 		dev_debug(sc->sc_dev, CE_WARN,
1101*e0724c53SAlexey Zaytsev 		    "Using legacy interrupts");
1102*e0724c53SAlexey Zaytsev 
1103*e0724c53SAlexey Zaytsev 		return (virtio_register_intx(sc, config_handler, vq_handlers));
1104*e0724c53SAlexey Zaytsev 	}
1105*e0724c53SAlexey Zaytsev 
1106*e0724c53SAlexey Zaytsev 	dev_err(sc->sc_dev, CE_WARN,
1107*e0724c53SAlexey Zaytsev 	    "MSI failed and fixed interrupts not supported. Giving up.");
1108*e0724c53SAlexey Zaytsev 	ret = DDI_FAILURE;
1109*e0724c53SAlexey Zaytsev 
1110*e0724c53SAlexey Zaytsev out_inttype:
1111*e0724c53SAlexey Zaytsev 	return (ret);
1112*e0724c53SAlexey Zaytsev }
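
/*
 * A hedged attach-path sketch (hypothetical client code): register the
 * interrupts before touching any device-specific registers in BAR 0,
 * set up the queues, then enable the interrupts. my_config_handler,
 * my_vq_handler and my_state are hypothetical:
 *
 *	struct virtio_int_handler config_handler = {
 *		.vh_func = my_config_handler, .vh_priv = my_state
 *	};
 *	struct virtio_int_handler vq_handlers[] = {
 *		{ .vh_func = my_vq_handler, .vh_priv = my_state },
 *		{ .vh_func = NULL }
 *	};
 *
 *	if (virtio_register_ints(sc, &config_handler, vq_handlers) !=
 *	    DDI_SUCCESS)
 *		goto fail;
 *	... allocate and set up the virtqueues ...
 *	if (virtio_enable_ints(sc) != DDI_SUCCESS)
 *		goto fail;
 */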
1113*e0724c53SAlexey Zaytsev 
1115*e0724c53SAlexey Zaytsev static int
1116*e0724c53SAlexey Zaytsev virtio_enable_msi(struct virtio_softc *sc)
1117*e0724c53SAlexey Zaytsev {
1118*e0724c53SAlexey Zaytsev 	int ret, i;
1119*e0724c53SAlexey Zaytsev 	int vq_handler_count = sc->sc_intr_num;
1120*e0724c53SAlexey Zaytsev 
1121*e0724c53SAlexey Zaytsev 	/* Number of queue handlers, not counting the config handler. */
1122*e0724c53SAlexey Zaytsev 	if (sc->sc_intr_config)
1123*e0724c53SAlexey Zaytsev 		vq_handler_count--;
1124*e0724c53SAlexey Zaytsev 
1125*e0724c53SAlexey Zaytsev 	/* Enable the interrupts, either the whole block or one by one. */
1126*e0724c53SAlexey Zaytsev 	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1127*e0724c53SAlexey Zaytsev 		ret = ddi_intr_block_enable(sc->sc_intr_htable,
1128*e0724c53SAlexey Zaytsev 		    sc->sc_intr_num);
1129*e0724c53SAlexey Zaytsev 		if (ret != DDI_SUCCESS) {
1130*e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN,
1131*e0724c53SAlexey Zaytsev 			    "Failed to enable MSI, falling back to INTx");
1132*e0724c53SAlexey Zaytsev 			goto out_enable;
1133*e0724c53SAlexey Zaytsev 		}
1134*e0724c53SAlexey Zaytsev 	} else {
1135*e0724c53SAlexey Zaytsev 		for (i = 0; i < sc->sc_intr_num; i++) {
1136*e0724c53SAlexey Zaytsev 			ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1137*e0724c53SAlexey Zaytsev 			if (ret != DDI_SUCCESS) {
1138*e0724c53SAlexey Zaytsev 				dev_err(sc->sc_dev, CE_WARN,
1139*e0724c53SAlexey Zaytsev 				    "Failed to enable MSI %d, "
1140*e0724c53SAlexey Zaytsev 				    "falling back to INTx", i);
1141*e0724c53SAlexey Zaytsev 
1142*e0724c53SAlexey Zaytsev 				while (--i >= 0) {
1143*e0724c53SAlexey Zaytsev 					(void) ddi_intr_disable(
1144*e0724c53SAlexey Zaytsev 					    sc->sc_intr_htable[i]);
1145*e0724c53SAlexey Zaytsev 				}
1146*e0724c53SAlexey Zaytsev 				goto out_enable;
1147*e0724c53SAlexey Zaytsev 			}
1148*e0724c53SAlexey Zaytsev 		}
1149*e0724c53SAlexey Zaytsev 	}
1150*e0724c53SAlexey Zaytsev 
1151*e0724c53SAlexey Zaytsev 	/* Bind the allocated MSI vectors to the queues and the config. */
1152*e0724c53SAlexey Zaytsev 	for (i = 0; i < vq_handler_count; i++) {
1153*e0724c53SAlexey Zaytsev 		int check;
1154*e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1155*e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1156*e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1157*e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_SELECT), i);
1158*e0724c53SAlexey Zaytsev 
1159*e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1160*e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1161*e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1162*e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_VECTOR), i);
1163*e0724c53SAlexey Zaytsev 
1164*e0724c53SAlexey Zaytsev 		check = ddi_get16(sc->sc_ioh,
1165*e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1166*e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1167*e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_VECTOR));
1168*e0724c53SAlexey Zaytsev 		if (check != i) {
1169*e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1170*e0724c53SAlexey Zaytsev 			    "for VQ %d, MSI %d. Check = %x", i, i, check);
1171*e0724c53SAlexey Zaytsev 			ret = ENODEV;
1172*e0724c53SAlexey Zaytsev 			goto out_bind;
1173*e0724c53SAlexey Zaytsev 		}
1174*e0724c53SAlexey Zaytsev 	}
1175*e0724c53SAlexey Zaytsev 
1176*e0724c53SAlexey Zaytsev 	if (sc->sc_intr_config) {
1177*e0724c53SAlexey Zaytsev 		int check;
1178*e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1179*e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1180*e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1181*e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_CONFIG_VECTOR), i);
1182*e0724c53SAlexey Zaytsev 
1183*e0724c53SAlexey Zaytsev 		check = ddi_get16(sc->sc_ioh,
1184*e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1185*e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1186*e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_CONFIG_VECTOR));
1187*e0724c53SAlexey Zaytsev 		if (check != i) {
1188*e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1189*e0724c53SAlexey Zaytsev 			    "for Config updates, MSI %d", i);
1190*e0724c53SAlexey Zaytsev 			ret = ENODEV;
1191*e0724c53SAlexey Zaytsev 			goto out_bind;
1192*e0724c53SAlexey Zaytsev 		}
1193*e0724c53SAlexey Zaytsev 	}
1194*e0724c53SAlexey Zaytsev 
1195*e0724c53SAlexey Zaytsev 	return (DDI_SUCCESS);
1196*e0724c53SAlexey Zaytsev 
1197*e0724c53SAlexey Zaytsev out_bind:
1198*e0724c53SAlexey Zaytsev 	/* Unbind the vqs */
1199*e0724c53SAlexey Zaytsev 	for (i = 0; i < vq_handler_count; i++) {
1200*e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1201*e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1202*e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1203*e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_SELECT), i);
1204*e0724c53SAlexey Zaytsev 
1205*e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1206*e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1207*e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1208*e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_VECTOR),
1209*e0724c53SAlexey Zaytsev 		    VIRTIO_MSI_NO_VECTOR);
1210*e0724c53SAlexey Zaytsev 	}
1211*e0724c53SAlexey Zaytsev 	/* And the config */
1212*e0724c53SAlexey Zaytsev 	/* LINTED E_BAD_PTR_CAST_ALIGN */
1213*e0724c53SAlexey Zaytsev 	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1214*e0724c53SAlexey Zaytsev 	    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1215*e0724c53SAlexey Zaytsev 
1216*e0724c53SAlexey Zaytsev 	ret = DDI_FAILURE;
1217*e0724c53SAlexey Zaytsev 
1218*e0724c53SAlexey Zaytsev out_enable:
1219*e0724c53SAlexey Zaytsev 	return (ret);
1220*e0724c53SAlexey Zaytsev }
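
/*
 * The binding loops above follow the virtio MSI binding protocol:
 * select the queue, write the vector number into the queue (or config)
 * vector register, then read it back. The device answers with
 * VIRTIO_MSI_NO_VECTOR if it could not associate the vector. A sketch
 * of the pattern for one queue, using the same accessors as above:
 *
 *	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
 *	    VIRTIO_CONFIG_QUEUE_SELECT), queue_index);
 *	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
 *	    VIRTIO_CONFIG_QUEUE_VECTOR), vector);
 *	if (ddi_get16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
 *	    VIRTIO_CONFIG_QUEUE_VECTOR)) != vector)
 *		... the device rejected the binding ...
 */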
1221*e0724c53SAlexey Zaytsev 
1222*e0724c53SAlexey Zaytsev static int virtio_enable_intx(struct virtio_softc *sc)
1223*e0724c53SAlexey Zaytsev {
1224*e0724c53SAlexey Zaytsev 	int ret;
1225*e0724c53SAlexey Zaytsev 
1226*e0724c53SAlexey Zaytsev 	ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1227*e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS)
1228*e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
1229*e0724c53SAlexey Zaytsev 		    "Failed to enable interrupt: %d", ret);
1230*e0724c53SAlexey Zaytsev 	return (ret);
1231*e0724c53SAlexey Zaytsev }
1232*e0724c53SAlexey Zaytsev 
1233*e0724c53SAlexey Zaytsev /*
1234*e0724c53SAlexey Zaytsev  * We can't enable/disable individual handlers in the INTx case so do
1235*e0724c53SAlexey Zaytsev  * the whole bunch even in the msi case.
1236*e0724c53SAlexey Zaytsev  */
1237*e0724c53SAlexey Zaytsev int
1238*e0724c53SAlexey Zaytsev virtio_enable_ints(struct virtio_softc *sc)
1239*e0724c53SAlexey Zaytsev {
1241*e0724c53SAlexey Zaytsev 	/* See if we are using MSI. */
1242*e0724c53SAlexey Zaytsev 	if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI)
1243*e0724c53SAlexey Zaytsev 		return (virtio_enable_msi(sc));
1244*e0724c53SAlexey Zaytsev 
1245*e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI);
1246*e0724c53SAlexey Zaytsev 
1247*e0724c53SAlexey Zaytsev 	return (virtio_enable_intx(sc));
1248*e0724c53SAlexey Zaytsev }
1249*e0724c53SAlexey Zaytsev 
1250*e0724c53SAlexey Zaytsev void
1251*e0724c53SAlexey Zaytsev virtio_release_ints(struct virtio_softc *sc)
1252*e0724c53SAlexey Zaytsev {
1253*e0724c53SAlexey Zaytsev 	int i;
1254*e0724c53SAlexey Zaytsev 	int ret;
1255*e0724c53SAlexey Zaytsev 
1256*e0724c53SAlexey Zaytsev 	/* If we were running with MSI, unbind the vectors. */
1257*e0724c53SAlexey Zaytsev 	if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
1258*e0724c53SAlexey Zaytsev 		/* Unbind all vqs */
1259*e0724c53SAlexey Zaytsev 		for (i = 0; i < sc->sc_nvqs; i++) {
1260*e0724c53SAlexey Zaytsev 			ddi_put16(sc->sc_ioh,
1261*e0724c53SAlexey Zaytsev 			    /* LINTED E_BAD_PTR_CAST_ALIGN */
1262*e0724c53SAlexey Zaytsev 			    (uint16_t *)(sc->sc_io_addr +
1263*e0724c53SAlexey Zaytsev 			    VIRTIO_CONFIG_QUEUE_SELECT), i);
1264*e0724c53SAlexey Zaytsev 
1265*e0724c53SAlexey Zaytsev 			ddi_put16(sc->sc_ioh,
1266*e0724c53SAlexey Zaytsev 			    /* LINTED E_BAD_PTR_CAST_ALIGN */
1267*e0724c53SAlexey Zaytsev 			    (uint16_t *)(sc->sc_io_addr +
1268*e0724c53SAlexey Zaytsev 			    VIRTIO_CONFIG_QUEUE_VECTOR),
1269*e0724c53SAlexey Zaytsev 			    VIRTIO_MSI_NO_VECTOR);
1270*e0724c53SAlexey Zaytsev 		}
1271*e0724c53SAlexey Zaytsev 		/* And the config */
1272*e0724c53SAlexey Zaytsev 		/* LINTED E_BAD_PTR_CAST_ALIGN */
1273*e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1274*e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_CONFIG_VECTOR),
1275*e0724c53SAlexey Zaytsev 		    VIRTIO_MSI_NO_VECTOR);
1276*e0724c53SAlexey Zaytsev 
1277*e0724c53SAlexey Zaytsev 	}
1278*e0724c53SAlexey Zaytsev 
1279*e0724c53SAlexey Zaytsev 	/* Disable the interrupts, either the whole block or one by one. */
1280*e0724c53SAlexey Zaytsev 	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1281*e0724c53SAlexey Zaytsev 		ret = ddi_intr_block_disable(sc->sc_intr_htable,
1282*e0724c53SAlexey Zaytsev 		    sc->sc_intr_num);
1283*e0724c53SAlexey Zaytsev 		if (ret != DDI_SUCCESS) {
1284*e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN,
1285*e0724c53SAlexey Zaytsev 			    "Failed to disable MSIs, won't be able to "
1286*e0724c53SAlexey Zaytsev 			    "reuse them next time");
1287*e0724c53SAlexey Zaytsev 		}
1288*e0724c53SAlexey Zaytsev 	} else {
1289*e0724c53SAlexey Zaytsev 		for (i = 0; i < sc->sc_intr_num; i++) {
1290*e0724c53SAlexey Zaytsev 			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1291*e0724c53SAlexey Zaytsev 			if (ret != DDI_SUCCESS) {
1292*e0724c53SAlexey Zaytsev 				dev_err(sc->sc_dev, CE_WARN,
1293*e0724c53SAlexey Zaytsev 				    "Failed to disable interrupt %d, "
1294*e0724c53SAlexey Zaytsev 				    "won't be able to reuse", i);
1295*e0724c53SAlexey Zaytsev 
1296*e0724c53SAlexey Zaytsev 			}
1297*e0724c53SAlexey Zaytsev 		}
1298*e0724c53SAlexey Zaytsev 	}
1299*e0724c53SAlexey Zaytsev 
1301*e0724c53SAlexey Zaytsev 	for (i = 0; i < sc->sc_intr_num; i++) {
1302*e0724c53SAlexey Zaytsev 		(void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1303*e0724c53SAlexey Zaytsev 	}
1304*e0724c53SAlexey Zaytsev 
1305*e0724c53SAlexey Zaytsev 	for (i = 0; i < sc->sc_intr_num; i++)
1306*e0724c53SAlexey Zaytsev 		(void) ddi_intr_free(sc->sc_intr_htable[i]);
1307*e0724c53SAlexey Zaytsev 
1308*e0724c53SAlexey Zaytsev 	kmem_free(sc->sc_intr_htable,
1309*e0724c53SAlexey Zaytsev 	    sizeof (ddi_intr_handle_t) * sc->sc_intr_num);
1310*e0724c53SAlexey Zaytsev 
1312*e0724c53SAlexey Zaytsev 	/* After disabling interrupts, the config offset is non-MSI. */
1313*e0724c53SAlexey Zaytsev 	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1314*e0724c53SAlexey Zaytsev }
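
/*
 * A hedged detach-path sketch (hypothetical client code): release the
 * interrupts while the queues still exist, since the MSI unbind loop
 * above walks all sc_nvqs queues:
 *
 *	virtio_release_ints(sc);
 *	... free the virtqueues and the rest of the device state ...
 */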
1315*e0724c53SAlexey Zaytsev 
1316*e0724c53SAlexey Zaytsev /*
1317*e0724c53SAlexey Zaytsev  * Module linkage information for the kernel.
1318*e0724c53SAlexey Zaytsev  */
1319*e0724c53SAlexey Zaytsev static struct modlmisc modlmisc = {
1320*e0724c53SAlexey Zaytsev 	&mod_miscops, /* Type of module */
1321*e0724c53SAlexey Zaytsev 	"VirtIO common library module",
1322*e0724c53SAlexey Zaytsev };
1323*e0724c53SAlexey Zaytsev 
1324*e0724c53SAlexey Zaytsev static struct modlinkage modlinkage = {
1325*e0724c53SAlexey Zaytsev 	MODREV_1,
1326*e0724c53SAlexey Zaytsev 	{
1327*e0724c53SAlexey Zaytsev 		(void *)&modlmisc,
1328*e0724c53SAlexey Zaytsev 		NULL
1329*e0724c53SAlexey Zaytsev 	}
1330*e0724c53SAlexey Zaytsev };
1331*e0724c53SAlexey Zaytsev 
1332*e0724c53SAlexey Zaytsev int
1333*e0724c53SAlexey Zaytsev _init(void)
1334*e0724c53SAlexey Zaytsev {
1335*e0724c53SAlexey Zaytsev 	return (mod_install(&modlinkage));
1336*e0724c53SAlexey Zaytsev }
1337*e0724c53SAlexey Zaytsev 
1338*e0724c53SAlexey Zaytsev int
1339*e0724c53SAlexey Zaytsev _fini(void)
1340*e0724c53SAlexey Zaytsev {
1341*e0724c53SAlexey Zaytsev 	return (mod_remove(&modlinkage));
1342*e0724c53SAlexey Zaytsev }
1343*e0724c53SAlexey Zaytsev 
1344*e0724c53SAlexey Zaytsev int
1345*e0724c53SAlexey Zaytsev _info(struct modinfo *modinfop)
1346*e0724c53SAlexey Zaytsev {
1347*e0724c53SAlexey Zaytsev 	return (mod_info(&modlinkage, modinfop));
1348*e0724c53SAlexey Zaytsev }