xref: /titanic_50/usr/src/uts/common/io/virtio/virtio.c (revision 17ad7f9fd28ceea21aea94421cb8ada963285765)
1e0724c53SAlexey Zaytsev /*
2e0724c53SAlexey Zaytsev  * CDDL HEADER START
3e0724c53SAlexey Zaytsev  *
4e0724c53SAlexey Zaytsev  * The contents of this file are subject to the terms of the
5e0724c53SAlexey Zaytsev  * Common Development and Distribution License (the "License").
6e0724c53SAlexey Zaytsev  * You may not use this file except in compliance with the License.
7e0724c53SAlexey Zaytsev  *
8e0724c53SAlexey Zaytsev  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9e0724c53SAlexey Zaytsev  * or http://www.opensolaris.org/os/licensing.
10e0724c53SAlexey Zaytsev  * See the License for the specific language governing permissions
11e0724c53SAlexey Zaytsev  * and limitations under the License.
12e0724c53SAlexey Zaytsev  *
13e0724c53SAlexey Zaytsev  * When distributing Covered Code, include this CDDL HEADER in each
14e0724c53SAlexey Zaytsev  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15e0724c53SAlexey Zaytsev  * If applicable, add the following below this CDDL HEADER, with the
16e0724c53SAlexey Zaytsev  * fields enclosed by brackets "[]" replaced with your own identifying
17e0724c53SAlexey Zaytsev  * information: Portions Copyright [yyyy] [name of copyright owner]
18e0724c53SAlexey Zaytsev  *
19e0724c53SAlexey Zaytsev  * CDDL HEADER END
20e0724c53SAlexey Zaytsev  */
21e0724c53SAlexey Zaytsev 
22e0724c53SAlexey Zaytsev /*
238a324c92SDan McDonald  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
24e0724c53SAlexey Zaytsev  * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
25e0724c53SAlexey Zaytsev  */
26e0724c53SAlexey Zaytsev 
27e0724c53SAlexey Zaytsev /* Based on the NetBSD virtio driver by Minoura Makoto. */
28e0724c53SAlexey Zaytsev /*
29e0724c53SAlexey Zaytsev  * Copyright (c) 2010 Minoura Makoto.
30e0724c53SAlexey Zaytsev  * All rights reserved.
31e0724c53SAlexey Zaytsev  *
32e0724c53SAlexey Zaytsev  * Redistribution and use in source and binary forms, with or without
33e0724c53SAlexey Zaytsev  * modification, are permitted provided that the following conditions
34e0724c53SAlexey Zaytsev  * are met:
35e0724c53SAlexey Zaytsev  * 1. Redistributions of source code must retain the above copyright
36e0724c53SAlexey Zaytsev  *    notice, this list of conditions and the following disclaimer.
37e0724c53SAlexey Zaytsev  * 2. Redistributions in binary form must reproduce the above copyright
38e0724c53SAlexey Zaytsev  *    notice, this list of conditions and the following disclaimer in the
39e0724c53SAlexey Zaytsev  *    documentation and/or other materials provided with the distribution.
40e0724c53SAlexey Zaytsev  *
41e0724c53SAlexey Zaytsev  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
42e0724c53SAlexey Zaytsev  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
43e0724c53SAlexey Zaytsev  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
44e0724c53SAlexey Zaytsev  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
45e0724c53SAlexey Zaytsev  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
46e0724c53SAlexey Zaytsev  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47e0724c53SAlexey Zaytsev  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48e0724c53SAlexey Zaytsev  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49e0724c53SAlexey Zaytsev  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50e0724c53SAlexey Zaytsev  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51e0724c53SAlexey Zaytsev  *
52e0724c53SAlexey Zaytsev  */
53e0724c53SAlexey Zaytsev 
54e0724c53SAlexey Zaytsev #include <sys/conf.h>
55e0724c53SAlexey Zaytsev #include <sys/kmem.h>
56e0724c53SAlexey Zaytsev #include <sys/debug.h>
57e0724c53SAlexey Zaytsev #include <sys/modctl.h>
58e0724c53SAlexey Zaytsev #include <sys/autoconf.h>
59e0724c53SAlexey Zaytsev #include <sys/ddi_impldefs.h>
60e0724c53SAlexey Zaytsev #include <sys/ddi.h>
61e0724c53SAlexey Zaytsev #include <sys/sunddi.h>
62e0724c53SAlexey Zaytsev #include <sys/sunndi.h>
63e0724c53SAlexey Zaytsev #include <sys/avintr.h>
64e0724c53SAlexey Zaytsev #include <sys/spl.h>
65e0724c53SAlexey Zaytsev #include <sys/promif.h>
66e0724c53SAlexey Zaytsev #include <sys/list.h>
67e0724c53SAlexey Zaytsev #include <sys/bootconf.h>
68e0724c53SAlexey Zaytsev #include <sys/bootsvcs.h>
69e0724c53SAlexey Zaytsev #include <sys/sysmacros.h>
70e0724c53SAlexey Zaytsev #include <sys/pci.h>
71e0724c53SAlexey Zaytsev 
72e0724c53SAlexey Zaytsev #include "virtiovar.h"
73e0724c53SAlexey Zaytsev #include "virtioreg.h"
748a324c92SDan McDonald 
75e0724c53SAlexey Zaytsev #define	NDEVNAMES	(sizeof (virtio_device_name) / sizeof (char *))
76e0724c53SAlexey Zaytsev #define	MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */
77e0724c53SAlexey Zaytsev #define	VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
78e0724c53SAlexey Zaytsev 	    ~(VIRTIO_PAGE_SIZE-1))
79e0724c53SAlexey Zaytsev 
80e0724c53SAlexey Zaytsev void
81e0724c53SAlexey Zaytsev virtio_set_status(struct virtio_softc *sc, unsigned int status)
82e0724c53SAlexey Zaytsev {
83e0724c53SAlexey Zaytsev 	int old = 0;
84e0724c53SAlexey Zaytsev 
858a324c92SDan McDonald 	if (status != 0) {
868a324c92SDan McDonald 		old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
87e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_DEVICE_STATUS));
888a324c92SDan McDonald 	}
89e0724c53SAlexey Zaytsev 
908a324c92SDan McDonald 	ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
918a324c92SDan McDonald 	    VIRTIO_CONFIG_DEVICE_STATUS), status | old);
92e0724c53SAlexey Zaytsev }
93e0724c53SAlexey Zaytsev 
94e0724c53SAlexey Zaytsev /*
95e0724c53SAlexey Zaytsev  * Negotiate features, save the result in sc->sc_features
96e0724c53SAlexey Zaytsev  */
97e0724c53SAlexey Zaytsev uint32_t
98e0724c53SAlexey Zaytsev virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
99e0724c53SAlexey Zaytsev {
100e0724c53SAlexey Zaytsev 	uint32_t host_features;
101e0724c53SAlexey Zaytsev 	uint32_t features;
102e0724c53SAlexey Zaytsev 
103e0724c53SAlexey Zaytsev 	host_features = ddi_get32(sc->sc_ioh,
104e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
105e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
106e0724c53SAlexey Zaytsev 
1078a324c92SDan McDonald 	dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
108e0724c53SAlexey Zaytsev 	    host_features, guest_features);
109e0724c53SAlexey Zaytsev 
110e0724c53SAlexey Zaytsev 	features = host_features & guest_features;
111e0724c53SAlexey Zaytsev 	ddi_put32(sc->sc_ioh,
112e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
113e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
114e0724c53SAlexey Zaytsev 	    features);
115e0724c53SAlexey Zaytsev 
116e0724c53SAlexey Zaytsev 	sc->sc_features = features;
117e0724c53SAlexey Zaytsev 
118e0724c53SAlexey Zaytsev 	return (host_features);
119e0724c53SAlexey Zaytsev }
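
/*
 * Illustrative sketch (not part of this driver): a leaf driver passes the
 * feature bits it understands and then checks the negotiated result.
 * VIRTIO_MYDEV_F_SOMETHING, my_indirect_segs and MY_MAX_SEGS are
 * hypothetical names used only for illustration.
 *
 *	(void) virtio_negotiate_features(sc,
 *	    VIRTIO_F_RING_INDIRECT_DESC | VIRTIO_MYDEV_F_SOMETHING);
 *	if (virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC))
 *		my_indirect_segs = MY_MAX_SEGS;
 */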
120e0724c53SAlexey Zaytsev 
121e0724c53SAlexey Zaytsev size_t
1228a324c92SDan McDonald virtio_show_features(uint32_t features, char *buf, size_t len)
123e0724c53SAlexey Zaytsev {
124e0724c53SAlexey Zaytsev 	char *orig_buf = buf;
125e0724c53SAlexey Zaytsev 	char *bufend = buf + len;
126e0724c53SAlexey Zaytsev 
127e0724c53SAlexey Zaytsev 	/* LINTED E_PTRDIFF_OVERFLOW */
128e0724c53SAlexey Zaytsev 	buf += snprintf(buf, bufend - buf, "Generic ( ");
129e0724c53SAlexey Zaytsev 	if (features & VIRTIO_F_RING_INDIRECT_DESC)
130e0724c53SAlexey Zaytsev 		/* LINTED E_PTRDIFF_OVERFLOW */
131e0724c53SAlexey Zaytsev 		buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
132e0724c53SAlexey Zaytsev 
133e0724c53SAlexey Zaytsev 	/* LINTED E_PTRDIFF_OVERFLOW */
134e0724c53SAlexey Zaytsev 	buf += snprintf(buf, bufend - buf, ") ");
135e0724c53SAlexey Zaytsev 
136e0724c53SAlexey Zaytsev 	/* LINTED E_PTRDIFF_OVERFLOW */
137e0724c53SAlexey Zaytsev 	return (buf - orig_buf);
138e0724c53SAlexey Zaytsev }
139e0724c53SAlexey Zaytsev 
140e0724c53SAlexey Zaytsev boolean_t
141e0724c53SAlexey Zaytsev virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
142e0724c53SAlexey Zaytsev {
143e0724c53SAlexey Zaytsev 	return (sc->sc_features & feature);
144e0724c53SAlexey Zaytsev }
145e0724c53SAlexey Zaytsev 
146e0724c53SAlexey Zaytsev /*
147e0724c53SAlexey Zaytsev  * Device configuration registers.
148e0724c53SAlexey Zaytsev  */
149e0724c53SAlexey Zaytsev uint8_t
150e0724c53SAlexey Zaytsev virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
151e0724c53SAlexey Zaytsev {
152e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset);
153e0724c53SAlexey Zaytsev 	return ddi_get8(sc->sc_ioh,
154e0724c53SAlexey Zaytsev 	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
155e0724c53SAlexey Zaytsev }
156e0724c53SAlexey Zaytsev 
157e0724c53SAlexey Zaytsev uint16_t
158e0724c53SAlexey Zaytsev virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
159e0724c53SAlexey Zaytsev {
160e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset);
161e0724c53SAlexey Zaytsev 	return ddi_get16(sc->sc_ioh,
162e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
163e0724c53SAlexey Zaytsev 	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
164e0724c53SAlexey Zaytsev }
165e0724c53SAlexey Zaytsev 
166e0724c53SAlexey Zaytsev uint32_t
167e0724c53SAlexey Zaytsev virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
168e0724c53SAlexey Zaytsev {
169e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset);
170e0724c53SAlexey Zaytsev 	return ddi_get32(sc->sc_ioh,
171e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
172e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
173e0724c53SAlexey Zaytsev }
174e0724c53SAlexey Zaytsev 
175e0724c53SAlexey Zaytsev uint64_t
176e0724c53SAlexey Zaytsev virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
177e0724c53SAlexey Zaytsev {
178e0724c53SAlexey Zaytsev 	uint64_t r;
179e0724c53SAlexey Zaytsev 
180e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset);
181e0724c53SAlexey Zaytsev 	r = ddi_get32(sc->sc_ioh,
182e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
183e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
184e0724c53SAlexey Zaytsev 	    index + sizeof (uint32_t)));
185e0724c53SAlexey Zaytsev 
186e0724c53SAlexey Zaytsev 	r <<= 32;
187e0724c53SAlexey Zaytsev 
188e0724c53SAlexey Zaytsev 	r += ddi_get32(sc->sc_ioh,
189e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
190e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
191e0724c53SAlexey Zaytsev 	return (r);
192e0724c53SAlexey Zaytsev }
193e0724c53SAlexey Zaytsev 
194e0724c53SAlexey Zaytsev void
1958a324c92SDan McDonald virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
1968a324c92SDan McDonald     uint8_t value)
197e0724c53SAlexey Zaytsev {
198e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset);
199e0724c53SAlexey Zaytsev 	ddi_put8(sc->sc_ioh,
200e0724c53SAlexey Zaytsev 	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
201e0724c53SAlexey Zaytsev }
202e0724c53SAlexey Zaytsev 
203e0724c53SAlexey Zaytsev void
2048a324c92SDan McDonald virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
2058a324c92SDan McDonald     uint16_t value)
206e0724c53SAlexey Zaytsev {
207e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset);
208e0724c53SAlexey Zaytsev 	ddi_put16(sc->sc_ioh,
209e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
210e0724c53SAlexey Zaytsev 	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
211e0724c53SAlexey Zaytsev }
212e0724c53SAlexey Zaytsev 
213e0724c53SAlexey Zaytsev void
2148a324c92SDan McDonald virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
2158a324c92SDan McDonald     uint32_t value)
216e0724c53SAlexey Zaytsev {
217e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset);
218e0724c53SAlexey Zaytsev 	ddi_put32(sc->sc_ioh,
219e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
220e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
221e0724c53SAlexey Zaytsev }
222e0724c53SAlexey Zaytsev 
223e0724c53SAlexey Zaytsev void
2248a324c92SDan McDonald virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
2258a324c92SDan McDonald     uint64_t value)
226e0724c53SAlexey Zaytsev {
227e0724c53SAlexey Zaytsev 	ASSERT(sc->sc_config_offset);
228e0724c53SAlexey Zaytsev 	ddi_put32(sc->sc_ioh,
229e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
230e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
231e0724c53SAlexey Zaytsev 	    value & 0xFFFFFFFF);
232e0724c53SAlexey Zaytsev 	ddi_put32(sc->sc_ioh,
233e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
234e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
235e0724c53SAlexey Zaytsev 	    index + sizeof (uint32_t)), value >> 32);
236e0724c53SAlexey Zaytsev }
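
/*
 * Illustrative sketch: device-specific configuration fields are accessed
 * through the helpers above, at an offset relative to sc_config_offset.
 * MYDEV_CONFIG_CAPACITY and MYDEV_CONFIG_SOMEREG are hypothetical register
 * offsets used only for illustration.
 *
 *	capacity = virtio_read_device_config_8(sc, MYDEV_CONFIG_CAPACITY);
 *	virtio_write_device_config_4(sc, MYDEV_CONFIG_SOMEREG, value);
 */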
237e0724c53SAlexey Zaytsev 
238e0724c53SAlexey Zaytsev /*
239e0724c53SAlexey Zaytsev  * Start/stop vq interrupt.  No guarantee.
240e0724c53SAlexey Zaytsev  */
241e0724c53SAlexey Zaytsev void
242e0724c53SAlexey Zaytsev virtio_stop_vq_intr(struct virtqueue *vq)
243e0724c53SAlexey Zaytsev {
244e0724c53SAlexey Zaytsev 	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
245e0724c53SAlexey Zaytsev }
246e0724c53SAlexey Zaytsev 
247e0724c53SAlexey Zaytsev void
248e0724c53SAlexey Zaytsev virtio_start_vq_intr(struct virtqueue *vq)
249e0724c53SAlexey Zaytsev {
250e0724c53SAlexey Zaytsev 	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
251e0724c53SAlexey Zaytsev }
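
/*
 * Illustrative sketch (qe and len are hypothetical locals): a vq interrupt
 * handler may suppress further notifications while it drains the used
 * ring, then re-enable them.  The flag is only a hint to the host, hence
 * "no guarantee" above.
 *
 *	virtio_stop_vq_intr(vq);
 *	while ((qe = virtio_pull_chain(vq, &len)) != NULL) {
 *		(process the completed chain)
 *		virtio_free_chain(qe);
 *	}
 *	virtio_start_vq_intr(vq);
 */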
252e0724c53SAlexey Zaytsev 
253e0724c53SAlexey Zaytsev static ddi_dma_attr_t virtio_vq_dma_attr = {
254e0724c53SAlexey Zaytsev 	DMA_ATTR_V0,		/* Version number */
255e0724c53SAlexey Zaytsev 	0,			/* low address */
2568a324c92SDan McDonald 	0x00000FFFFFFFFFFF,	/* high address. Has to fit into 32 bits */
2578a324c92SDan McDonald 				/* after page-shifting */
258e0724c53SAlexey Zaytsev 	0xFFFFFFFF,		/* counter register max */
259e0724c53SAlexey Zaytsev 	VIRTIO_PAGE_SIZE,	/* page alignment required */
260e0724c53SAlexey Zaytsev 	0x3F,			/* burst sizes: 1 - 32 */
261e0724c53SAlexey Zaytsev 	0x1,			/* minimum transfer size */
262e0724c53SAlexey Zaytsev 	0xFFFFFFFF,		/* max transfer size */
263e0724c53SAlexey Zaytsev 	0xFFFFFFFF,		/* address register max */
264e0724c53SAlexey Zaytsev 	1,			/* no scatter-gather */
265e0724c53SAlexey Zaytsev 	1,			/* device operates on bytes */
266e0724c53SAlexey Zaytsev 	0,			/* attr flag: set to 0 */
267e0724c53SAlexey Zaytsev };
268e0724c53SAlexey Zaytsev 
269e0724c53SAlexey Zaytsev static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
270e0724c53SAlexey Zaytsev 	DMA_ATTR_V0,		/* Version number */
271e0724c53SAlexey Zaytsev 	0,			/* low address */
272e0724c53SAlexey Zaytsev 	0xFFFFFFFFFFFFFFFF,	/* high address */
273e0724c53SAlexey Zaytsev 	0xFFFFFFFF,		/* counter register max */
274e0724c53SAlexey Zaytsev 	1,			/* No specific alignment */
275e0724c53SAlexey Zaytsev 	0x3F,			/* burst sizes: 1 - 32 */
276e0724c53SAlexey Zaytsev 	0x1,			/* minimum transfer size */
277e0724c53SAlexey Zaytsev 	0xFFFFFFFF,		/* max transfer size */
278e0724c53SAlexey Zaytsev 	0xFFFFFFFF,		/* address register max */
279e0724c53SAlexey Zaytsev 	1,			/* no scatter-gather */
280e0724c53SAlexey Zaytsev 	1,			/* device operates on bytes */
281e0724c53SAlexey Zaytsev 	0,			/* attr flag: set to 0 */
282e0724c53SAlexey Zaytsev };
283e0724c53SAlexey Zaytsev 
284e0724c53SAlexey Zaytsev /* Same for direct and indirect descriptors. */
285e0724c53SAlexey Zaytsev static ddi_device_acc_attr_t virtio_vq_devattr = {
286e0724c53SAlexey Zaytsev 	DDI_DEVICE_ATTR_V0,
287e0724c53SAlexey Zaytsev 	DDI_NEVERSWAP_ACC,
288e0724c53SAlexey Zaytsev 	DDI_STORECACHING_OK_ACC,
289e0724c53SAlexey Zaytsev 	DDI_DEFAULT_ACC
290e0724c53SAlexey Zaytsev };
291e0724c53SAlexey Zaytsev 
292e0724c53SAlexey Zaytsev static void
293e0724c53SAlexey Zaytsev virtio_free_indirect(struct vq_entry *entry)
294e0724c53SAlexey Zaytsev {
295e0724c53SAlexey Zaytsev 
296e0724c53SAlexey Zaytsev 	(void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
297e0724c53SAlexey Zaytsev 	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
298e0724c53SAlexey Zaytsev 	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
299e0724c53SAlexey Zaytsev 
300e0724c53SAlexey Zaytsev 	entry->qe_indirect_descs = NULL;
301e0724c53SAlexey Zaytsev }
302e0724c53SAlexey Zaytsev 
303e0724c53SAlexey Zaytsev 
304e0724c53SAlexey Zaytsev static int
305e0724c53SAlexey Zaytsev virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
306e0724c53SAlexey Zaytsev {
307e0724c53SAlexey Zaytsev 	int allocsize, num;
308e0724c53SAlexey Zaytsev 	size_t len;
309e0724c53SAlexey Zaytsev 	unsigned int ncookies;
310e0724c53SAlexey Zaytsev 	int ret;
311e0724c53SAlexey Zaytsev 
312e0724c53SAlexey Zaytsev 	num = entry->qe_queue->vq_indirect_num;
313e0724c53SAlexey Zaytsev 	ASSERT(num > 1);
314e0724c53SAlexey Zaytsev 
315e0724c53SAlexey Zaytsev 	allocsize = sizeof (struct vring_desc) * num;
316e0724c53SAlexey Zaytsev 
317e0724c53SAlexey Zaytsev 	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
318e0724c53SAlexey Zaytsev 	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
319e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
320e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
321e0724c53SAlexey Zaytsev 		    "Failed to allocate dma handle for indirect descriptors, "
322e0724c53SAlexey Zaytsev 		    "entry %d, vq %d", entry->qe_index,
323e0724c53SAlexey Zaytsev 		    entry->qe_queue->vq_index);
324e0724c53SAlexey Zaytsev 		goto out_alloc_handle;
325e0724c53SAlexey Zaytsev 	}
326e0724c53SAlexey Zaytsev 
3278a324c92SDan McDonald 	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
3288a324c92SDan McDonald 	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
329e0724c53SAlexey Zaytsev 	    (caddr_t *)&entry->qe_indirect_descs, &len,
330e0724c53SAlexey Zaytsev 	    &entry->qe_indirect_dma_acch);
331e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
332e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
3338a324c92SDan McDonald 		    "Failed to allocate dma memory for indirect descriptors, "
334e0724c53SAlexey Zaytsev 		    "entry %d, vq %d,", entry->qe_index,
335e0724c53SAlexey Zaytsev 		    entry->qe_queue->vq_index);
336e0724c53SAlexey Zaytsev 		goto out_alloc;
337e0724c53SAlexey Zaytsev 	}
338e0724c53SAlexey Zaytsev 
339e0724c53SAlexey Zaytsev 	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);
340e0724c53SAlexey Zaytsev 
341e0724c53SAlexey Zaytsev 	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
342e0724c53SAlexey Zaytsev 	    (caddr_t)entry->qe_indirect_descs, len,
3438a324c92SDan McDonald 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
3448a324c92SDan McDonald 	    &entry->qe_indirect_dma_cookie, &ncookies);
345e0724c53SAlexey Zaytsev 	if (ret != DDI_DMA_MAPPED) {
346e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
347e0724c53SAlexey Zaytsev 		    "Failed to bind dma memory for indirect descriptors, "
348e0724c53SAlexey Zaytsev 		    "entry %d, vq %d", entry->qe_index,
349e0724c53SAlexey Zaytsev 		    entry->qe_queue->vq_index);
350e0724c53SAlexey Zaytsev 		goto out_bind;
351e0724c53SAlexey Zaytsev 	}
352e0724c53SAlexey Zaytsev 
353e0724c53SAlexey Zaytsev 	/* We asked for a single segment */
354e0724c53SAlexey Zaytsev 	ASSERT(ncookies == 1);
355e0724c53SAlexey Zaytsev 
356e0724c53SAlexey Zaytsev 	return (0);
357e0724c53SAlexey Zaytsev 
358e0724c53SAlexey Zaytsev out_bind:
359e0724c53SAlexey Zaytsev 	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
360e0724c53SAlexey Zaytsev out_alloc:
361e0724c53SAlexey Zaytsev 	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
362e0724c53SAlexey Zaytsev out_alloc_handle:
363e0724c53SAlexey Zaytsev 
364e0724c53SAlexey Zaytsev 	return (ret);
365e0724c53SAlexey Zaytsev }
366e0724c53SAlexey Zaytsev 
367e0724c53SAlexey Zaytsev /*
368e0724c53SAlexey Zaytsev  * Initialize the vq structure.
369e0724c53SAlexey Zaytsev  */
370e0724c53SAlexey Zaytsev static int
371e0724c53SAlexey Zaytsev virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
372e0724c53SAlexey Zaytsev {
373e0724c53SAlexey Zaytsev 	int ret;
374e0724c53SAlexey Zaytsev 	uint16_t i;
375e0724c53SAlexey Zaytsev 	int vq_size = vq->vq_num;
376e0724c53SAlexey Zaytsev 	int indirect_num = vq->vq_indirect_num;
377e0724c53SAlexey Zaytsev 
378e0724c53SAlexey Zaytsev 	/* free slot management */
379e0724c53SAlexey Zaytsev 	list_create(&vq->vq_freelist, sizeof (struct vq_entry),
380e0724c53SAlexey Zaytsev 	    offsetof(struct vq_entry, qe_list));
381e0724c53SAlexey Zaytsev 
382e0724c53SAlexey Zaytsev 	for (i = 0; i < vq_size; i++) {
383e0724c53SAlexey Zaytsev 		struct vq_entry *entry = &vq->vq_entries[i];
384e0724c53SAlexey Zaytsev 		list_insert_tail(&vq->vq_freelist, entry);
385e0724c53SAlexey Zaytsev 		entry->qe_index = i;
386e0724c53SAlexey Zaytsev 		entry->qe_desc = &vq->vq_descs[i];
387e0724c53SAlexey Zaytsev 		entry->qe_queue = vq;
388e0724c53SAlexey Zaytsev 
389e0724c53SAlexey Zaytsev 		if (indirect_num) {
390e0724c53SAlexey Zaytsev 			ret = virtio_alloc_indirect(sc, entry);
391e0724c53SAlexey Zaytsev 			if (ret)
392e0724c53SAlexey Zaytsev 				goto out_indirect;
393e0724c53SAlexey Zaytsev 		}
394e0724c53SAlexey Zaytsev 	}
395e0724c53SAlexey Zaytsev 
3968a324c92SDan McDonald 	mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
3978a324c92SDan McDonald 	    DDI_INTR_PRI(sc->sc_intr_prio));
3988a324c92SDan McDonald 	mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
3998a324c92SDan McDonald 	    DDI_INTR_PRI(sc->sc_intr_prio));
4008a324c92SDan McDonald 	mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
4018a324c92SDan McDonald 	    DDI_INTR_PRI(sc->sc_intr_prio));
402e0724c53SAlexey Zaytsev 
403e0724c53SAlexey Zaytsev 	return (0);
404e0724c53SAlexey Zaytsev 
405e0724c53SAlexey Zaytsev out_indirect:
406e0724c53SAlexey Zaytsev 	for (i = 0; i < vq_size; i++) {
407e0724c53SAlexey Zaytsev 		struct vq_entry *entry = &vq->vq_entries[i];
408e0724c53SAlexey Zaytsev 		if (entry->qe_indirect_descs)
409e0724c53SAlexey Zaytsev 			virtio_free_indirect(entry);
410e0724c53SAlexey Zaytsev 	}
411e0724c53SAlexey Zaytsev 
412e0724c53SAlexey Zaytsev 	return (ret);
413e0724c53SAlexey Zaytsev }
414e0724c53SAlexey Zaytsev 
415e0724c53SAlexey Zaytsev /*
416e0724c53SAlexey Zaytsev  * Allocate/free a vq.
417e0724c53SAlexey Zaytsev  */
418e0724c53SAlexey Zaytsev struct virtqueue *
4198a324c92SDan McDonald virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
4208a324c92SDan McDonald     unsigned int indirect_num, const char *name)
421e0724c53SAlexey Zaytsev {
422e0724c53SAlexey Zaytsev 	int vq_size, allocsize1, allocsize2, allocsize = 0;
423e0724c53SAlexey Zaytsev 	int ret;
424e0724c53SAlexey Zaytsev 	unsigned int ncookies;
425e0724c53SAlexey Zaytsev 	size_t len;
426e0724c53SAlexey Zaytsev 	struct virtqueue *vq;
427e0724c53SAlexey Zaytsev 
428e0724c53SAlexey Zaytsev 	ddi_put16(sc->sc_ioh,
429e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
430e0724c53SAlexey Zaytsev 	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
431e0724c53SAlexey Zaytsev 	vq_size = ddi_get16(sc->sc_ioh,
432e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
433e0724c53SAlexey Zaytsev 	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
434e0724c53SAlexey Zaytsev 	if (vq_size == 0) {
435e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
436e0724c53SAlexey Zaytsev 		    "virtqueue doesn't exist, index %d for %s\n", index, name);
437e0724c53SAlexey Zaytsev 		goto out;
438e0724c53SAlexey Zaytsev 	}
439e0724c53SAlexey Zaytsev 
440e0724c53SAlexey Zaytsev 	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
441e0724c53SAlexey Zaytsev 
442e0724c53SAlexey Zaytsev 	/* size 0 => use native vq size, good for receive queues. */
443e0724c53SAlexey Zaytsev 	if (size)
444e0724c53SAlexey Zaytsev 		vq_size = MIN(vq_size, size);
445e0724c53SAlexey Zaytsev 
446e0724c53SAlexey Zaytsev 	/* allocsize1: descriptor table + avail ring + pad */
447e0724c53SAlexey Zaytsev 	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
4488a324c92SDan McDonald 	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
449e0724c53SAlexey Zaytsev 	/* allocsize2: used ring + pad */
4508a324c92SDan McDonald 	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
4518a324c92SDan McDonald 	    sizeof (struct vring_used_elem) * vq_size);
452e0724c53SAlexey Zaytsev 
453e0724c53SAlexey Zaytsev 	allocsize = allocsize1 + allocsize2;
454e0724c53SAlexey Zaytsev 
455e0724c53SAlexey Zaytsev 	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
456e0724c53SAlexey Zaytsev 	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
457e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
458e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
459e0724c53SAlexey Zaytsev 		    "Failed to allocate dma handle for vq %d", index);
460e0724c53SAlexey Zaytsev 		goto out_alloc_handle;
461e0724c53SAlexey Zaytsev 	}
462e0724c53SAlexey Zaytsev 
463e0724c53SAlexey Zaytsev 	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
464e0724c53SAlexey Zaytsev 	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
465e0724c53SAlexey Zaytsev 	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
466e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
467e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
4688a324c92SDan McDonald 		    "Failed to allocate dma memory for vq %d", index);
469e0724c53SAlexey Zaytsev 		goto out_alloc;
470e0724c53SAlexey Zaytsev 	}
471e0724c53SAlexey Zaytsev 
472e0724c53SAlexey Zaytsev 	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
4738a324c92SDan McDonald 	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
474e0724c53SAlexey Zaytsev 	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
475e0724c53SAlexey Zaytsev 	if (ret != DDI_DMA_MAPPED) {
476e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
477e0724c53SAlexey Zaytsev 		    "Failed to bind dma memory for vq %d", index);
478e0724c53SAlexey Zaytsev 		goto out_bind;
479e0724c53SAlexey Zaytsev 	}
480e0724c53SAlexey Zaytsev 
481e0724c53SAlexey Zaytsev 	/* We asked for a single segment */
482e0724c53SAlexey Zaytsev 	ASSERT(ncookies == 1);
483e0724c53SAlexey Zaytsev 	/* and page-aligned buffers. */
484e0724c53SAlexey Zaytsev 	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
485e0724c53SAlexey Zaytsev 
486e0724c53SAlexey Zaytsev 	(void) memset(vq->vq_vaddr, 0, allocsize);
487e0724c53SAlexey Zaytsev 
488e0724c53SAlexey Zaytsev 	/* Make sure all zeros hit the buffer before we point the host to it */
489e0724c53SAlexey Zaytsev 	membar_producer();
490e0724c53SAlexey Zaytsev 
491e0724c53SAlexey Zaytsev 	/* set the vq address */
492e0724c53SAlexey Zaytsev 	ddi_put32(sc->sc_ioh,
493e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
494e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
495e0724c53SAlexey Zaytsev 	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));
496e0724c53SAlexey Zaytsev 
497e0724c53SAlexey Zaytsev 	/* remember addresses and offsets for later use */
498e0724c53SAlexey Zaytsev 	vq->vq_owner = sc;
499e0724c53SAlexey Zaytsev 	vq->vq_num = vq_size;
500e0724c53SAlexey Zaytsev 	vq->vq_index = index;
501e0724c53SAlexey Zaytsev 	vq->vq_descs = vq->vq_vaddr;
502e0724c53SAlexey Zaytsev 	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
503e0724c53SAlexey Zaytsev 	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
504e0724c53SAlexey Zaytsev 	vq->vq_usedoffset = allocsize1;
505e0724c53SAlexey Zaytsev 	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
506e0724c53SAlexey Zaytsev 
507e0724c53SAlexey Zaytsev 	ASSERT(indirect_num == 0 ||
508e0724c53SAlexey Zaytsev 	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
509e0724c53SAlexey Zaytsev 	vq->vq_indirect_num = indirect_num;
510e0724c53SAlexey Zaytsev 
511e0724c53SAlexey Zaytsev 	/* free slot management */
512e0724c53SAlexey Zaytsev 	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
513e0724c53SAlexey Zaytsev 	    KM_SLEEP);
514e0724c53SAlexey Zaytsev 
515e0724c53SAlexey Zaytsev 	ret = virtio_init_vq(sc, vq);
516e0724c53SAlexey Zaytsev 	if (ret)
517e0724c53SAlexey Zaytsev 		goto out_init;
518e0724c53SAlexey Zaytsev 
519e0724c53SAlexey Zaytsev 	dev_debug(sc->sc_dev, CE_NOTE,
5208a324c92SDan McDonald 	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
521e0724c53SAlexey Zaytsev 	    vq_size, index, name, indirect_num * vq_size);
522e0724c53SAlexey Zaytsev 
523e0724c53SAlexey Zaytsev 	return (vq);
524e0724c53SAlexey Zaytsev 
525e0724c53SAlexey Zaytsev out_init:
526e0724c53SAlexey Zaytsev 	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
527e0724c53SAlexey Zaytsev 	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
528e0724c53SAlexey Zaytsev out_bind:
529e0724c53SAlexey Zaytsev 	ddi_dma_mem_free(&vq->vq_dma_acch);
530e0724c53SAlexey Zaytsev out_alloc:
531e0724c53SAlexey Zaytsev 	ddi_dma_free_handle(&vq->vq_dma_handle);
532e0724c53SAlexey Zaytsev out_alloc_handle:
533e0724c53SAlexey Zaytsev 	kmem_free(vq, sizeof (struct virtqueue));
534e0724c53SAlexey Zaytsev out:
535e0724c53SAlexey Zaytsev 	return (NULL);
536e0724c53SAlexey Zaytsev }
537e0724c53SAlexey Zaytsev 
538e0724c53SAlexey Zaytsev void
539e0724c53SAlexey Zaytsev virtio_free_vq(struct virtqueue *vq)
540e0724c53SAlexey Zaytsev {
541e0724c53SAlexey Zaytsev 	struct virtio_softc *sc = vq->vq_owner;
542e0724c53SAlexey Zaytsev 	int i;
543e0724c53SAlexey Zaytsev 
544e0724c53SAlexey Zaytsev 	/* tell device that there's no virtqueue any longer */
545e0724c53SAlexey Zaytsev 	ddi_put16(sc->sc_ioh,
546e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
547e0724c53SAlexey Zaytsev 	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
548e0724c53SAlexey Zaytsev 	    vq->vq_index);
549e0724c53SAlexey Zaytsev 	ddi_put32(sc->sc_ioh,
550e0724c53SAlexey Zaytsev 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
551e0724c53SAlexey Zaytsev 	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
552e0724c53SAlexey Zaytsev 
553e0724c53SAlexey Zaytsev 	/* Free the indirect descriptors, if any. */
554e0724c53SAlexey Zaytsev 	for (i = 0; i < vq->vq_num; i++) {
555e0724c53SAlexey Zaytsev 		struct vq_entry *entry = &vq->vq_entries[i];
556e0724c53SAlexey Zaytsev 		if (entry->qe_indirect_descs)
557e0724c53SAlexey Zaytsev 			virtio_free_indirect(entry);
558e0724c53SAlexey Zaytsev 	}
559e0724c53SAlexey Zaytsev 
560e0724c53SAlexey Zaytsev 	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
561e0724c53SAlexey Zaytsev 
562e0724c53SAlexey Zaytsev 	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
563e0724c53SAlexey Zaytsev 	ddi_dma_mem_free(&vq->vq_dma_acch);
564e0724c53SAlexey Zaytsev 	ddi_dma_free_handle(&vq->vq_dma_handle);
565e0724c53SAlexey Zaytsev 
566e0724c53SAlexey Zaytsev 	mutex_destroy(&vq->vq_used_lock);
567e0724c53SAlexey Zaytsev 	mutex_destroy(&vq->vq_avail_lock);
568e0724c53SAlexey Zaytsev 	mutex_destroy(&vq->vq_freelist_lock);
569e0724c53SAlexey Zaytsev 
570e0724c53SAlexey Zaytsev 	kmem_free(vq, sizeof (struct virtqueue));
571e0724c53SAlexey Zaytsev }
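
/*
 * Illustrative allocation/teardown sketch (queue indexes, names and the
 * rx_vq/tx_vq/my_rx_indirect identifiers are hypothetical, modelled on a
 * typical two-queue device):
 *
 *	rx_vq = virtio_alloc_vq(sc, 0, 0, my_rx_indirect, "rx");
 *	tx_vq = virtio_alloc_vq(sc, 1, 0, 0, "tx");
 *	...
 *	virtio_free_vq(tx_vq);
 *	virtio_free_vq(rx_vq);
 */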
572e0724c53SAlexey Zaytsev 
573e0724c53SAlexey Zaytsev /*
574e0724c53SAlexey Zaytsev  * Free descriptor management.
575e0724c53SAlexey Zaytsev  */
576e0724c53SAlexey Zaytsev struct vq_entry *
577e0724c53SAlexey Zaytsev vq_alloc_entry(struct virtqueue *vq)
578e0724c53SAlexey Zaytsev {
579e0724c53SAlexey Zaytsev 	struct vq_entry *qe;
580e0724c53SAlexey Zaytsev 
581e0724c53SAlexey Zaytsev 	mutex_enter(&vq->vq_freelist_lock);
582e0724c53SAlexey Zaytsev 	if (list_is_empty(&vq->vq_freelist)) {
583e0724c53SAlexey Zaytsev 		mutex_exit(&vq->vq_freelist_lock);
584e0724c53SAlexey Zaytsev 		return (NULL);
585e0724c53SAlexey Zaytsev 	}
586e0724c53SAlexey Zaytsev 	qe = list_remove_head(&vq->vq_freelist);
587e0724c53SAlexey Zaytsev 
588e0724c53SAlexey Zaytsev 	ASSERT(vq->vq_used_entries >= 0);
589e0724c53SAlexey Zaytsev 	vq->vq_used_entries++;
590e0724c53SAlexey Zaytsev 
591e0724c53SAlexey Zaytsev 	mutex_exit(&vq->vq_freelist_lock);
592e0724c53SAlexey Zaytsev 
593e0724c53SAlexey Zaytsev 	qe->qe_next = NULL;
594e0724c53SAlexey Zaytsev 	qe->qe_indirect_next = 0;
595e0724c53SAlexey Zaytsev 	(void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));
596e0724c53SAlexey Zaytsev 
597e0724c53SAlexey Zaytsev 	return (qe);
598e0724c53SAlexey Zaytsev }
599e0724c53SAlexey Zaytsev 
600e0724c53SAlexey Zaytsev void
601e0724c53SAlexey Zaytsev vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
602e0724c53SAlexey Zaytsev {
603e0724c53SAlexey Zaytsev 	mutex_enter(&vq->vq_freelist_lock);
604e0724c53SAlexey Zaytsev 
605e0724c53SAlexey Zaytsev 	list_insert_head(&vq->vq_freelist, qe);
606e0724c53SAlexey Zaytsev 	vq->vq_used_entries--;
607e0724c53SAlexey Zaytsev 	ASSERT(vq->vq_used_entries >= 0);
608e0724c53SAlexey Zaytsev 	mutex_exit(&vq->vq_freelist_lock);
609e0724c53SAlexey Zaytsev }
610e0724c53SAlexey Zaytsev 
611e0724c53SAlexey Zaytsev /*
612e0724c53SAlexey Zaytsev  * We (intentionally) don't have a global vq mutex, so you are
613e0724c53SAlexey Zaytsev  * responsible for external locking to avoid allocating/freeing any
614e0724c53SAlexey Zaytsev  * entries before using the returned value. Have fun.
615e0724c53SAlexey Zaytsev  */
616e0724c53SAlexey Zaytsev uint_t
617e0724c53SAlexey Zaytsev vq_num_used(struct virtqueue *vq)
618e0724c53SAlexey Zaytsev {
619e0724c53SAlexey Zaytsev 	/* vq->vq_freelist_lock would not help here. */
620e0724c53SAlexey Zaytsev 	return (vq->vq_used_entries);
621e0724c53SAlexey Zaytsev }
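
/*
 * Illustrative sketch, assuming the caller serializes with its own lock
 * (my_tx_lock and tx_vq are hypothetical): a transmit path might use the
 * count to decide whether the ring is too full to accept more work.
 *
 *	mutex_enter(&my_tx_lock);
 *	if (vq_num_used(tx_vq) >= tx_vq->vq_num - 1)
 *		(defer the request)
 *	mutex_exit(&my_tx_lock);
 */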
622e0724c53SAlexey Zaytsev 
623e0724c53SAlexey Zaytsev static inline void
624e0724c53SAlexey Zaytsev virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
625e0724c53SAlexey Zaytsev     boolean_t write)
626e0724c53SAlexey Zaytsev {
627e0724c53SAlexey Zaytsev 	desc->addr = paddr;
628e0724c53SAlexey Zaytsev 	desc->len = len;
629e0724c53SAlexey Zaytsev 	desc->next = 0;
630e0724c53SAlexey Zaytsev 	desc->flags = 0;
631e0724c53SAlexey Zaytsev 
632e0724c53SAlexey Zaytsev 	/* 'write' - from the driver's point of view */
633e0724c53SAlexey Zaytsev 	if (!write)
634e0724c53SAlexey Zaytsev 		desc->flags = VRING_DESC_F_WRITE;
635e0724c53SAlexey Zaytsev }
636e0724c53SAlexey Zaytsev 
637e0724c53SAlexey Zaytsev void
638e0724c53SAlexey Zaytsev virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
639e0724c53SAlexey Zaytsev     boolean_t write)
640e0724c53SAlexey Zaytsev {
641e0724c53SAlexey Zaytsev 	virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
642e0724c53SAlexey Zaytsev }
643e0724c53SAlexey Zaytsev 
6448a324c92SDan McDonald unsigned int
6458a324c92SDan McDonald virtio_ve_indirect_available(struct vq_entry *qe)
6468a324c92SDan McDonald {
6478a324c92SDan McDonald 	return (qe->qe_queue->vq_indirect_num - (qe->qe_indirect_next - 1));
6488a324c92SDan McDonald }
6498a324c92SDan McDonald 
650e0724c53SAlexey Zaytsev void
651e0724c53SAlexey Zaytsev virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
652e0724c53SAlexey Zaytsev     boolean_t write)
653e0724c53SAlexey Zaytsev {
654e0724c53SAlexey Zaytsev 	struct vring_desc *indirect_desc;
655e0724c53SAlexey Zaytsev 
656e0724c53SAlexey Zaytsev 	ASSERT(qe->qe_queue->vq_indirect_num);
657e0724c53SAlexey Zaytsev 	ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
658e0724c53SAlexey Zaytsev 
659e0724c53SAlexey Zaytsev 	indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
660e0724c53SAlexey Zaytsev 	virtio_ve_set_desc(indirect_desc, paddr, len, write);
661e0724c53SAlexey Zaytsev 	qe->qe_indirect_next++;
662e0724c53SAlexey Zaytsev }
663e0724c53SAlexey Zaytsev 
664e0724c53SAlexey Zaytsev void
665e0724c53SAlexey Zaytsev virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
666e0724c53SAlexey Zaytsev     ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
667e0724c53SAlexey Zaytsev {
668e0724c53SAlexey Zaytsev 	int i;
669e0724c53SAlexey Zaytsev 
670e0724c53SAlexey Zaytsev 	for (i = 0; i < ncookies; i++) {
671e0724c53SAlexey Zaytsev 		virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
672e0724c53SAlexey Zaytsev 		    dma_cookie.dmac_size, write);
673e0724c53SAlexey Zaytsev 		ddi_dma_nextcookie(dma_handle, &dma_cookie);
674e0724c53SAlexey Zaytsev 	}
675e0724c53SAlexey Zaytsev }
676e0724c53SAlexey Zaytsev 
677e0724c53SAlexey Zaytsev void
678e0724c53SAlexey Zaytsev virtio_sync_vq(struct virtqueue *vq)
679e0724c53SAlexey Zaytsev {
680e0724c53SAlexey Zaytsev 	struct virtio_softc *vsc = vq->vq_owner;
681e0724c53SAlexey Zaytsev 
682e0724c53SAlexey Zaytsev 	/* Make sure the avail ring update hit the buffer */
683e0724c53SAlexey Zaytsev 	membar_producer();
684e0724c53SAlexey Zaytsev 
685e0724c53SAlexey Zaytsev 	vq->vq_avail->idx = vq->vq_avail_idx;
686e0724c53SAlexey Zaytsev 
687e0724c53SAlexey Zaytsev 	/* Make sure the avail idx update hits the buffer */
688e0724c53SAlexey Zaytsev 	membar_producer();
689e0724c53SAlexey Zaytsev 
690e0724c53SAlexey Zaytsev 	/* Make sure we see the flags update */
691e0724c53SAlexey Zaytsev 	membar_consumer();
692e0724c53SAlexey Zaytsev 
6938a324c92SDan McDonald 	if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
694e0724c53SAlexey Zaytsev 		ddi_put16(vsc->sc_ioh,
695e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
696e0724c53SAlexey Zaytsev 		    (uint16_t *)(vsc->sc_io_addr +
697e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_NOTIFY),
698e0724c53SAlexey Zaytsev 		    vq->vq_index);
699e0724c53SAlexey Zaytsev 	}
7008a324c92SDan McDonald }
701e0724c53SAlexey Zaytsev 
702e0724c53SAlexey Zaytsev void
703e0724c53SAlexey Zaytsev virtio_push_chain(struct vq_entry *qe, boolean_t sync)
704e0724c53SAlexey Zaytsev {
705e0724c53SAlexey Zaytsev 	struct virtqueue *vq = qe->qe_queue;
706e0724c53SAlexey Zaytsev 	struct vq_entry *head = qe;
707e0724c53SAlexey Zaytsev 	struct vring_desc *desc;
708e0724c53SAlexey Zaytsev 	int idx;
709e0724c53SAlexey Zaytsev 
710e0724c53SAlexey Zaytsev 	ASSERT(qe);
711e0724c53SAlexey Zaytsev 
712e0724c53SAlexey Zaytsev 	/*
713e0724c53SAlexey Zaytsev 	 * Bind the descs together, paddr and len should be already
714e0724c53SAlexey Zaytsev 	 * set with virtio_ve_set
715e0724c53SAlexey Zaytsev 	 */
716e0724c53SAlexey Zaytsev 	do {
717e0724c53SAlexey Zaytsev 		/* Bind the indirect descriptors */
718e0724c53SAlexey Zaytsev 		if (qe->qe_indirect_next > 1) {
719e0724c53SAlexey Zaytsev 			uint16_t i = 0;
720e0724c53SAlexey Zaytsev 
721e0724c53SAlexey Zaytsev 			/*
722e0724c53SAlexey Zaytsev 			 * Set the pointer/flags to the
723e0724c53SAlexey Zaytsev 			 * first indirect descriptor
724e0724c53SAlexey Zaytsev 			 */
725e0724c53SAlexey Zaytsev 			virtio_ve_set_desc(qe->qe_desc,
726e0724c53SAlexey Zaytsev 			    qe->qe_indirect_dma_cookie.dmac_laddress,
727e0724c53SAlexey Zaytsev 			    sizeof (struct vring_desc) * qe->qe_indirect_next,
728e0724c53SAlexey Zaytsev 			    B_FALSE);
729e0724c53SAlexey Zaytsev 			qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;
730e0724c53SAlexey Zaytsev 
731e0724c53SAlexey Zaytsev 			/* For all but the last one, add the next index/flag */
732e0724c53SAlexey Zaytsev 			do {
733e0724c53SAlexey Zaytsev 				desc = &qe->qe_indirect_descs[i];
734e0724c53SAlexey Zaytsev 				i++;
735e0724c53SAlexey Zaytsev 
736e0724c53SAlexey Zaytsev 				desc->flags |= VRING_DESC_F_NEXT;
737e0724c53SAlexey Zaytsev 				desc->next = i;
738e0724c53SAlexey Zaytsev 			} while (i < qe->qe_indirect_next - 1);
739e0724c53SAlexey Zaytsev 
740e0724c53SAlexey Zaytsev 		}
741e0724c53SAlexey Zaytsev 
742e0724c53SAlexey Zaytsev 		if (qe->qe_next) {
743e0724c53SAlexey Zaytsev 			qe->qe_desc->flags |= VRING_DESC_F_NEXT;
744e0724c53SAlexey Zaytsev 			qe->qe_desc->next = qe->qe_next->qe_index;
745e0724c53SAlexey Zaytsev 		}
746e0724c53SAlexey Zaytsev 
747e0724c53SAlexey Zaytsev 		qe = qe->qe_next;
748e0724c53SAlexey Zaytsev 	} while (qe);
749e0724c53SAlexey Zaytsev 
750e0724c53SAlexey Zaytsev 	mutex_enter(&vq->vq_avail_lock);
751e0724c53SAlexey Zaytsev 	idx = vq->vq_avail_idx;
752e0724c53SAlexey Zaytsev 	vq->vq_avail_idx++;
753e0724c53SAlexey Zaytsev 
754e0724c53SAlexey Zaytsev 	/* Make sure the bits hit the descriptor(s) */
755e0724c53SAlexey Zaytsev 	membar_producer();
756e0724c53SAlexey Zaytsev 	vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
757e0724c53SAlexey Zaytsev 
758e0724c53SAlexey Zaytsev 	/* Notify the device, if needed. */
759e0724c53SAlexey Zaytsev 	if (sync)
760e0724c53SAlexey Zaytsev 		virtio_sync_vq(vq);
761e0724c53SAlexey Zaytsev 
762e0724c53SAlexey Zaytsev 	mutex_exit(&vq->vq_avail_lock);
763e0724c53SAlexey Zaytsev }
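
/*
 * Illustrative submit sketch (buf_cookie and buf_len are hypothetical; a
 * real caller gets them from its own DMA binding): paddr/len are set with
 * virtio_ve_set() and the chain is then exposed to the host.
 *
 *	qe = vq_alloc_entry(vq);
 *	if (qe == NULL)
 *		(ring is full, retry later)
 *	virtio_ve_set(qe, buf_cookie.dmac_laddress, buf_len, B_TRUE);
 *	virtio_push_chain(qe, B_TRUE);
 */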
764e0724c53SAlexey Zaytsev 
7658a324c92SDan McDonald /*
7668a324c92SDan McDonald  * Get a chain of descriptors from the used ring, if one is available.
7678a324c92SDan McDonald  */
768e0724c53SAlexey Zaytsev struct vq_entry *
769e0724c53SAlexey Zaytsev virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
770e0724c53SAlexey Zaytsev {
771e0724c53SAlexey Zaytsev 	struct vq_entry *head;
772e0724c53SAlexey Zaytsev 	int slot;
773e0724c53SAlexey Zaytsev 	int usedidx;
774e0724c53SAlexey Zaytsev 
775e0724c53SAlexey Zaytsev 	mutex_enter(&vq->vq_used_lock);
776e0724c53SAlexey Zaytsev 
777e0724c53SAlexey Zaytsev 	/* No used entries? Bye. */
778e0724c53SAlexey Zaytsev 	if (vq->vq_used_idx == vq->vq_used->idx) {
779e0724c53SAlexey Zaytsev 		mutex_exit(&vq->vq_used_lock);
780e0724c53SAlexey Zaytsev 		return (NULL);
781e0724c53SAlexey Zaytsev 	}
782e0724c53SAlexey Zaytsev 
783e0724c53SAlexey Zaytsev 	usedidx = vq->vq_used_idx;
784e0724c53SAlexey Zaytsev 	vq->vq_used_idx++;
785e0724c53SAlexey Zaytsev 	mutex_exit(&vq->vq_used_lock);
786e0724c53SAlexey Zaytsev 
787e0724c53SAlexey Zaytsev 	usedidx %= vq->vq_num;
788e0724c53SAlexey Zaytsev 
789e0724c53SAlexey Zaytsev 	/* Make sure we do the next step _after_ checking the idx. */
790e0724c53SAlexey Zaytsev 	membar_consumer();
791e0724c53SAlexey Zaytsev 
792e0724c53SAlexey Zaytsev 	slot = vq->vq_used->ring[usedidx].id;
793e0724c53SAlexey Zaytsev 	*len = vq->vq_used->ring[usedidx].len;
794e0724c53SAlexey Zaytsev 
795e0724c53SAlexey Zaytsev 	head = &vq->vq_entries[slot];
796e0724c53SAlexey Zaytsev 
797e0724c53SAlexey Zaytsev 	return (head);
798e0724c53SAlexey Zaytsev }
799e0724c53SAlexey Zaytsev 
800e0724c53SAlexey Zaytsev void
801e0724c53SAlexey Zaytsev virtio_free_chain(struct vq_entry *qe)
802e0724c53SAlexey Zaytsev {
803e0724c53SAlexey Zaytsev 	struct vq_entry *tmp;
804e0724c53SAlexey Zaytsev 	struct virtqueue *vq = qe->qe_queue;
805e0724c53SAlexey Zaytsev 
806e0724c53SAlexey Zaytsev 	ASSERT(qe);
807e0724c53SAlexey Zaytsev 
808e0724c53SAlexey Zaytsev 	do {
809e0724c53SAlexey Zaytsev 		ASSERT(qe->qe_queue == vq);
810e0724c53SAlexey Zaytsev 		tmp = qe->qe_next;
811e0724c53SAlexey Zaytsev 		vq_free_entry(vq, qe);
812e0724c53SAlexey Zaytsev 		qe = tmp;
8138a324c92SDan McDonald 	} while (tmp != NULL);
814e0724c53SAlexey Zaytsev }
815e0724c53SAlexey Zaytsev 
816e0724c53SAlexey Zaytsev void
817e0724c53SAlexey Zaytsev virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
818e0724c53SAlexey Zaytsev {
819e0724c53SAlexey Zaytsev 	first->qe_next = second;
820e0724c53SAlexey Zaytsev }
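
/*
 * Illustrative sketch (hdr_qe and data_qe are hypothetical entries, e.g.
 * a request header followed by a data buffer): two direct entries can be
 * linked into a single chain before it is pushed.
 *
 *	virtio_ventry_stick(hdr_qe, data_qe);
 *	virtio_push_chain(hdr_qe, B_TRUE);
 */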
821e0724c53SAlexey Zaytsev 
822e0724c53SAlexey Zaytsev static int
823e0724c53SAlexey Zaytsev virtio_register_msi(struct virtio_softc *sc,
824e0724c53SAlexey Zaytsev     struct virtio_int_handler *config_handler,
8258a324c92SDan McDonald     struct virtio_int_handler vq_handlers[], int intr_types)
826e0724c53SAlexey Zaytsev {
827e0724c53SAlexey Zaytsev 	int count, actual;
828e0724c53SAlexey Zaytsev 	int int_type;
829e0724c53SAlexey Zaytsev 	int i;
830e0724c53SAlexey Zaytsev 	int handler_count;
831e0724c53SAlexey Zaytsev 	int ret;
832e0724c53SAlexey Zaytsev 
833e0724c53SAlexey Zaytsev 	/* If both MSI and MSI-x are reported, prefer MSI-x. */
834e0724c53SAlexey Zaytsev 	int_type = DDI_INTR_TYPE_MSI;
835e0724c53SAlexey Zaytsev 	if (intr_types & DDI_INTR_TYPE_MSIX)
836e0724c53SAlexey Zaytsev 		int_type = DDI_INTR_TYPE_MSIX;
837e0724c53SAlexey Zaytsev 
838e0724c53SAlexey Zaytsev 	/* Walk the handler table to get the number of handlers. */
839e0724c53SAlexey Zaytsev 	for (handler_count = 0;
840e0724c53SAlexey Zaytsev 	    vq_handlers && vq_handlers[handler_count].vh_func;
841e0724c53SAlexey Zaytsev 	    handler_count++)
842e0724c53SAlexey Zaytsev 		;
843e0724c53SAlexey Zaytsev 
844e0724c53SAlexey Zaytsev 	/* +1 if there is a config change handler. */
8458a324c92SDan McDonald 	if (config_handler != NULL)
846e0724c53SAlexey Zaytsev 		handler_count++;
847e0724c53SAlexey Zaytsev 
848e0724c53SAlexey Zaytsev 	/* Number of MSIs supported by the device. */
849e0724c53SAlexey Zaytsev 	ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
850e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
851e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
852e0724c53SAlexey Zaytsev 		return (ret);
853e0724c53SAlexey Zaytsev 	}
854e0724c53SAlexey Zaytsev 
855e0724c53SAlexey Zaytsev 	/*
856e0724c53SAlexey Zaytsev 	 * Those who try to register more handlers than the device
857e0724c53SAlexey Zaytsev 	 * supports shall suffer.
858e0724c53SAlexey Zaytsev 	 */
859e0724c53SAlexey Zaytsev 	ASSERT(handler_count <= count);
860e0724c53SAlexey Zaytsev 
8618a324c92SDan McDonald 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
8628a324c92SDan McDonald 	    handler_count, KM_SLEEP);
863e0724c53SAlexey Zaytsev 
864e0724c53SAlexey Zaytsev 	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
865e0724c53SAlexey Zaytsev 	    handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
866e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
867e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
868e0724c53SAlexey Zaytsev 		goto out_msi_alloc;
869e0724c53SAlexey Zaytsev 	}
870e0724c53SAlexey Zaytsev 
871e0724c53SAlexey Zaytsev 	if (actual != handler_count) {
872e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
873e0724c53SAlexey Zaytsev 		    "Not enough MSI available: need %d, available %d",
874e0724c53SAlexey Zaytsev 		    handler_count, actual);
875e0724c53SAlexey Zaytsev 		goto out_msi_available;
876e0724c53SAlexey Zaytsev 	}
877e0724c53SAlexey Zaytsev 
878e0724c53SAlexey Zaytsev 	sc->sc_intr_num = handler_count;
879e0724c53SAlexey Zaytsev 	sc->sc_intr_config = B_FALSE;
8808a324c92SDan McDonald 	if (config_handler != NULL) {
881e0724c53SAlexey Zaytsev 		sc->sc_intr_config = B_TRUE;
882e0724c53SAlexey Zaytsev 	}
883e0724c53SAlexey Zaytsev 
884e0724c53SAlexey Zaytsev 	/* Assume they are all same priority */
885e0724c53SAlexey Zaytsev 	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
886e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
887e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
888e0724c53SAlexey Zaytsev 		goto out_msi_prio;
889e0724c53SAlexey Zaytsev 	}
890e0724c53SAlexey Zaytsev 
891e0724c53SAlexey Zaytsev 	/* Add the vq handlers */
892e0724c53SAlexey Zaytsev 	for (i = 0; vq_handlers[i].vh_func; i++) {
893e0724c53SAlexey Zaytsev 		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
8948a324c92SDan McDonald 		    vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
895e0724c53SAlexey Zaytsev 		if (ret != DDI_SUCCESS) {
896e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN,
897e0724c53SAlexey Zaytsev 			    "ddi_intr_add_handler failed");
898e0724c53SAlexey Zaytsev 			/* Remove the handlers that succeeded. */
899e0724c53SAlexey Zaytsev 			while (--i >= 0) {
900e0724c53SAlexey Zaytsev 				(void) ddi_intr_remove_handler(
901e0724c53SAlexey Zaytsev 				    sc->sc_intr_htable[i]);
902e0724c53SAlexey Zaytsev 			}
903e0724c53SAlexey Zaytsev 			goto out_add_handlers;
904e0724c53SAlexey Zaytsev 		}
905e0724c53SAlexey Zaytsev 	}
906e0724c53SAlexey Zaytsev 
907e0724c53SAlexey Zaytsev 	/* Don't forget the config handler */
9088a324c92SDan McDonald 	if (config_handler != NULL) {
909e0724c53SAlexey Zaytsev 		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
9108a324c92SDan McDonald 		    config_handler->vh_func, sc, config_handler->vh_priv);
911e0724c53SAlexey Zaytsev 		if (ret != DDI_SUCCESS) {
912e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN,
913e0724c53SAlexey Zaytsev 			    "ddi_intr_add_handler failed");
914e0724c53SAlexey Zaytsev 			/* Remove the handlers that succeeded. */
915e0724c53SAlexey Zaytsev 			while (--i >= 0) {
916e0724c53SAlexey Zaytsev 				(void) ddi_intr_remove_handler(
917e0724c53SAlexey Zaytsev 				    sc->sc_intr_htable[i]);
918e0724c53SAlexey Zaytsev 			}
919e0724c53SAlexey Zaytsev 			goto out_add_handlers;
920e0724c53SAlexey Zaytsev 		}
921e0724c53SAlexey Zaytsev 	}
922e0724c53SAlexey Zaytsev 
9238a324c92SDan McDonald 	ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
924*17ad7f9fSAndriy Gapon 	if (ret == DDI_SUCCESS) {
925*17ad7f9fSAndriy Gapon 		sc->sc_int_type = int_type;
926*17ad7f9fSAndriy Gapon 		return (DDI_SUCCESS);
927*17ad7f9fSAndriy Gapon 	}
928e0724c53SAlexey Zaytsev 
929e0724c53SAlexey Zaytsev out_add_handlers:
930e0724c53SAlexey Zaytsev out_msi_prio:
931e0724c53SAlexey Zaytsev out_msi_available:
932e0724c53SAlexey Zaytsev 	for (i = 0; i < actual; i++)
933e0724c53SAlexey Zaytsev 		(void) ddi_intr_free(sc->sc_intr_htable[i]);
934e0724c53SAlexey Zaytsev out_msi_alloc:
935*17ad7f9fSAndriy Gapon 	kmem_free(sc->sc_intr_htable,
936*17ad7f9fSAndriy Gapon 	    sizeof (ddi_intr_handle_t) * handler_count);
937e0724c53SAlexey Zaytsev 
938e0724c53SAlexey Zaytsev 	return (ret);
939e0724c53SAlexey Zaytsev }
940e0724c53SAlexey Zaytsev 
941e0724c53SAlexey Zaytsev struct virtio_handler_container {
942e0724c53SAlexey Zaytsev 	int nhandlers;
943e0724c53SAlexey Zaytsev 	struct virtio_int_handler config_handler;
944e0724c53SAlexey Zaytsev 	struct virtio_int_handler vq_handlers[];
945e0724c53SAlexey Zaytsev };
946e0724c53SAlexey Zaytsev 
947e0724c53SAlexey Zaytsev uint_t
948e0724c53SAlexey Zaytsev virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
949e0724c53SAlexey Zaytsev {
950e0724c53SAlexey Zaytsev 	struct virtio_softc *sc = (void *)arg1;
951e0724c53SAlexey Zaytsev 	struct virtio_handler_container *vhc = (void *)arg2;
952e0724c53SAlexey Zaytsev 	uint8_t isr_status;
953e0724c53SAlexey Zaytsev 	int i;
954e0724c53SAlexey Zaytsev 
955e0724c53SAlexey Zaytsev 	isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
956e0724c53SAlexey Zaytsev 	    VIRTIO_CONFIG_ISR_STATUS));
957e0724c53SAlexey Zaytsev 
958e0724c53SAlexey Zaytsev 	if (!isr_status)
959e0724c53SAlexey Zaytsev 		return (DDI_INTR_UNCLAIMED);
960e0724c53SAlexey Zaytsev 
961e0724c53SAlexey Zaytsev 	if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
962e0724c53SAlexey Zaytsev 	    vhc->config_handler.vh_func) {
963e0724c53SAlexey Zaytsev 		vhc->config_handler.vh_func((void *)sc,
964e0724c53SAlexey Zaytsev 		    vhc->config_handler.vh_priv);
965e0724c53SAlexey Zaytsev 	}
966e0724c53SAlexey Zaytsev 
967e0724c53SAlexey Zaytsev 	/* Notify all handlers */
968e0724c53SAlexey Zaytsev 	for (i = 0; i < vhc->nhandlers; i++) {
969e0724c53SAlexey Zaytsev 		vhc->vq_handlers[i].vh_func((void *)sc,
970e0724c53SAlexey Zaytsev 		    vhc->vq_handlers[i].vh_priv);
971e0724c53SAlexey Zaytsev 	}
972e0724c53SAlexey Zaytsev 
973e0724c53SAlexey Zaytsev 	return (DDI_INTR_CLAIMED);
974e0724c53SAlexey Zaytsev }
975e0724c53SAlexey Zaytsev 
976e0724c53SAlexey Zaytsev /*
977e0724c53SAlexey Zaytsev  * config_handler and vq_handlers may be allocated on stack.
978e0724c53SAlexey Zaytsev  * Take precautions not to lose them.
979e0724c53SAlexey Zaytsev  */
980e0724c53SAlexey Zaytsev static int
981e0724c53SAlexey Zaytsev virtio_register_intx(struct virtio_softc *sc,
982e0724c53SAlexey Zaytsev     struct virtio_int_handler *config_handler,
983e0724c53SAlexey Zaytsev     struct virtio_int_handler vq_handlers[])
984e0724c53SAlexey Zaytsev {
985e0724c53SAlexey Zaytsev 	int vq_handler_count;
986e0724c53SAlexey Zaytsev 	int config_handler_count = 0;
987e0724c53SAlexey Zaytsev 	int actual;
988e0724c53SAlexey Zaytsev 	struct virtio_handler_container *vhc;
989e0724c53SAlexey Zaytsev 	int ret = DDI_FAILURE;
990e0724c53SAlexey Zaytsev 
991e0724c53SAlexey Zaytsev 	/* Walk the handler table to get the number of handlers. */
992e0724c53SAlexey Zaytsev 	for (vq_handler_count = 0;
993e0724c53SAlexey Zaytsev 	    vq_handlers && vq_handlers[vq_handler_count].vh_func;
994e0724c53SAlexey Zaytsev 	    vq_handler_count++)
995e0724c53SAlexey Zaytsev 		;
996e0724c53SAlexey Zaytsev 
9978a324c92SDan McDonald 	if (config_handler != NULL)
998e0724c53SAlexey Zaytsev 		config_handler_count = 1;
999e0724c53SAlexey Zaytsev 
1000e0724c53SAlexey Zaytsev 	vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
10018a324c92SDan McDonald 	    sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);
1002e0724c53SAlexey Zaytsev 
1003e0724c53SAlexey Zaytsev 	vhc->nhandlers = vq_handler_count;
1004e0724c53SAlexey Zaytsev 	(void) memcpy(vhc->vq_handlers, vq_handlers,
1005e0724c53SAlexey Zaytsev 	    sizeof (struct virtio_int_handler) * vq_handler_count);
1006e0724c53SAlexey Zaytsev 
10078a324c92SDan McDonald 	if (config_handler != NULL) {
1008e0724c53SAlexey Zaytsev 		(void) memcpy(&vhc->config_handler, config_handler,
1009e0724c53SAlexey Zaytsev 		    sizeof (struct virtio_int_handler));
1010e0724c53SAlexey Zaytsev 	}
1011e0724c53SAlexey Zaytsev 
1012e0724c53SAlexey Zaytsev 	/* Just a single entry for a single interrupt. */
1013e0724c53SAlexey Zaytsev 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1014e0724c53SAlexey Zaytsev 
1015e0724c53SAlexey Zaytsev 	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
10168a324c92SDan McDonald 	    DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
1017e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
1018e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
1019e0724c53SAlexey Zaytsev 		    "Failed to allocate a fixed interrupt: %d", ret);
1020e0724c53SAlexey Zaytsev 		goto out_int_alloc;
1021e0724c53SAlexey Zaytsev 	}
1022e0724c53SAlexey Zaytsev 
1023e0724c53SAlexey Zaytsev 	ASSERT(actual == 1);
1024e0724c53SAlexey Zaytsev 	sc->sc_intr_num = 1;
1025e0724c53SAlexey Zaytsev 
1026e0724c53SAlexey Zaytsev 	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1027e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
1028e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1029e0724c53SAlexey Zaytsev 		goto out_prio;
1030e0724c53SAlexey Zaytsev 	}
1031e0724c53SAlexey Zaytsev 
1032e0724c53SAlexey Zaytsev 	ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1033e0724c53SAlexey Zaytsev 	    virtio_intx_dispatch, sc, vhc);
1034e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
1035e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1036e0724c53SAlexey Zaytsev 		goto out_add_handlers;
1037e0724c53SAlexey Zaytsev 	}
1038e0724c53SAlexey Zaytsev 
1039*17ad7f9fSAndriy Gapon 	sc->sc_int_type = DDI_INTR_TYPE_FIXED;
1040e0724c53SAlexey Zaytsev 
1041e0724c53SAlexey Zaytsev 	return (DDI_SUCCESS);
1042e0724c53SAlexey Zaytsev 
1043e0724c53SAlexey Zaytsev out_add_handlers:
1044e0724c53SAlexey Zaytsev out_prio:
1045e0724c53SAlexey Zaytsev 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
1046e0724c53SAlexey Zaytsev out_int_alloc:
1047e0724c53SAlexey Zaytsev 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1048e0724c53SAlexey Zaytsev 	kmem_free(vhc, sizeof (struct virtio_handler_container) +
1049e0724c53SAlexey Zaytsev 	    sizeof (struct virtio_int_handler) * vq_handler_count);
1050e0724c53SAlexey Zaytsev 	return (ret);
1051e0724c53SAlexey Zaytsev }
1052e0724c53SAlexey Zaytsev 
1053e0724c53SAlexey Zaytsev /*
1054e0724c53SAlexey Zaytsev  * We find out during this call whether MSI is supported, and the register
1055e0724c53SAlexey Zaytsev  * layout depends on that (doh). Don't access the device-specific bits in
1056e0724c53SAlexey Zaytsev  * BAR 0 before calling it!
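 *
 * An illustrative caller sketch (hypothetical handler names; the table is
 * terminated by an entry with a NULL vh_func, and config_handler may be
 * NULL if configuration-change interrupts are not needed):
 *
 *	struct virtio_int_handler vq_handlers[] = {
 *		{ .vh_func = mydrv_rx_intr, .vh_priv = rx_state },
 *		{ .vh_func = mydrv_tx_intr, .vh_priv = tx_state },
 *		{ .vh_func = NULL },
 *	};
 *
 *	if (virtio_register_ints(sc, NULL, vq_handlers) != DDI_SUCCESS)
 *		goto fail;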
1057e0724c53SAlexey Zaytsev  */
1058e0724c53SAlexey Zaytsev int
1059e0724c53SAlexey Zaytsev virtio_register_ints(struct virtio_softc *sc,
1060e0724c53SAlexey Zaytsev     struct virtio_int_handler *config_handler,
1061e0724c53SAlexey Zaytsev     struct virtio_int_handler vq_handlers[])
1062e0724c53SAlexey Zaytsev {
1063e0724c53SAlexey Zaytsev 	int ret;
1064e0724c53SAlexey Zaytsev 	int intr_types;
1065e0724c53SAlexey Zaytsev 
1066*17ad7f9fSAndriy Gapon 	/* Default offset until MSI-X is enabled, if ever. */
1067*17ad7f9fSAndriy Gapon 	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;
1068*17ad7f9fSAndriy Gapon 
1069e0724c53SAlexey Zaytsev 	/* Determine which types of interrupts are supported */
1070e0724c53SAlexey Zaytsev 	ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
1071e0724c53SAlexey Zaytsev 	if (ret != DDI_SUCCESS) {
1072e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
1073e0724c53SAlexey Zaytsev 		goto out_inttype;
1074e0724c53SAlexey Zaytsev 	}
1075e0724c53SAlexey Zaytsev 
1076e0724c53SAlexey Zaytsev 	/* If we have MSI or MSI-X, prefer them. */
1077e0724c53SAlexey Zaytsev 	if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
1078e0724c53SAlexey Zaytsev 		ret = virtio_register_msi(sc, config_handler,
1079e0724c53SAlexey Zaytsev 		    vq_handlers, intr_types);
1080e0724c53SAlexey Zaytsev 		if (!ret)
1081e0724c53SAlexey Zaytsev 			return (0);
1082e0724c53SAlexey Zaytsev 	}
1083e0724c53SAlexey Zaytsev 
1084e0724c53SAlexey Zaytsev 	/* Fall back to old-fashioned interrupts. */
1085e0724c53SAlexey Zaytsev 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1086e0724c53SAlexey Zaytsev 		dev_debug(sc->sc_dev, CE_WARN,
1087e0724c53SAlexey Zaytsev 		    "Using legacy interrupts");
1088e0724c53SAlexey Zaytsev 
1089e0724c53SAlexey Zaytsev 		return (virtio_register_intx(sc, config_handler, vq_handlers));
1090e0724c53SAlexey Zaytsev 	}
1091e0724c53SAlexey Zaytsev 
1092e0724c53SAlexey Zaytsev 	dev_err(sc->sc_dev, CE_WARN,
1093e0724c53SAlexey Zaytsev 	    "MSI failed and fixed interrupts not supported. Giving up.");
1094e0724c53SAlexey Zaytsev 	ret = DDI_FAILURE;
1095e0724c53SAlexey Zaytsev 
1096e0724c53SAlexey Zaytsev out_inttype:
1097e0724c53SAlexey Zaytsev 	return (ret);
1098e0724c53SAlexey Zaytsev }
1099e0724c53SAlexey Zaytsev 
1100e0724c53SAlexey Zaytsev static int
1101e0724c53SAlexey Zaytsev virtio_enable_msi(struct virtio_softc *sc)
1102e0724c53SAlexey Zaytsev {
1103e0724c53SAlexey Zaytsev 	int ret, i;
1104e0724c53SAlexey Zaytsev 	int vq_handler_count = sc->sc_intr_num;
1105e0724c53SAlexey Zaytsev 
1106e0724c53SAlexey Zaytsev 	/* Number of handlers, not counting the config handler. */
1107e0724c53SAlexey Zaytsev 	if (sc->sc_intr_config)
1108e0724c53SAlexey Zaytsev 		vq_handler_count--;
1109e0724c53SAlexey Zaytsev 
1110*17ad7f9fSAndriy Gapon 	/* Enable the interrupts. Either the whole block, or one by one. */
1111e0724c53SAlexey Zaytsev 	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1112e0724c53SAlexey Zaytsev 		ret = ddi_intr_block_enable(sc->sc_intr_htable,
1113e0724c53SAlexey Zaytsev 		    sc->sc_intr_num);
1114e0724c53SAlexey Zaytsev 		if (ret != DDI_SUCCESS) {
1115e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN,
1116e0724c53SAlexey Zaytsev 			    "Failed to enable MSI, falling back to INTx");
1117e0724c53SAlexey Zaytsev 			goto out_enable;
1118e0724c53SAlexey Zaytsev 		}
1119e0724c53SAlexey Zaytsev 	} else {
1120e0724c53SAlexey Zaytsev 		for (i = 0; i < sc->sc_intr_num; i++) {
1121e0724c53SAlexey Zaytsev 			ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1122e0724c53SAlexey Zaytsev 			if (ret != DDI_SUCCESS) {
1123e0724c53SAlexey Zaytsev 				dev_err(sc->sc_dev, CE_WARN,
1124e0724c53SAlexey Zaytsev 				    "Failed to enable MSI %d, "
1125e0724c53SAlexey Zaytsev 				    "falling back to INTx", i);
1126e0724c53SAlexey Zaytsev 
1127e0724c53SAlexey Zaytsev 				while (--i >= 0) {
1128e0724c53SAlexey Zaytsev 					(void) ddi_intr_disable(
1129e0724c53SAlexey Zaytsev 					    sc->sc_intr_htable[i]);
1130e0724c53SAlexey Zaytsev 				}
1131e0724c53SAlexey Zaytsev 				goto out_enable;
1132e0724c53SAlexey Zaytsev 			}
1133e0724c53SAlexey Zaytsev 		}
1134e0724c53SAlexey Zaytsev 	}
1135e0724c53SAlexey Zaytsev 
1136e0724c53SAlexey Zaytsev 	/* Bind the allocated MSI to the queues and config */
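	/*
	 * Per the legacy virtio PCI specification, the device echoes the
	 * vector number back on a read of the queue/config vector register,
	 * or returns VIRTIO_MSI_NO_VECTOR if it could not set up the mapping;
	 * the read-back checks below rely on this behaviour.
	 */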
1137e0724c53SAlexey Zaytsev 	for (i = 0; i < vq_handler_count; i++) {
1138e0724c53SAlexey Zaytsev 		int check;
11398a324c92SDan McDonald 
1140e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1141e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1142e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1143e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_SELECT), i);
1144e0724c53SAlexey Zaytsev 
1145e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1146e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1147e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1148e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_VECTOR), i);
1149e0724c53SAlexey Zaytsev 
1150e0724c53SAlexey Zaytsev 		check = ddi_get16(sc->sc_ioh,
1151e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1152e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1153e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_VECTOR));
1154e0724c53SAlexey Zaytsev 		if (check != i) {
1155e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1156e0724c53SAlexey Zaytsev 			    "for VQ %d, MSI %d. Check = %x", i, i, check);
1157e0724c53SAlexey Zaytsev 			ret = ENODEV;
1158e0724c53SAlexey Zaytsev 			goto out_bind;
1159e0724c53SAlexey Zaytsev 		}
1160e0724c53SAlexey Zaytsev 	}
1161e0724c53SAlexey Zaytsev 
1162e0724c53SAlexey Zaytsev 	if (sc->sc_intr_config) {
1163e0724c53SAlexey Zaytsev 		int check;
11648a324c92SDan McDonald 
1165e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1166e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1167e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1168e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_CONFIG_VECTOR), i);
1169e0724c53SAlexey Zaytsev 
1170e0724c53SAlexey Zaytsev 		check = ddi_get16(sc->sc_ioh,
1171e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1172e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1173e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_CONFIG_VECTOR));
1174e0724c53SAlexey Zaytsev 		if (check != i) {
1175e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1176e0724c53SAlexey Zaytsev 			    "for Config updates, MSI %d", i);
1177e0724c53SAlexey Zaytsev 			ret = ENODEV;
1178e0724c53SAlexey Zaytsev 			goto out_bind;
1179e0724c53SAlexey Zaytsev 		}
1180e0724c53SAlexey Zaytsev 	}
1181e0724c53SAlexey Zaytsev 
1182*17ad7f9fSAndriy Gapon 	/* Configuration offset depends on whether MSI-X is used. */
1183*17ad7f9fSAndriy Gapon 	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX)
1184*17ad7f9fSAndriy Gapon 		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSIX;
1185*17ad7f9fSAndriy Gapon 	else
1186*17ad7f9fSAndriy Gapon 		ASSERT(sc->sc_int_type == DDI_INTR_TYPE_MSI);
1187*17ad7f9fSAndriy Gapon 
1188e0724c53SAlexey Zaytsev 	return (DDI_SUCCESS);
1189e0724c53SAlexey Zaytsev 
1190e0724c53SAlexey Zaytsev out_bind:
1191e0724c53SAlexey Zaytsev 	/* Unbind the vqs */
1192e0724c53SAlexey Zaytsev 	for (i = 0; i < vq_handler_count - 1; i++) {
1193e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1194e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1195e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1196e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_SELECT), i);
1197e0724c53SAlexey Zaytsev 
1198e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh,
1199e0724c53SAlexey Zaytsev 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1200e0724c53SAlexey Zaytsev 		    (uint16_t *)(sc->sc_io_addr +
1201e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_QUEUE_VECTOR),
1202e0724c53SAlexey Zaytsev 		    VIRTIO_MSI_NO_VECTOR);
1203e0724c53SAlexey Zaytsev 	}
1204e0724c53SAlexey Zaytsev 	/* And the config */
1205e0724c53SAlexey Zaytsev 	/* LINTED E_BAD_PTR_CAST_ALIGN */
1206e0724c53SAlexey Zaytsev 	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1207e0724c53SAlexey Zaytsev 	    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1208e0724c53SAlexey Zaytsev 
1209*17ad7f9fSAndriy Gapon 	/* Disable the interrupts. Either the whole block, or one by one. */
1210*17ad7f9fSAndriy Gapon 	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1211*17ad7f9fSAndriy Gapon 		ret = ddi_intr_block_disable(sc->sc_intr_htable,
1212*17ad7f9fSAndriy Gapon 		    sc->sc_intr_num);
1213*17ad7f9fSAndriy Gapon 		if (ret != DDI_SUCCESS) {
1214*17ad7f9fSAndriy Gapon 			dev_err(sc->sc_dev, CE_WARN,
1215*17ad7f9fSAndriy Gapon 			    "Failed to disable MSIs, won't be able to "
1216*17ad7f9fSAndriy Gapon 			    "reuse next time");
1217*17ad7f9fSAndriy Gapon 		}
1218*17ad7f9fSAndriy Gapon 	} else {
1219*17ad7f9fSAndriy Gapon 		for (i = 0; i < sc->sc_intr_num; i++) {
1220*17ad7f9fSAndriy Gapon 			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1221*17ad7f9fSAndriy Gapon 			if (ret != DDI_SUCCESS) {
1222*17ad7f9fSAndriy Gapon 				dev_err(sc->sc_dev, CE_WARN,
1223*17ad7f9fSAndriy Gapon 				    "Failed to disable interrupt %d, "
1224*17ad7f9fSAndriy Gapon 				    "won't be able to reuse", i);
1225*17ad7f9fSAndriy Gapon 			}
1226*17ad7f9fSAndriy Gapon 		}
1227*17ad7f9fSAndriy Gapon 	}
1228*17ad7f9fSAndriy Gapon 
1229e0724c53SAlexey Zaytsev 	ret = DDI_FAILURE;
1230e0724c53SAlexey Zaytsev 
1231e0724c53SAlexey Zaytsev out_enable:
1232e0724c53SAlexey Zaytsev 	return (ret);
1233e0724c53SAlexey Zaytsev }
1234e0724c53SAlexey Zaytsev 
12358a324c92SDan McDonald static int
12368a324c92SDan McDonald virtio_enable_intx(struct virtio_softc *sc)
1237e0724c53SAlexey Zaytsev {
1238e0724c53SAlexey Zaytsev 	int ret;
1239e0724c53SAlexey Zaytsev 
1240e0724c53SAlexey Zaytsev 	ret = ddi_intr_enable(sc->sc_intr_htable[0]);
12418a324c92SDan McDonald 	if (ret != DDI_SUCCESS) {
1242e0724c53SAlexey Zaytsev 		dev_err(sc->sc_dev, CE_WARN,
1243e0724c53SAlexey Zaytsev 		    "Failed to enable interrupt: %d", ret);
12448a324c92SDan McDonald 	}
12458a324c92SDan McDonald 
1246e0724c53SAlexey Zaytsev 	return (ret);
1247e0724c53SAlexey Zaytsev }
1248e0724c53SAlexey Zaytsev 
1249e0724c53SAlexey Zaytsev /*
1250e0724c53SAlexey Zaytsev  * We can't enable/disable individual handlers in the INTx case, so we
1251e0724c53SAlexey Zaytsev  * enable the whole bunch even in the MSI case.
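 *
 * A typical (illustrative) sequence is virtio_register_ints() during early
 * attach, followed by virtio_enable_ints() once the driver is ready to
 * receive interrupts; the ASSERT below reflects that the device is still
 * using the non-MSI-X configuration layout at that point.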
1252e0724c53SAlexey Zaytsev  */
1253e0724c53SAlexey Zaytsev int
1254e0724c53SAlexey Zaytsev virtio_enable_ints(struct virtio_softc *sc)
1255e0724c53SAlexey Zaytsev {
1256e0724c53SAlexey Zaytsev 
1257*17ad7f9fSAndriy Gapon 	ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX);
1258*17ad7f9fSAndriy Gapon 
1259e0724c53SAlexey Zaytsev 	/* See if we are using MSI. */
1260*17ad7f9fSAndriy Gapon 	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
1261*17ad7f9fSAndriy Gapon 	    sc->sc_int_type == DDI_INTR_TYPE_MSI)
1262e0724c53SAlexey Zaytsev 		return (virtio_enable_msi(sc));
1263e0724c53SAlexey Zaytsev 
1264*17ad7f9fSAndriy Gapon 	ASSERT(sc->sc_int_type == DDI_INTR_TYPE_FIXED);
1265e0724c53SAlexey Zaytsev 	return (virtio_enable_intx(sc));
1266e0724c53SAlexey Zaytsev }
1267e0724c53SAlexey Zaytsev 
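/*
 * Undo virtio_register_ints()/virtio_enable_ints(): unbind any MSI vectors
 * from the queues and the config space, disable the interrupts, then remove
 * the handlers and free the interrupt handles.
 */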
1268e0724c53SAlexey Zaytsev void
1269e0724c53SAlexey Zaytsev virtio_release_ints(struct virtio_softc *sc)
1270e0724c53SAlexey Zaytsev {
1271e0724c53SAlexey Zaytsev 	int i;
1272e0724c53SAlexey Zaytsev 	int ret;
1273e0724c53SAlexey Zaytsev 
1274e0724c53SAlexey Zaytsev 	/* If we were running with MSI, unbind the vectors. */
1275*17ad7f9fSAndriy Gapon 	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
1276*17ad7f9fSAndriy Gapon 	    sc->sc_int_type == DDI_INTR_TYPE_MSI) {
1277e0724c53SAlexey Zaytsev 		/* Unbind all vqs */
1278e0724c53SAlexey Zaytsev 		for (i = 0; i < sc->sc_nvqs; i++) {
1279e0724c53SAlexey Zaytsev 			ddi_put16(sc->sc_ioh,
1280e0724c53SAlexey Zaytsev 			    /* LINTED E_BAD_PTR_CAST_ALIGN */
1281e0724c53SAlexey Zaytsev 			    (uint16_t *)(sc->sc_io_addr +
1282e0724c53SAlexey Zaytsev 			    VIRTIO_CONFIG_QUEUE_SELECT), i);
1283e0724c53SAlexey Zaytsev 
1284e0724c53SAlexey Zaytsev 			ddi_put16(sc->sc_ioh,
1285e0724c53SAlexey Zaytsev 			    /* LINTED E_BAD_PTR_CAST_ALIGN */
1286e0724c53SAlexey Zaytsev 			    (uint16_t *)(sc->sc_io_addr +
1287e0724c53SAlexey Zaytsev 			    VIRTIO_CONFIG_QUEUE_VECTOR),
1288e0724c53SAlexey Zaytsev 			    VIRTIO_MSI_NO_VECTOR);
1289e0724c53SAlexey Zaytsev 		}
1290e0724c53SAlexey Zaytsev 		/* And the config */
1291e0724c53SAlexey Zaytsev 		/* LINTED E_BAD_PTR_CAST_ALIGN */
1292e0724c53SAlexey Zaytsev 		ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1293e0724c53SAlexey Zaytsev 		    VIRTIO_CONFIG_CONFIG_VECTOR),
1294e0724c53SAlexey Zaytsev 		    VIRTIO_MSI_NO_VECTOR);
1295e0724c53SAlexey Zaytsev 
1296e0724c53SAlexey Zaytsev 	}
1297e0724c53SAlexey Zaytsev 
1298*17ad7f9fSAndriy Gapon 	/* Disable the interrupts. Either the whole block, or one by one. */
1299e0724c53SAlexey Zaytsev 	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1300e0724c53SAlexey Zaytsev 		ret = ddi_intr_block_disable(sc->sc_intr_htable,
1301e0724c53SAlexey Zaytsev 		    sc->sc_intr_num);
1302e0724c53SAlexey Zaytsev 		if (ret != DDI_SUCCESS) {
1303e0724c53SAlexey Zaytsev 			dev_err(sc->sc_dev, CE_WARN,
1304e0724c53SAlexey Zaytsev 			    "Failed to disable MSIs, won't be able to "
1305e0724c53SAlexey Zaytsev 			    "reuse next time");
1306e0724c53SAlexey Zaytsev 		}
1307e0724c53SAlexey Zaytsev 	} else {
1308e0724c53SAlexey Zaytsev 		for (i = 0; i < sc->sc_intr_num; i++) {
1309e0724c53SAlexey Zaytsev 			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1310e0724c53SAlexey Zaytsev 			if (ret != DDI_SUCCESS) {
1311e0724c53SAlexey Zaytsev 				dev_err(sc->sc_dev, CE_WARN,
1312e0724c53SAlexey Zaytsev 				    "Failed to disable interrupt %d, "
1313e0724c53SAlexey Zaytsev 				    "won't be able to reuse", i);
1314e0724c53SAlexey Zaytsev 			}
1315e0724c53SAlexey Zaytsev 		}
1316e0724c53SAlexey Zaytsev 	}
1317e0724c53SAlexey Zaytsev 
1318e0724c53SAlexey Zaytsev 
1319e0724c53SAlexey Zaytsev 	for (i = 0; i < sc->sc_intr_num; i++) {
1320e0724c53SAlexey Zaytsev 		(void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1321e0724c53SAlexey Zaytsev 	}
1322e0724c53SAlexey Zaytsev 
1323e0724c53SAlexey Zaytsev 	for (i = 0; i < sc->sc_intr_num; i++)
1324e0724c53SAlexey Zaytsev 		(void) ddi_intr_free(sc->sc_intr_htable[i]);
1325e0724c53SAlexey Zaytsev 
13268a324c92SDan McDonald 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
13278a324c92SDan McDonald 	    sc->sc_intr_num);
1328e0724c53SAlexey Zaytsev 
1329*17ad7f9fSAndriy Gapon 	/* After disabling interrupts, the config offset is non-MSI-X. */
1330*17ad7f9fSAndriy Gapon 	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;
1331e0724c53SAlexey Zaytsev }
1332e0724c53SAlexey Zaytsev 
1333e0724c53SAlexey Zaytsev /*
1334e0724c53SAlexey Zaytsev  * Module linkage information for the kernel.
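 *
 * This is a miscellaneous module (mod_miscops): it only exports the helper
 * routines above and does not attach to a device itself; leaf drivers such
 * as the virtio block driver load it as a dependency.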
1335e0724c53SAlexey Zaytsev  */
1336e0724c53SAlexey Zaytsev static struct modlmisc modlmisc = {
1337e0724c53SAlexey Zaytsev 	&mod_miscops,	/* Type of module */
1338e0724c53SAlexey Zaytsev 	"VirtIO common library module",
1339e0724c53SAlexey Zaytsev };
1340e0724c53SAlexey Zaytsev 
1341e0724c53SAlexey Zaytsev static struct modlinkage modlinkage = {
1342e0724c53SAlexey Zaytsev 	MODREV_1,
1343e0724c53SAlexey Zaytsev 	{
1344e0724c53SAlexey Zaytsev 		(void *)&modlmisc,
1345e0724c53SAlexey Zaytsev 		NULL
1346e0724c53SAlexey Zaytsev 	}
1347e0724c53SAlexey Zaytsev };
1348e0724c53SAlexey Zaytsev 
1349e0724c53SAlexey Zaytsev int
1350e0724c53SAlexey Zaytsev _init(void)
1351e0724c53SAlexey Zaytsev {
1352e0724c53SAlexey Zaytsev 	return (mod_install(&modlinkage));
1353e0724c53SAlexey Zaytsev }
1354e0724c53SAlexey Zaytsev 
1355e0724c53SAlexey Zaytsev int
1356e0724c53SAlexey Zaytsev _fini(void)
1357e0724c53SAlexey Zaytsev {
1358e0724c53SAlexey Zaytsev 	return (mod_remove(&modlinkage));
1359e0724c53SAlexey Zaytsev }
1360e0724c53SAlexey Zaytsev 
1361e0724c53SAlexey Zaytsev int
1362e0724c53SAlexey Zaytsev _info(struct modinfo *modinfop)
1363e0724c53SAlexey Zaytsev {
1364e0724c53SAlexey Zaytsev 	return (mod_info(&modlinkage, modinfop));
1365e0724c53SAlexey Zaytsev }
1366