xref: /titanic_52/usr/src/uts/common/io/virtio/virtio.c (revision 5566946ddee5d74cd7ce592465b954f7d90f62f7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
25  */
26 
27 /* Based on the NetBSD virtio driver by Minoura Makoto. */
28 /*
29  * Copyright (c) 2010 Minoura Makoto.
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
42  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
43  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
44  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
45  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
46  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51  *
52  */
53 
54 #include <sys/conf.h>
55 #include <sys/kmem.h>
56 #include <sys/debug.h>
57 #include <sys/modctl.h>
58 #include <sys/autoconf.h>
59 #include <sys/ddi_impldefs.h>
60 #include <sys/ddi.h>
61 #include <sys/sunddi.h>
62 #include <sys/sunndi.h>
63 #include <sys/avintr.h>
64 #include <sys/spl.h>
65 #include <sys/promif.h>
66 #include <sys/list.h>
67 #include <sys/bootconf.h>
68 #include <sys/bootsvcs.h>
69 #include <sys/sysmacros.h>
70 #include <sys/pci.h>
71 
72 #include "virtiovar.h"
73 #include "virtioreg.h"
74 
75 #define	NDEVNAMES	(sizeof (virtio_device_name) / sizeof (char *))
76 #define	MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */
77 #define	VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
78 	    ~(VIRTIO_PAGE_SIZE-1))
79 
80 void
81 virtio_set_status(struct virtio_softc *sc, unsigned int status)
82 {
83 	int old = 0;
84 
85 	if (status != 0) {
86 		old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
87 		    VIRTIO_CONFIG_DEVICE_STATUS));
88 	}
89 
90 	ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
91 	    VIRTIO_CONFIG_DEVICE_STATUS), status | old);
92 }
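
/*
 * Status bits are cumulative: virtio_set_status() ORs the new bits into the
 * current status unless 'status' is 0, which resets the device.  A leaf
 * driver typically steps through the status constants from virtioreg.h
 * during attach, roughly (a sketch, error handling omitted):
 *
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
 *	(void) virtio_negotiate_features(sc, guest_features);
 *	... allocate vqs, register and enable interrupts ...
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
 *
 * and sets VIRTIO_CONFIG_DEVICE_STATUS_FAILED if attach cannot complete.
 */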
93 
94 /*
95  * Negotiate features, save the result in sc->sc_features
96  */
97 uint32_t
98 virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
99 {
100 	uint32_t host_features;
101 	uint32_t features;
102 
103 	host_features = ddi_get32(sc->sc_ioh,
104 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
105 	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
106 
107 	dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
108 	    host_features, guest_features);
109 
110 	features = host_features & guest_features;
111 	ddi_put32(sc->sc_ioh,
112 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
113 	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
114 	    features);
115 
116 	sc->sc_features = features;
117 
118 	return (host_features);
119 }
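
/*
 * A driver passes the feature bits it understands and checks the negotiated
 * result with virtio_has_feature().  A minimal sketch (wanted_features is a
 * hypothetical driver-specific mask):
 *
 *	(void) virtio_negotiate_features(sc,
 *	    VIRTIO_F_RING_INDIRECT_DESC | wanted_features);
 *	if (!virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC))
 *		indirect_num = 0;
 *
 * The return value is the raw host feature set; the accepted intersection
 * is kept in sc->sc_features.
 */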
120 
121 size_t
122 virtio_show_features(uint32_t features, char *buf, size_t len)
123 {
124 	char *orig_buf = buf;
125 	char *bufend = buf + len;
126 
127 	/* LINTED E_PTRDIFF_OVERFLOW */
128 	buf += snprintf(buf, bufend - buf, "Generic ( ");
129 	if (features & VIRTIO_F_RING_INDIRECT_DESC)
130 		/* LINTED E_PTRDIFF_OVERFLOW */
131 		buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
132 
133 	/* LINTED E_PTRDIFF_OVERFLOW */
134 	buf += snprintf(buf, bufend - buf, ") ");
135 
136 	/* LINTED E_PTRDIFF_OVERFLOW */
137 	return (buf - orig_buf);
138 }
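
/*
 * Only the generic feature bits are formatted here; a driver would normally
 * call this first and then append its device-specific bits to the same
 * buffer, e.g. (a sketch, mydrv_show_features() being a hypothetical
 * driver-provided helper):
 *
 *	char buf[512];
 *	size_t n = virtio_show_features(features, buf, sizeof (buf));
 *	n += mydrv_show_features(features, buf + n, sizeof (buf) - n);
 */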
139 
140 boolean_t
141 virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
142 {
143 	return (sc->sc_features & feature);
144 }
145 
146 /*
147  * Device configuration registers.
148  */
149 uint8_t
150 virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
151 {
152 	ASSERT(sc->sc_config_offset);
153 	return ddi_get8(sc->sc_ioh,
154 	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
155 }
156 
157 uint16_t
158 virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
159 {
160 	ASSERT(sc->sc_config_offset);
161 	return ddi_get16(sc->sc_ioh,
162 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
163 	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
164 }
165 
166 uint32_t
167 virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
168 {
169 	ASSERT(sc->sc_config_offset);
170 	return ddi_get32(sc->sc_ioh,
171 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
172 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
173 }
174 
175 uint64_t
176 virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
177 {
178 	uint64_t r;
179 
180 	ASSERT(sc->sc_config_offset);
181 	r = ddi_get32(sc->sc_ioh,
182 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
183 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
184 	    index + sizeof (uint32_t)));
185 
186 	r <<= 32;
187 
188 	r += ddi_get32(sc->sc_ioh,
189 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
190 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
191 	return (r);
192 }
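
/*
 * Note that the 64-bit config accessors are built from two 32-bit accesses
 * (the read fetches the high word first, then the low word; the write stores
 * the low word, then the high word), so they are not atomic with respect to
 * the device updating the field in between.
 */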
193 
194 void
195 virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
196     uint8_t value)
197 {
198 	ASSERT(sc->sc_config_offset);
199 	ddi_put8(sc->sc_ioh,
200 	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
201 }
202 
203 void
204 virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
205     uint16_t value)
206 {
207 	ASSERT(sc->sc_config_offset);
208 	ddi_put16(sc->sc_ioh,
209 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
210 	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
211 }
212 
213 void
214 virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
215     uint32_t value)
216 {
217 	ASSERT(sc->sc_config_offset);
218 	ddi_put32(sc->sc_ioh,
219 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
220 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
221 }
222 
223 void
224 virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
225     uint64_t value)
226 {
227 	ASSERT(sc->sc_config_offset);
228 	ddi_put32(sc->sc_ioh,
229 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
230 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
231 	    value & 0xFFFFFFFF);
232 	ddi_put32(sc->sc_ioh,
233 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
234 	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
235 	    index + sizeof (uint32_t)), value >> 32);
236 }
237 
238 /*
239  * Start/stop vq interrupt.  No guarantee.
240  */
241 void
242 virtio_stop_vq_intr(struct virtqueue *vq)
243 {
244 	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
245 }
246 
247 void
248 virtio_start_vq_intr(struct virtqueue *vq)
249 {
250 	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
251 }
252 
253 static ddi_dma_attr_t virtio_vq_dma_attr = {
254 	DMA_ATTR_V0,		/* Version number */
255 	0,			/* low address */
256 	0x00000FFFFFFFFFFF,	/* high address. Has to fit into 32 bits */
257 				/* after page-shifting */
258 	0xFFFFFFFF,		/* counter register max */
259 	VIRTIO_PAGE_SIZE,	/* page alignment required */
260 	0x3F,			/* burst sizes: 1 - 32 */
261 	0x1,			/* minimum transfer size */
262 	0xFFFFFFFF,		/* max transfer size */
263 	0xFFFFFFFF,		/* address register max */
264 	1,			/* no scatter-gather */
265 	1,			/* device operates on bytes */
266 	0,			/* attr flag: set to 0 */
267 };
268 
269 static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
270 	DMA_ATTR_V0,		/* Version number */
271 	0,			/* low address */
272 	0xFFFFFFFFFFFFFFFF,	/* high address */
273 	0xFFFFFFFF,		/* counter register max */
274 	1,			/* No specific alignment */
275 	0x3F,			/* burst sizes: 1 - 32 */
276 	0x1,			/* minimum transfer size */
277 	0xFFFFFFFF,		/* max transfer size */
278 	0xFFFFFFFF,		/* address register max */
279 	1,			/* no scatter-gather */
280 	1,			/* device operates on bytes */
281 	0,			/* attr flag: set to 0 */
282 };
283 
284 /* Same for direct and indirect descriptors. */
285 static ddi_device_acc_attr_t virtio_vq_devattr = {
286 	DDI_DEVICE_ATTR_V0,
287 	DDI_NEVERSWAP_ACC,
288 	DDI_STORECACHING_OK_ACC,
289 	DDI_DEFAULT_ACC
290 };
291 
292 static void
293 virtio_free_indirect(struct vq_entry *entry)
294 {
295 
296 	(void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
297 	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
298 	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
299 
300 	entry->qe_indirect_descs = NULL;
301 }
302 
303 
304 static int
305 virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
306 {
307 	int allocsize, num;
308 	size_t len;
309 	unsigned int ncookies;
310 	int ret;
311 
312 	num = entry->qe_queue->vq_indirect_num;
313 	ASSERT(num > 1);
314 
315 	allocsize = sizeof (struct vring_desc) * num;
316 
317 	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
318 	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
319 	if (ret != DDI_SUCCESS) {
320 		dev_err(sc->sc_dev, CE_WARN,
321 		    "Failed to allocate dma handle for indirect descriptors, "
322 		    "entry %d, vq %d", entry->qe_index,
323 		    entry->qe_queue->vq_index);
324 		goto out_alloc_handle;
325 	}
326 
327 	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
328 	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
329 	    (caddr_t *)&entry->qe_indirect_descs, &len,
330 	    &entry->qe_indirect_dma_acch);
331 	if (ret != DDI_SUCCESS) {
332 		dev_err(sc->sc_dev, CE_WARN,
333 		    "Failed to allocate dma memory for indirect descriptors, "
334 		    "entry %d, vq %d", entry->qe_index,
335 		    entry->qe_queue->vq_index);
336 		goto out_alloc;
337 	}
338 
339 	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);
340 
341 	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
342 	    (caddr_t)entry->qe_indirect_descs, len,
343 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
344 	    &entry->qe_indirect_dma_cookie, &ncookies);
345 	if (ret != DDI_DMA_MAPPED) {
346 		dev_err(sc->sc_dev, CE_WARN,
347 		    "Failed to bind dma memory for indirect descriptors, "
348 		    "entry %d, vq %d", entry->qe_index,
349 		    entry->qe_queue->vq_index);
350 		goto out_bind;
351 	}
352 
353 	/* We asked for a single segment */
354 	ASSERT(ncookies == 1);
355 
356 	return (0);
357 
358 out_bind:
359 	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
360 out_alloc:
361 	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
362 out_alloc_handle:
363 
364 	return (ret);
365 }
366 
367 /*
368  * Initialize the vq structure.
369  */
370 static int
371 virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
372 {
373 	int ret;
374 	uint16_t i;
375 	int vq_size = vq->vq_num;
376 	int indirect_num = vq->vq_indirect_num;
377 
378 	/* free slot management */
379 	list_create(&vq->vq_freelist, sizeof (struct vq_entry),
380 	    offsetof(struct vq_entry, qe_list));
381 
382 	for (i = 0; i < vq_size; i++) {
383 		struct vq_entry *entry = &vq->vq_entries[i];
384 		list_insert_tail(&vq->vq_freelist, entry);
385 		entry->qe_index = i;
386 		entry->qe_desc = &vq->vq_descs[i];
387 		entry->qe_queue = vq;
388 
389 		if (indirect_num) {
390 			ret = virtio_alloc_indirect(sc, entry);
391 			if (ret)
392 				goto out_indirect;
393 		}
394 	}
395 
396 	mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
397 	    DDI_INTR_PRI(sc->sc_intr_prio));
398 	mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
399 	    DDI_INTR_PRI(sc->sc_intr_prio));
400 	mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
401 	    DDI_INTR_PRI(sc->sc_intr_prio));
402 
403 	return (0);
404 
405 out_indirect:
406 	for (i = 0; i < vq_size; i++) {
407 		struct vq_entry *entry = &vq->vq_entries[i];
408 		if (entry->qe_indirect_descs)
409 			virtio_free_indirect(entry);
410 	}
411 
412 	return (ret);
413 }
414 
415 /*
416  * Allocate/free a vq.
417  */
418 struct virtqueue *
419 virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
420     unsigned int indirect_num, const char *name)
421 {
422 	int vq_size, allocsize1, allocsize2, allocsize = 0;
423 	int ret;
424 	unsigned int ncookies;
425 	size_t len;
426 	struct virtqueue *vq;
427 
428 	ddi_put16(sc->sc_ioh,
429 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
430 	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
431 	vq_size = ddi_get16(sc->sc_ioh,
432 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
433 	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
434 	if (vq_size == 0) {
435 		dev_err(sc->sc_dev, CE_WARN,
436 		    "virtqueue does not exist, index %d for %s\n", index, name);
437 		goto out;
438 	}
439 
440 	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
441 
442 	/* size 0 => use native vq size, good for receive queues. */
443 	if (size)
444 		vq_size = MIN(vq_size, size);
445 
446 	/* allocsize1: descriptor table + avail ring + pad */
447 	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
448 	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
449 	/* allocsize2: used ring + pad */
450 	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
451 	    sizeof (struct vring_used_elem) * vq_size);
452 
453 	allocsize = allocsize1 + allocsize2;
454 
455 	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
456 	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
457 	if (ret != DDI_SUCCESS) {
458 		dev_err(sc->sc_dev, CE_WARN,
459 		    "Failed to allocate dma handle for vq %d", index);
460 		goto out_alloc_handle;
461 	}
462 
463 	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
464 	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
465 	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
466 	if (ret != DDI_SUCCESS) {
467 		dev_err(sc->sc_dev, CE_WARN,
468 		    "Failed to allocate dma memory for vq %d", index);
469 		goto out_alloc;
470 	}
471 
472 	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
473 	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
474 	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
475 	if (ret != DDI_DMA_MAPPED) {
476 		dev_err(sc->sc_dev, CE_WARN,
477 		    "Failed to bind dma memory for vq %d", index);
478 		goto out_bind;
479 	}
480 
481 	/* We asked for a single segment */
482 	ASSERT(ncookies == 1);
483 	/* and page-aligned buffers. */
484 	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
485 
486 	(void) memset(vq->vq_vaddr, 0, allocsize);
487 
488 	/* Make sure all zeros hit the buffer before we point the host to it */
489 	membar_producer();
490 
491 	/* set the vq address */
492 	ddi_put32(sc->sc_ioh,
493 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
494 	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
495 	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));
496 
497 	/* remember addresses and offsets for later use */
498 	vq->vq_owner = sc;
499 	vq->vq_num = vq_size;
500 	vq->vq_index = index;
501 	vq->vq_descs = vq->vq_vaddr;
502 	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
503 	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
504 	vq->vq_usedoffset = allocsize1;
505 	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
506 
507 	ASSERT(indirect_num == 0 ||
508 	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
509 	vq->vq_indirect_num = indirect_num;
510 
511 	/* free slot management */
512 	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
513 	    KM_SLEEP);
514 
515 	ret = virtio_init_vq(sc, vq);
516 	if (ret)
517 		goto out_init;
518 
519 	dev_debug(sc->sc_dev, CE_NOTE,
520 	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
521 	    vq_size, index, name, indirect_num * vq_size);
522 
523 	return (vq);
524 
525 out_init:
526 	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
527 	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
528 out_bind:
529 	ddi_dma_mem_free(&vq->vq_dma_acch);
530 out_alloc:
531 	ddi_dma_free_handle(&vq->vq_dma_handle);
532 out_alloc_handle:
533 	kmem_free(vq, sizeof (struct virtqueue));
534 out:
535 	return (NULL);
536 }
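
/*
 * A typical attach-time call, sketched below (the softc field names are
 * hypothetical; size 0 means "use whatever size the host offers"):
 *
 *	sc->sc_rx_vq = virtio_alloc_vq(&sc->sc_virtio, 0, 0,
 *	    sc->sc_rx_indirect_num, "rx");
 *	if (sc->sc_rx_vq == NULL)
 *		goto exit_alloc_vq;
 *
 * A non-zero indirect_num requires VIRTIO_F_RING_INDIRECT_DESC to have been
 * negotiated, as the ASSERT above enforces.
 */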
537 
538 void
539 virtio_free_vq(struct virtqueue *vq)
540 {
541 	struct virtio_softc *sc = vq->vq_owner;
542 	int i;
543 
544 	/* tell the device that the virtqueue is no longer in use */
545 	ddi_put16(sc->sc_ioh,
546 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
547 	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
548 	    vq->vq_index);
549 	ddi_put32(sc->sc_ioh,
550 	    /* LINTED E_BAD_PTR_CAST_ALIGN */
551 	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
552 
553 	/* Free the indirect descriptors, if any. */
554 	for (i = 0; i < vq->vq_num; i++) {
555 		struct vq_entry *entry = &vq->vq_entries[i];
556 		if (entry->qe_indirect_descs)
557 			virtio_free_indirect(entry);
558 	}
559 
560 	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
561 
562 	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
563 	ddi_dma_mem_free(&vq->vq_dma_acch);
564 	ddi_dma_free_handle(&vq->vq_dma_handle);
565 
566 	mutex_destroy(&vq->vq_used_lock);
567 	mutex_destroy(&vq->vq_avail_lock);
568 	mutex_destroy(&vq->vq_freelist_lock);
569 
570 	kmem_free(vq, sizeof (struct virtqueue));
571 }
572 
573 /*
574  * Free descriptor management.
575  */
576 struct vq_entry *
577 vq_alloc_entry(struct virtqueue *vq)
578 {
579 	struct vq_entry *qe;
580 
581 	mutex_enter(&vq->vq_freelist_lock);
582 	if (list_is_empty(&vq->vq_freelist)) {
583 		mutex_exit(&vq->vq_freelist_lock);
584 		return (NULL);
585 	}
586 	qe = list_remove_head(&vq->vq_freelist);
587 
588 	ASSERT(vq->vq_used_entries >= 0);
589 	vq->vq_used_entries++;
590 
591 	mutex_exit(&vq->vq_freelist_lock);
592 
593 	qe->qe_next = NULL;
594 	qe->qe_indirect_next = 0;
595 	(void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));
596 
597 	return (qe);
598 }
599 
600 void
601 vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
602 {
603 	mutex_enter(&vq->vq_freelist_lock);
604 
605 	list_insert_head(&vq->vq_freelist, qe);
606 	vq->vq_used_entries--;
607 	ASSERT(vq->vq_used_entries >= 0);
608 	mutex_exit(&vq->vq_freelist_lock);
609 }
610 
611 /*
612  * We (intentionally) don't have a global vq mutex, so you are
613  * responsible for external locking to avoid allocating/freeing any
614  * entries before using the returned value. Have fun.
615  */
616 uint_t
617 vq_num_used(struct virtqueue *vq)
618 {
619 	/* vq->vq_freelist_lock would not help here. */
620 	return (vq->vq_used_entries);
621 }
622 
623 static inline void
624 virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
625     boolean_t write)
626 {
627 	desc->addr = paddr;
628 	desc->len = len;
629 	desc->next = 0;
630 	desc->flags = 0;
631 
632 	/* 'write' - from the driver's point of view */
633 	if (!write)
634 		desc->flags = VRING_DESC_F_WRITE;
635 }
636 
637 void
638 virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
639     boolean_t write)
640 {
641 	virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
642 }
643 
644 unsigned int
645 virtio_ve_indirect_available(struct vq_entry *qe)
646 {
647 	return (qe->qe_queue->vq_indirect_num - (qe->qe_indirect_next - 1));
648 }
649 
650 void
651 virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
652     boolean_t write)
653 {
654 	struct vring_desc *indirect_desc;
655 
656 	ASSERT(qe->qe_queue->vq_indirect_num);
657 	ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
658 
659 	indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
660 	virtio_ve_set_desc(indirect_desc, paddr, len, write);
661 	qe->qe_indirect_next++;
662 }
663 
664 void
665 virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
666     ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
667 {
668 	int i;
669 
670 	for (i = 0; i < ncookies; i++) {
671 		virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
672 		    dma_cookie.dmac_size, write);
673 		ddi_dma_nextcookie(dma_handle, &dma_cookie);
674 	}
675 }
676 
677 void
678 virtio_sync_vq(struct virtqueue *vq)
679 {
680 	struct virtio_softc *vsc = vq->vq_owner;
681 
682 	/* Make sure the avail ring update hit the buffer */
683 	membar_producer();
684 
685 	vq->vq_avail->idx = vq->vq_avail_idx;
686 
687 	/* Make sure the avail idx update hits the buffer */
688 	membar_producer();
689 
690 	/* Make sure we see the flags update */
691 	membar_consumer();
692 
693 	if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
694 		ddi_put16(vsc->sc_ioh,
695 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
696 		    (uint16_t *)(vsc->sc_io_addr +
697 		    VIRTIO_CONFIG_QUEUE_NOTIFY),
698 		    vq->vq_index);
699 	}
700 }
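
/*
 * Drivers that queue several chains back to back may pass sync == B_FALSE
 * to virtio_push_chain() and then call virtio_sync_vq() once at the end,
 * so the avail index is published and the host is notified only once per
 * batch.
 */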
701 
702 void
703 virtio_push_chain(struct vq_entry *qe, boolean_t sync)
704 {
705 	struct virtqueue *vq = qe->qe_queue;
706 	struct vq_entry *head = qe;
707 	struct vring_desc *desc;
708 	int idx;
709 
710 	ASSERT(qe);
711 
712 	/*
713 	 * Bind the descs together; paddr and len should already be
714 	 * set with virtio_ve_set().
715 	 */
716 	do {
717 		/* Bind the indirect descriptors */
718 		if (qe->qe_indirect_next > 1) {
719 			uint16_t i = 0;
720 
721 			/*
722 			 * Set the pointer/flags to the
723 			 * first indirect descriptor
724 			 */
725 			virtio_ve_set_desc(qe->qe_desc,
726 			    qe->qe_indirect_dma_cookie.dmac_laddress,
727 			    sizeof (struct vring_desc) * qe->qe_indirect_next,
728 			    B_FALSE);
729 			qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;
730 
731 			/* For all but the last one, add the next index/flag */
732 			do {
733 				desc = &qe->qe_indirect_descs[i];
734 				i++;
735 
736 				desc->flags |= VRING_DESC_F_NEXT;
737 				desc->next = i;
738 			} while (i < qe->qe_indirect_next - 1);
739 
740 		}
741 
742 		if (qe->qe_next) {
743 			qe->qe_desc->flags |= VRING_DESC_F_NEXT;
744 			qe->qe_desc->next = qe->qe_next->qe_index;
745 		}
746 
747 		qe = qe->qe_next;
748 	} while (qe);
749 
750 	mutex_enter(&vq->vq_avail_lock);
751 	idx = vq->vq_avail_idx;
752 	vq->vq_avail_idx++;
753 
754 	/* Make sure the bits hit the descriptor(s) */
755 	membar_producer();
756 	vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
757 
758 	/* Notify the device, if needed. */
759 	if (sync)
760 		virtio_sync_vq(vq);
761 
762 	mutex_exit(&vq->vq_avail_lock);
763 }
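
/*
 * Putting the pieces together, a submission using indirect descriptors
 * looks roughly like this (buffer setup and error handling omitted; the
 * header and data cookie names are hypothetical):
 *
 *	struct vq_entry *ve = vq_alloc_entry(vq);
 *	if (ve == NULL)
 *		return (B_FALSE);	(ring full, retry later)
 *	virtio_ve_add_indirect_buf(ve, hdr_cookie.dmac_laddress,
 *	    sizeof (struct mydrv_hdr), B_TRUE);
 *	virtio_ve_add_cookie(ve, buf_dmah, buf_cookie, ncookies, B_TRUE);
 *	virtio_push_chain(ve, B_TRUE);
 */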
764 
765 /*
766  * Get a chain of descriptors from the used ring, if one is available.
767  */
768 struct vq_entry *
769 virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
770 {
771 	struct vq_entry *head;
772 	int slot;
773 	int usedidx;
774 
775 	mutex_enter(&vq->vq_used_lock);
776 
777 	/* No used entries? Bye. */
778 	if (vq->vq_used_idx == vq->vq_used->idx) {
779 		mutex_exit(&vq->vq_used_lock);
780 		return (NULL);
781 	}
782 
783 	usedidx = vq->vq_used_idx;
784 	vq->vq_used_idx++;
785 	mutex_exit(&vq->vq_used_lock);
786 
787 	usedidx %= vq->vq_num;
788 
789 	/* Make sure we do the next step _after_ checking the idx. */
790 	membar_consumer();
791 
792 	slot = vq->vq_used->ring[usedidx].id;
793 	*len = vq->vq_used->ring[usedidx].len;
794 
795 	head = &vq->vq_entries[slot];
796 
797 	return (head);
798 }
799 
800 void
801 virtio_free_chain(struct vq_entry *qe)
802 {
803 	struct vq_entry *tmp;
804 	struct virtqueue *vq = qe->qe_queue;
805 
806 	ASSERT(qe);
807 
808 	do {
809 		ASSERT(qe->qe_queue == vq);
810 		tmp = qe->qe_next;
811 		vq_free_entry(vq, qe);
812 		qe = tmp;
813 	} while (tmp != NULL);
814 }
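
/*
 * The used ring is normally drained from the vq interrupt handler along
 * these lines (a sketch):
 *
 *	uint32_t len;
 *	struct vq_entry *ve;
 *
 *	while ((ve = virtio_pull_chain(vq, &len)) != NULL) {
 *		(handle the completed request for ve->qe_index)
 *		virtio_free_chain(ve);
 *	}
 */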
815 
816 void
817 virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
818 {
819 	first->qe_next = second;
820 }
821 
822 static int
823 virtio_register_msi(struct virtio_softc *sc,
824     struct virtio_int_handler *config_handler,
825     struct virtio_int_handler vq_handlers[], int intr_types)
826 {
827 	int count, actual;
828 	int int_type;
829 	int i;
830 	int handler_count;
831 	int ret;
832 
833 	/* If both MSI and MSI-X are reported, prefer MSI-X. */
834 	int_type = DDI_INTR_TYPE_MSI;
835 	if (intr_types & DDI_INTR_TYPE_MSIX)
836 		int_type = DDI_INTR_TYPE_MSIX;
837 
838 	/* Walk the handler table to get the number of handlers. */
839 	for (handler_count = 0;
840 	    vq_handlers && vq_handlers[handler_count].vh_func;
841 	    handler_count++)
842 		;
843 
844 	/* +1 if there is a config change handler. */
845 	if (config_handler != NULL)
846 		handler_count++;
847 
848 	/* Number of MSIs supported by the device. */
849 	ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
850 	if (ret != DDI_SUCCESS) {
851 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
852 		return (ret);
853 	}
854 
855 	/*
856 	 * Those who try to register more handlers than the device
857 	 * supports shall suffer.
858 	 */
859 	ASSERT(handler_count <= count);
860 
861 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
862 	    handler_count, KM_SLEEP);
863 
864 	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
865 	    handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
866 	if (ret != DDI_SUCCESS) {
867 		dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
868 		goto out_msi_alloc;
869 	}
870 
871 	if (actual != handler_count) {
872 		dev_err(sc->sc_dev, CE_WARN,
873 		    "Not enough MSI available: need %d, available %d",
874 		    handler_count, actual);
875 		goto out_msi_available;
876 	}
877 
878 	sc->sc_intr_num = handler_count;
879 	sc->sc_intr_config = B_FALSE;
880 	if (config_handler != NULL) {
881 		sc->sc_intr_config = B_TRUE;
882 	}
883 
884 	/* Assume they all have the same priority. */
885 	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
886 	if (ret != DDI_SUCCESS) {
887 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
888 		goto out_msi_prio;
889 	}
890 
891 	/* Add the vq handlers */
892 	for (i = 0; vq_handlers[i].vh_func; i++) {
893 		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
894 		    vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
895 		if (ret != DDI_SUCCESS) {
896 			dev_err(sc->sc_dev, CE_WARN,
897 			    "ddi_intr_add_handler failed");
898 			/* Remove the handlers that succeeded. */
899 			while (--i >= 0) {
900 				(void) ddi_intr_remove_handler(
901 				    sc->sc_intr_htable[i]);
902 			}
903 			goto out_add_handlers;
904 		}
905 	}
906 
907 	/* Don't forget the config handler */
908 	if (config_handler != NULL) {
909 		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
910 		    config_handler->vh_func, sc, config_handler->vh_priv);
911 		if (ret != DDI_SUCCESS) {
912 			dev_err(sc->sc_dev, CE_WARN,
913 			    "ddi_intr_add_handler failed");
914 			/* Remove the handlers that succeeded. */
915 			while (--i >= 0) {
916 				(void) ddi_intr_remove_handler(
917 				    sc->sc_intr_htable[i]);
918 			}
919 			goto out_add_handlers;
920 		}
921 	}
922 
923 	ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
924 	if (ret == DDI_SUCCESS) {
925 		sc->sc_int_type = int_type;
926 		return (DDI_SUCCESS);
927 	}
928 
929 out_add_handlers:
930 out_msi_prio:
931 out_msi_available:
932 	for (i = 0; i < actual; i++)
933 		(void) ddi_intr_free(sc->sc_intr_htable[i]);
934 out_msi_alloc:
935 	kmem_free(sc->sc_intr_htable,
936 	    sizeof (ddi_intr_handle_t) * handler_count);
937 
938 	return (ret);
939 }
940 
941 struct virtio_handler_container {
942 	int nhandlers;
943 	struct virtio_int_handler config_handler;
944 	struct virtio_int_handler vq_handlers[];
945 };
946 
947 uint_t
948 virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
949 {
950 	struct virtio_softc *sc = (void *)arg1;
951 	struct virtio_handler_container *vhc = (void *)arg2;
952 	uint8_t isr_status;
953 	int i;
954 
955 	isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
956 	    VIRTIO_CONFIG_ISR_STATUS));
957 
958 	if (!isr_status)
959 		return (DDI_INTR_UNCLAIMED);
960 
961 	if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
962 	    vhc->config_handler.vh_func) {
963 		vhc->config_handler.vh_func((void *)sc,
964 		    vhc->config_handler.vh_priv);
965 	}
966 
967 	/* Notify all handlers */
968 	for (i = 0; i < vhc->nhandlers; i++) {
969 		vhc->vq_handlers[i].vh_func((void *)sc,
970 		    vhc->vq_handlers[i].vh_priv);
971 	}
972 
973 	return (DDI_INTR_CLAIMED);
974 }
975 
976 /*
977  * config_handler and vq_handlers may be allocated on the stack.
978  * Take precautions not to lose them.
979  */
980 static int
981 virtio_register_intx(struct virtio_softc *sc,
982     struct virtio_int_handler *config_handler,
983     struct virtio_int_handler vq_handlers[])
984 {
985 	int vq_handler_count;
986 	int config_handler_count = 0;
987 	int actual;
988 	struct virtio_handler_container *vhc;
989 	int ret = DDI_FAILURE;
990 
991 	/* Walk the handler table to get the number of handlers. */
992 	for (vq_handler_count = 0;
993 	    vq_handlers && vq_handlers[vq_handler_count].vh_func;
994 	    vq_handler_count++)
995 		;
996 
997 	if (config_handler != NULL)
998 		config_handler_count = 1;
999 
1000 	vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1001 	    sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);
1002 
1003 	vhc->nhandlers = vq_handler_count;
1004 	(void) memcpy(vhc->vq_handlers, vq_handlers,
1005 	    sizeof (struct virtio_int_handler) * vq_handler_count);
1006 
1007 	if (config_handler != NULL) {
1008 		(void) memcpy(&vhc->config_handler, config_handler,
1009 		    sizeof (struct virtio_int_handler));
1010 	}
1011 
1012 	/* Just a single entry for a single interrupt. */
1013 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1014 
1015 	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1016 	    DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
1017 	if (ret != DDI_SUCCESS) {
1018 		dev_err(sc->sc_dev, CE_WARN,
1019 		    "Failed to allocate a fixed interrupt: %d", ret);
1020 		goto out_int_alloc;
1021 	}
1022 
1023 	ASSERT(actual == 1);
1024 	sc->sc_intr_num = 1;
1025 
1026 	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1027 	if (ret != DDI_SUCCESS) {
1028 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1029 		goto out_prio;
1030 	}
1031 
1032 	ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1033 	    virtio_intx_dispatch, sc, vhc);
1034 	if (ret != DDI_SUCCESS) {
1035 		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1036 		goto out_add_handlers;
1037 	}
1038 
1039 	sc->sc_int_type = DDI_INTR_TYPE_FIXED;
1040 
1041 	return (DDI_SUCCESS);
1042 
1043 out_add_handlers:
1044 out_prio:
1045 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
1046 out_int_alloc:
1047 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1048 	kmem_free(vhc, sizeof (struct virtio_handler_container) +
1049 	    sizeof (struct virtio_int_handler) * vq_handler_count);
1050 	return (ret);
1051 }
1052 
1053 /*
1054  * We find out whether MSI is supported during this call, and the register
1055  * layout depends on whether MSI is used. Don't access the device-specific
1056  * bits in BAR 0 before calling it!
1057  */
1058 int
1059 virtio_register_ints(struct virtio_softc *sc,
1060     struct virtio_int_handler *config_handler,
1061     struct virtio_int_handler vq_handlers[])
1062 {
1063 	int ret;
1064 	int intr_types;
1065 
1066 	/* Default offset until MSI-X is enabled, if ever. */
1067 	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;
1068 
1069 	/* Determine which types of interrupts are supported */
1070 	ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
1071 	if (ret != DDI_SUCCESS) {
1072 		dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
1073 		goto out_inttype;
1074 	}
1075 
1076 	/* If we have MSIs, let's use them. */
1077 	if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
1078 		ret = virtio_register_msi(sc, config_handler,
1079 		    vq_handlers, intr_types);
1080 		if (!ret)
1081 			return (0);
1082 	}
1083 
1084 	/* Fall back to old-fashioned interrupts. */
1085 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1086 		dev_debug(sc->sc_dev, CE_WARN,
1087 		    "Using legacy interrupts");
1088 
1089 		return (virtio_register_intx(sc, config_handler, vq_handlers));
1090 	}
1091 
1092 	dev_err(sc->sc_dev, CE_WARN,
1093 	    "MSI failed and fixed interrupts not supported. Giving up.");
1094 	ret = DDI_FAILURE;
1095 
1096 out_inttype:
1097 	return (ret);
1098 }
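
/*
 * vq_handlers is a NULL-terminated array; each vh_func has the usual
 * ddi_intr_handler_t shape and is invoked with the virtio_softc as arg1
 * and vh_priv as arg2.  A sketch (handler names hypothetical):
 *
 *	struct virtio_int_handler mydrv_vq_handlers[] = {
 *		{ mydrv_rx_handler },
 *		{ mydrv_tx_handler },
 *		{ NULL }
 *	};
 *
 *	ret = virtio_register_ints(&sc->sc_virtio, &config_handler,
 *	    mydrv_vq_handlers);
 */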
1099 
1100 static int
1101 virtio_enable_msi(struct virtio_softc *sc)
1102 {
1103 	int ret, i;
1104 	int vq_handler_count = sc->sc_intr_num;
1105 
1106 	/* Number of handlers, not counting the config. */
1107 	if (sc->sc_intr_config)
1108 		vq_handler_count--;
1109 
1110 	/* Enable the interrupts. Either the whole block, or one by one. */
1111 	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1112 		ret = ddi_intr_block_enable(sc->sc_intr_htable,
1113 		    sc->sc_intr_num);
1114 		if (ret != DDI_SUCCESS) {
1115 			dev_err(sc->sc_dev, CE_WARN,
1116 			    "Failed to enable MSI, falling back to INTx");
1117 			goto out_enable;
1118 		}
1119 	} else {
1120 		for (i = 0; i < sc->sc_intr_num; i++) {
1121 			ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1122 			if (ret != DDI_SUCCESS) {
1123 				dev_err(sc->sc_dev, CE_WARN,
1124 				    "Failed to enable MSI %d, "
1125 				    "falling back to INTx", i);
1126 
1127 				while (--i >= 0) {
1128 					(void) ddi_intr_disable(
1129 					    sc->sc_intr_htable[i]);
1130 				}
1131 				goto out_enable;
1132 			}
1133 		}
1134 	}
1135 
1136 	/* Bind the allocated MSI to the queues and config */
1137 	for (i = 0; i < vq_handler_count; i++) {
1138 		int check;
1139 
1140 		ddi_put16(sc->sc_ioh,
1141 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1142 		    (uint16_t *)(sc->sc_io_addr +
1143 		    VIRTIO_CONFIG_QUEUE_SELECT), i);
1144 
1145 		ddi_put16(sc->sc_ioh,
1146 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1147 		    (uint16_t *)(sc->sc_io_addr +
1148 		    VIRTIO_CONFIG_QUEUE_VECTOR), i);
1149 
1150 		check = ddi_get16(sc->sc_ioh,
1151 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1152 		    (uint16_t *)(sc->sc_io_addr +
1153 		    VIRTIO_CONFIG_QUEUE_VECTOR));
1154 		if (check != i) {
1155 			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1156 			    "for VQ %d, MSI %d. Check = %x", i, i, check);
1157 			ret = ENODEV;
1158 			goto out_bind;
1159 		}
1160 	}
1161 
1162 	if (sc->sc_intr_config) {
1163 		int check;
1164 
1165 		ddi_put16(sc->sc_ioh,
1166 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1167 		    (uint16_t *)(sc->sc_io_addr +
1168 		    VIRTIO_CONFIG_CONFIG_VECTOR), i);
1169 
1170 		check = ddi_get16(sc->sc_ioh,
1171 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1172 		    (uint16_t *)(sc->sc_io_addr +
1173 		    VIRTIO_CONFIG_CONFIG_VECTOR));
1174 		if (check != i) {
1175 			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1176 			    "for Config updates, MSI %d", i);
1177 			ret = ENODEV;
1178 			goto out_bind;
1179 		}
1180 	}
1181 
1182 	/* Configuration offset depends on whether MSI-X is used. */
1183 	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX)
1184 		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSIX;
1185 	else
1186 		ASSERT(sc->sc_int_type == DDI_INTR_TYPE_MSI);
1187 
1188 	return (DDI_SUCCESS);
1189 
1190 out_bind:
1191 	/* Unbind the vqs */
1192 	for (i = 0; i < vq_handler_count - 1; i++) {
1193 		ddi_put16(sc->sc_ioh,
1194 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1195 		    (uint16_t *)(sc->sc_io_addr +
1196 		    VIRTIO_CONFIG_QUEUE_SELECT), i);
1197 
1198 		ddi_put16(sc->sc_ioh,
1199 		    /* LINTED E_BAD_PTR_CAST_ALIGN */
1200 		    (uint16_t *)(sc->sc_io_addr +
1201 		    VIRTIO_CONFIG_QUEUE_VECTOR),
1202 		    VIRTIO_MSI_NO_VECTOR);
1203 	}
1204 	/* And the config */
1205 	/* LINTED E_BAD_PTR_CAST_ALIGN */
1206 	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1207 	    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1208 
1209 	/* Disable the interrupts. Either the whole block, or one by one. */
1210 	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1211 		ret = ddi_intr_block_disable(sc->sc_intr_htable,
1212 		    sc->sc_intr_num);
1213 		if (ret != DDI_SUCCESS) {
1214 			dev_err(sc->sc_dev, CE_WARN,
1215 			    "Failed to disable MSIs, won't be able to "
1216 			    "reuse next time");
1217 		}
1218 	} else {
1219 		for (i = 0; i < sc->sc_intr_num; i++) {
1220 			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1221 			if (ret != DDI_SUCCESS) {
1222 				dev_err(sc->sc_dev, CE_WARN,
1223 				    "Failed to disable interrupt %d, "
1224 				    "won't be able to reuse", i);
1225 			}
1226 		}
1227 	}
1228 
1229 	ret = DDI_FAILURE;
1230 
1231 out_enable:
1232 	return (ret);
1233 }
1234 
1235 static int
1236 virtio_enable_intx(struct virtio_softc *sc)
1237 {
1238 	int ret;
1239 
1240 	ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1241 	if (ret != DDI_SUCCESS) {
1242 		dev_err(sc->sc_dev, CE_WARN,
1243 		    "Failed to enable interrupt: %d", ret);
1244 	}
1245 
1246 	return (ret);
1247 }
1248 
1249 /*
1250  * We can't enable/disable individual handlers in the INTx case, so do
1251  * the whole bunch even in the MSI case.
1252  */
1253 int
1254 virtio_enable_ints(struct virtio_softc *sc)
1255 {
1256 
1257 	ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX);
1258 
1259 	/* See if we are using MSI. */
1260 	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
1261 	    sc->sc_int_type == DDI_INTR_TYPE_MSI)
1262 		return (virtio_enable_msi(sc));
1263 
1264 	ASSERT(sc->sc_int_type == DDI_INTR_TYPE_FIXED);
1265 	return (virtio_enable_intx(sc));
1266 }
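
/*
 * Interrupt setup is thus two-phase: virtio_register_ints() early in attach,
 * before any device-specific config access (it determines sc_config_offset),
 * and virtio_enable_ints() later, once the virtqueues exist and the handlers
 * may safely run.
 */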
1267 
1268 void
1269 virtio_release_ints(struct virtio_softc *sc)
1270 {
1271 	int i;
1272 	int ret;
1273 
1274 	/* We were running with MSI, unbind them. */
1275 	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
1276 	    sc->sc_int_type == DDI_INTR_TYPE_MSI) {
1277 		/* Unbind all vqs */
1278 		for (i = 0; i < sc->sc_nvqs; i++) {
1279 			ddi_put16(sc->sc_ioh,
1280 			    /* LINTED E_BAD_PTR_CAST_ALIGN */
1281 			    (uint16_t *)(sc->sc_io_addr +
1282 			    VIRTIO_CONFIG_QUEUE_SELECT), i);
1283 
1284 			ddi_put16(sc->sc_ioh,
1285 			    /* LINTED E_BAD_PTR_CAST_ALIGN */
1286 			    (uint16_t *)(sc->sc_io_addr +
1287 			    VIRTIO_CONFIG_QUEUE_VECTOR),
1288 			    VIRTIO_MSI_NO_VECTOR);
1289 		}
1290 		/* And the config */
1291 		/* LINTED E_BAD_PTR_CAST_ALIGN */
1292 		ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1293 		    VIRTIO_CONFIG_CONFIG_VECTOR),
1294 		    VIRTIO_MSI_NO_VECTOR);
1295 
1296 	}
1297 
1298 	/* Disable the interrupts. Either the whole block, or one by one. */
1299 	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1300 		ret = ddi_intr_block_disable(sc->sc_intr_htable,
1301 		    sc->sc_intr_num);
1302 		if (ret != DDI_SUCCESS) {
1303 			dev_err(sc->sc_dev, CE_WARN,
1304 			    "Failed to disable MSIs, won't be able to "
1305 			    "reuse next time");
1306 		}
1307 	} else {
1308 		for (i = 0; i < sc->sc_intr_num; i++) {
1309 			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1310 			if (ret != DDI_SUCCESS) {
1311 				dev_err(sc->sc_dev, CE_WARN,
1312 				    "Failed to disable interrupt %d, "
1313 				    "won't be able to reuse", i);
1314 			}
1315 		}
1316 	}
1317 
1318 
1319 	for (i = 0; i < sc->sc_intr_num; i++) {
1320 		(void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1321 	}
1322 
1323 	for (i = 0; i < sc->sc_intr_num; i++)
1324 		(void) ddi_intr_free(sc->sc_intr_htable[i]);
1325 
1326 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
1327 	    sc->sc_intr_num);
1328 
1329 	/* After disabling interrupts, the config offset is non-MSI-X. */
1330 	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;
1331 }
1332 
1333 /*
1334  * Module linkage information for the kernel.
1335  */
1336 static struct modlmisc modlmisc = {
1337 	&mod_miscops,	/* Type of module */
1338 	"VirtIO common library module",
1339 };
1340 
1341 static struct modlinkage modlinkage = {
1342 	MODREV_1,
1343 	{
1344 		(void *)&modlmisc,
1345 		NULL
1346 	}
1347 };
1348 
1349 int
1350 _init(void)
1351 {
1352 	return (mod_install(&modlinkage));
1353 }
1354 
1355 int
1356 _fini(void)
1357 {
1358 	return (mod_remove(&modlinkage));
1359 }
1360 
1361 int
1362 _info(struct modinfo *modinfop)
1363 {
1364 	return (mod_info(&modlinkage, modinfop));
1365 }
1366