Lines Matching defs:vq — code-search excerpt; each fragment below is prefixed with its line number in the source file (the code appears to be the illumos virtio framework, virtio.c).

239  * Start/stop vq interrupt.  No guarantee.
242 virtio_stop_vq_intr(struct virtqueue *vq)
244 vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
248 virtio_start_vq_intr(struct virtqueue *vq)
250 vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
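The two fragments above are the complete bodies: interrupt suppression is just toggling VRING_AVAIL_F_NO_INTERRUPT in the avail ring's flags word, and the "No guarantee" in the comment is the spec's caveat that the host may ignore the hint. Stitched back together (types per the virtio ring headers):

    /*
     * Reconstructed from the fragments above. The flag is advisory:
     * the host may still deliver interrupts ("No guarantee"), so
     * handlers must tolerate spurious calls.
     */
    void
    virtio_stop_vq_intr(struct virtqueue *vq)
    {
        vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
    }

    void
    virtio_start_vq_intr(struct virtqueue *vq)
    {
        vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
    }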
322 "entry %d, vq %d", entry->qe_index,
334 "entry %d, vq %d,", entry->qe_index,
348 "entry %d, vq %d", entry->qe_index,
368 * Initialize the vq structure.
371 virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
375 int vq_size = vq->vq_num;
376 int indirect_num = vq->vq_indirect_num;
379 list_create(&vq->vq_freelist, sizeof (struct vq_entry),
383 struct vq_entry *entry = &vq->vq_entries[i];
384 list_insert_tail(&vq->vq_freelist, entry);
386 entry->qe_desc = &vq->vq_descs[i];
387 entry->qe_queue = vq;
396 mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
398 mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
400 mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
407 struct vq_entry *entry = &vq->vq_entries[i];
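Fragments 368-407 outline virtio_init_vq: a list_t free list is created over the pre-zeroed entry array, each vq_entry is wired to its descriptor slot, and three mutexes split the locking by role (free list, avail ring, used ring). A sketch under those fragments; the qe_list offset, the qe_index assignment, the interrupt-priority argument, and the elided per-entry indirect-descriptor allocation (whose failure unwind is the loop at 407) are assumptions:

    /*
     * Sketch of the init path implied by fragments 371-400; only the
     * lines shown in the listing are verbatim.
     */
    static int
    virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
    {
        int i;
        int vq_size = vq->vq_num;

        list_create(&vq->vq_freelist, sizeof (struct vq_entry),
            offsetof(struct vq_entry, qe_list));    /* qe_list assumed */

        for (i = 0; i < vq_size; i++) {
            struct vq_entry *entry = &vq->vq_entries[i];
            list_insert_tail(&vq->vq_freelist, entry);
            entry->qe_index = i;                    /* assumed */
            entry->qe_desc = &vq->vq_descs[i];
            entry->qe_queue = vq;
        }

        /* One lock per ring role, at the device's interrupt priority. */
        mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
            DDI_INTR_PRI(sc->sc_intr_prio));        /* priority assumed */
        mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
            DDI_INTR_PRI(sc->sc_intr_prio));
        mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
            DDI_INTR_PRI(sc->sc_intr_prio));

        return (0);
    }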
416 * Allocate/free a vq.
426 struct virtqueue *vq;
440 vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
442 /* size 0 => use native vq size, good for receive queues. */
456 DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
459 "Failed to allocate dma handle for vq %d", index);
463 ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
465 (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
468 "Failed to allocate dma memory for vq %d", index);
472 ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
473 (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
474 DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
477 "Failed to bind dma memory for vq %d", index);
484 ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
486 (void) memset(vq->vq_vaddr, 0, allocsize);
491 /* set the vq address */
495 (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));
498 vq->vq_owner = sc;
499 vq->vq_num = vq_size;
500 vq->vq_index = index;
501 vq->vq_descs = vq->vq_vaddr;
502 vq->vq_availoffset = sizeof (struct vring_desc) * vq_size;
503 vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
504 vq->vq_usedoffset = allocsize1;
505 vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
509 vq->vq_indirect_num = indirect_num;
512 vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
515 ret = virtio_init_vq(sc, vq);
520 "Allocated %d entries for vq %d:%s (%d indirect descs)",
523 return (vq);
526 kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
527 (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
529 ddi_dma_mem_free(&vq->vq_dma_acch);
531 ddi_dma_free_handle(&vq->vq_dma_handle);
533 kmem_free(vq, sizeof (struct virtqueue));
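Fragments 426-533 are the allocate path: one page-aligned, physically contiguous DMA buffer holds the whole split ring (hence the single-cookie and alignment ASSERTs around 484), its page frame number is written to the device (495), and the labels at 526-533 release resources in exact reverse order of acquisition — entries, then DMA unbind, mem free, handle free, then the struct. The sizing arithmetic that produces allocsize, and allocsize1 (which doubles as vq_usedoffset at 504), is not among the matched lines; a sketch of it under the legacy (pre-1.0) virtio layout, with VIRTQUEUE_ALIGN an assumed helper name:

    /*
     * Assumed sizing for the single allocation carved up at fragments
     * 501-505. Legacy virtio requires the used ring to start on a
     * VIRTIO_PAGE_SIZE boundary, hence the alignment of allocsize1.
     */
    #define VIRTQUEUE_ALIGN(n) \
        (((n) + (VIRTIO_PAGE_SIZE - 1)) & ~(VIRTIO_PAGE_SIZE - 1))

        /* Descriptor table plus avail ring (flags, idx, ring[]). */
        allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
            sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
        /* Used ring (flags, idx, ring[]). */
        allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
            sizeof (struct vring_used_elem) * vq_size);
        allocsize = allocsize1 + allocsize2;

Fragments 501-505 then slice this one buffer: descriptors at the base, avail ring immediately after the descriptor table, used ring at allocsize1.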
539 virtio_free_vq(struct virtqueue *vq)
541 struct virtio_softc *sc = vq->vq_owner;
548 vq->vq_index);
554 for (i = 0; i < vq->vq_num; i++) {
555 struct vq_entry *entry = &vq->vq_entries[i];
560 kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
562 (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
563 ddi_dma_mem_free(&vq->vq_dma_acch);
564 ddi_dma_free_handle(&vq->vq_dma_handle);
566 mutex_destroy(&vq->vq_used_lock);
567 mutex_destroy(&vq->vq_avail_lock);
568 mutex_destroy(&vq->vq_freelist_lock);
570 kmem_free(vq, sizeof (struct virtqueue));
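Teardown mirrors setup in reverse. Fragments 539-570 reassembled below; the register-offset names for unhooking the queue from the device, the sc_ioh/sc_io_addr softc fields, and the qe_indirect_descs field with its virtio_free_indirect() helper are assumed names:

    void
    virtio_free_vq(struct virtqueue *vq)
    {
        struct virtio_softc *sc = vq->vq_owner;
        int i;

        /* Unhook the ring from the device: select queue, write address 0. */
        ddi_put16(sc->sc_ioh,
            (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
            vq->vq_index);
        ddi_put32(sc->sc_ioh,
            (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

        /* Per-entry indirect buffers, if configured (assumed helper/field). */
        for (i = 0; i < vq->vq_num; i++) {
            struct vq_entry *entry = &vq->vq_entries[i];
            if (entry->qe_indirect_descs != NULL)
                virtio_free_indirect(entry);
        }

        kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

        (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
        ddi_dma_mem_free(&vq->vq_dma_acch);
        ddi_dma_free_handle(&vq->vq_dma_handle);

        mutex_destroy(&vq->vq_used_lock);
        mutex_destroy(&vq->vq_avail_lock);
        mutex_destroy(&vq->vq_freelist_lock);

        kmem_free(vq, sizeof (struct virtqueue));
    }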
577 vq_alloc_entry(struct virtqueue *vq)
581 mutex_enter(&vq->vq_freelist_lock);
582 if (list_is_empty(&vq->vq_freelist)) {
583 mutex_exit(&vq->vq_freelist_lock);
586 qe = list_remove_head(&vq->vq_freelist);
588 ASSERT(vq->vq_used_entries >= 0);
589 vq->vq_used_entries++;
591 mutex_exit(&vq->vq_freelist_lock);
601 vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
603 mutex_enter(&vq->vq_freelist_lock);
605 list_insert_head(&vq->vq_freelist, qe);
606 vq->vq_used_entries--;
607 ASSERT(vq->vq_used_entries >= 0);
608 mutex_exit(&vq->vq_freelist_lock);
612 * We (intentionally) don't have a global vq mutex, so you are
617 vq_num_used(struct virtqueue *vq)
619 /* vq->vq_freelist_lock would not help here. */
620 return (vq->vq_used_entries);
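Fragments 577-620 give the descriptor-slot allocator: vq_alloc_entry pops from the free list under vq_freelist_lock and fails soft (returns NULL) when the ring is full, vq_free_entry pushes back, and vq_num_used deliberately reads the counter unlocked — per the comments at 612-619, holding vq_freelist_lock would not make the snapshot any less stale, so callers may only treat it as a hint. Reassembled (return types and the descriptor reset are assumptions):

    /* Pop a free descriptor slot; soft failure, never blocks. */
    struct vq_entry *
    vq_alloc_entry(struct virtqueue *vq)
    {
        struct vq_entry *qe;

        mutex_enter(&vq->vq_freelist_lock);
        if (list_is_empty(&vq->vq_freelist)) {
            mutex_exit(&vq->vq_freelist_lock);
            return (NULL);
        }
        qe = list_remove_head(&vq->vq_freelist);
        ASSERT(vq->vq_used_entries >= 0);
        vq->vq_used_entries++;
        mutex_exit(&vq->vq_freelist_lock);

        qe->qe_next = NULL;                 /* assumed reset */
        (void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));
        return (qe);
    }

    void
    vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
    {
        mutex_enter(&vq->vq_freelist_lock);
        list_insert_head(&vq->vq_freelist, qe);
        vq->vq_used_entries--;
        ASSERT(vq->vq_used_entries >= 0);
        mutex_exit(&vq->vq_freelist_lock);
    }

    /* Unlocked read: a momentary snapshot, usable only as a hint. */
    uint_t
    vq_num_used(struct virtqueue *vq)
    {
        /* vq->vq_freelist_lock would not help here. */
        return (vq->vq_used_entries);
    }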
678 virtio_sync_vq(struct virtqueue *vq)
680 struct virtio_softc *vsc = vq->vq_owner;
685 vq->vq_avail->idx = vq->vq_avail_idx;
693 if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
698 vq->vq_index);
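Fragments 678-698 are the publish/notify step. The ordering matters twice over: ring slots must be globally visible before the avail index that covers them, and the index before the doorbell; the host, for its part, sets VRING_USED_F_NO_NOTIFY while it is already polling so the guest can skip the register write. A sketch assuming illumos membar_producer()/membar_consumer() placement and assumed names for the softc access handle and notify-register offset:

    void
    virtio_sync_vq(struct virtqueue *vq)
    {
        struct virtio_softc *vsc = vq->vq_owner;

        /* Ring entries must be visible before the index covering them. */
        membar_producer();
        vq->vq_avail->idx = vq->vq_avail_idx;
        membar_producer();

        /* Host sets VRING_USED_F_NO_NOTIFY while it is already polling. */
        membar_consumer();
        if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
            ddi_put16(vsc->sc_ioh,
                (uint16_t *)(vsc->sc_io_addr + VIRTIO_CONFIG_QUEUE_NOTIFY),
                vq->vq_index);
        }
    }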
705 struct virtqueue *vq = qe->qe_queue;
750 mutex_enter(&vq->vq_avail_lock);
751 idx = vq->vq_avail_idx;
752 vq->vq_avail_idx++;
756 vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
760 virtio_sync_vq(vq);
762 mutex_exit(&vq->vq_avail_lock);
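Fragments 705-762 are the enqueue path: a shadow index, vq_avail_idx, is advanced under vq_avail_lock, the chain head's descriptor number goes into slot idx % vq_num, and virtio_sync_vq() publishes while the lock is still held, so entries reach the device-visible index in ring order. A sketch of the tail of that path; the signature, the sync flag, the fence placement, and the elided descriptor-chain setup between 705 and 750 are assumptions:

    void
    virtio_push_chain(struct vq_entry *head, boolean_t sync)
    {
        struct virtqueue *vq = head->qe_queue;
        uint16_t idx;

        /* (descriptor chain setup elided) */

        /* Put the chain head on the avail ring, in index order. */
        mutex_enter(&vq->vq_avail_lock);
        idx = vq->vq_avail_idx;
        vq->vq_avail_idx++;

        /* Descriptors must be visible before the slot that names them. */
        membar_producer();
        vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;

        if (sync)
            virtio_sync_vq(vq);
        mutex_exit(&vq->vq_avail_lock);
    }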
769 virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
775 mutex_enter(&vq->vq_used_lock);
778 if (vq->vq_used_idx == vq->vq_used->idx) {
779 mutex_exit(&vq->vq_used_lock);
783 usedidx = vq->vq_used_idx;
784 vq->vq_used_idx++;
785 mutex_exit(&vq->vq_used_lock);
787 usedidx %= vq->vq_num;
792 slot = vq->vq_used->ring[usedidx].id;
793 *len = vq->vq_used->ring[usedidx].len;
795 head = &vq->vq_entries[slot];
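Fragments 769-795 are the matching dequeue: the driver's vq_used_idx cursor chases the device's vq_used->idx, and equality means the ring is drained for now (779). Both indices count modulo 2^16 and are reduced mod vq_num only when indexing the ring array (787). Reassembled, with the return type and the consumer fence assumed:

    struct vq_entry *
    virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
    {
        struct vq_entry *head;
        int slot;
        int usedidx;

        mutex_enter(&vq->vq_used_lock);

        /* No new used entries: nothing to pull. */
        if (vq->vq_used_idx == vq->vq_used->idx) {
            mutex_exit(&vq->vq_used_lock);
            return (NULL);
        }

        usedidx = vq->vq_used_idx;
        vq->vq_used_idx++;
        mutex_exit(&vq->vq_used_lock);

        usedidx %= vq->vq_num;

        /* Host wrote the element before bumping idx; pair with a fence. */
        membar_consumer();

        slot = vq->vq_used->ring[usedidx].id;
        *len = vq->vq_used->ring[usedidx].len;

        head = &vq->vq_entries[slot];
        return (head);
    }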
804 struct virtqueue *vq = qe->qe_queue;
809 ASSERT(qe->qe_queue == vq);
811 vq_free_entry(vq, qe);
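Fragments 804-811 sit inside virtio_free_chain, which walks a completed chain via its next links and returns every entry to the free list. A sketch (the qe_next field name and the local temporary are assumptions beyond the lines shown):

    void
    virtio_free_chain(struct vq_entry *qe)
    {
        struct vq_entry *tmp;
        struct virtqueue *vq = qe->qe_queue;

        do {
            ASSERT(qe->qe_queue == vq);     /* no cross-queue chains */
            tmp = qe->qe_next;              /* assumed link field */
            vq_free_entry(vq, qe);
            qe = tmp;
        } while (qe != NULL);
    }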
891 /* Add the vq handlers */
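Fragment 891 is where per-queue interrupt handlers get registered. A hypothetical leaf-driver handler (not from the listing; my_rx_handler and my_process are invented names) showing how the pieces above compose — drain with virtio_pull_chain(), recycle with virtio_free_chain():

    static uint_t
    my_rx_handler(caddr_t arg1, caddr_t arg2)
    {
        struct virtqueue *vq = (struct virtqueue *)arg1;
        struct vq_entry *qe;
        uint32_t len;

        /* Drain everything the host has completed since the last run. */
        while ((qe = virtio_pull_chain(vq, &len)) != NULL) {
            my_process(qe, len);        /* invented consumer */
            virtio_free_chain(qe);      /* return the chain's slots */
        }

        return (DDI_INTR_CLAIMED);
    }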