Lines matching refs:vq — cross-reference of every use of the identifier vq in drivers/virtio/virtio_ring.c. Each entry gives the source line number, the matching line, and either the enclosing function or the symbol's role on that line (argument, local, member).
22 dev_err(&(_vq)->vq.vdev->dev, \
23 "%s:"fmt, (_vq)->vq.name, ##args); \
31 (_vq)->vq.name, (_vq)->in_use); \
59 dev_err(&_vq->vq.vdev->dev, \
60 "%s:"fmt, (_vq)->vq.name, ##args); \
63 #define START_USE(vq) argument
64 #define END_USE(vq) argument
65 #define LAST_ADD_TIME_UPDATE(vq) argument
66 #define LAST_ADD_TIME_CHECK(vq) argument
67 #define LAST_ADD_TIME_INVALID(vq) argument
163 struct virtqueue vq; member
208 bool (*notify)(struct virtqueue *vq);
233 #define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)
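to_vvq() at line 233 recovers the enclosing vring_virtqueue from the embedded struct virtqueue member declared at line 163; container_of_const is the const-preserving kernel variant of container_of. A self-contained sketch of the underlying pattern (outer/inner are illustrative names, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer {
        int id;
        struct inner in;        /* embedded member, like struct virtqueue vq */
};

int main(void)
{
        struct outer o = { .id = 42 };
        struct inner *ip = &o.in;

        /* walk back from the member pointer to the containing object */
        struct outer *op = container_of(ip, struct outer, in);
        printf("%d\n", op->id); /* prints 42 */
        return 0;
}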
235 static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, in virtqueue_use_indirect() argument
242 return (vq->indirect && total_sg > 1 && vq->vq.num_free); in virtqueue_use_indirect()
357 static struct device *vring_dma_dev(const struct vring_virtqueue *vq) in vring_dma_dev() argument
359 return vq->dma_dev; in vring_dma_dev()
363 static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg, in vring_map_one_sg() argument
375 if (!vq->use_dma_api) { in vring_map_one_sg()
391 *addr = dma_map_page(vring_dma_dev(vq), in vring_map_one_sg()
395 if (dma_mapping_error(vring_dma_dev(vq), *addr)) in vring_map_one_sg()
401 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, in vring_map_single() argument
405 if (!vq->use_dma_api) in vring_map_single()
408 return dma_map_single(vring_dma_dev(vq), in vring_map_single()
412 static int vring_mapping_error(const struct vring_virtqueue *vq, in vring_mapping_error() argument
415 if (!vq->use_dma_api) in vring_mapping_error()
418 return dma_mapping_error(vring_dma_dev(vq), addr); in vring_mapping_error()
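All three helpers above gate on vq->use_dma_api: when the transport can address guest memory directly, no DMA API call is made and the "mapping" is simply the buffer's physical address, so no mapping error is possible either. A rough userspace stand-in for that short-circuit (sg_sketch and map_one_sg are illustrative names, not the kernel's scatterlist API; the real code calls dma_map_page() and dma_mapping_error() on the other branch):

#include <stdint.h>

typedef uint64_t dma_addr_t;

struct sg_sketch {              /* illustrative, not struct scatterlist */
        uint64_t phys;
        uint32_t len;
};

static int map_one_sg(int use_dma_api, const struct sg_sketch *sg,
                      dma_addr_t *addr, uint32_t *len)
{
        *len = sg->len;

        if (!use_dma_api) {
                /* device sees guest-physical addresses directly */
                *addr = sg->phys;
                return 0;
        }

        /* real code: dma_map_page() + dma_mapping_error(); sketch stops here */
        return -1;
}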
421 static void virtqueue_init(struct vring_virtqueue *vq, u32 num) in virtqueue_init() argument
423 vq->vq.num_free = num; in virtqueue_init()
425 if (vq->packed_ring) in virtqueue_init()
426 vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); in virtqueue_init()
428 vq->last_used_idx = 0; in virtqueue_init()
430 vq->event_triggered = false; in virtqueue_init()
431 vq->num_added = 0; in virtqueue_init()
434 vq->in_use = false; in virtqueue_init()
435 vq->last_add_time_valid = false; in virtqueue_init()
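Line 426 shows that for a packed ring last_used_idx is not a plain index: the used wrap counter is folded into bit VRING_PACKED_EVENT_F_WRAP_CTR (15) of the same 16-bit word, so initialization sets index 0 with the counter raised. A sketch of the encoding, mirroring the shape of the kernel's packed_last_used()/packed_used_wrap_counter() helpers:

#include <stdbool.h>
#include <stdint.h>

#define VRING_PACKED_EVENT_F_WRAP_CTR   15

static inline uint16_t packed_last_used(uint16_t last_used_idx)
{
        return last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
}

static inline bool packed_used_wrap_counter(uint16_t last_used_idx)
{
        return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}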
444 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, in vring_unmap_one_split() argument
452 if (!vq->use_dma_api) in vring_unmap_one_split()
455 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_one_split()
461 if (!vring_need_unmap_buffer(vq, extra)) in vring_unmap_one_split()
464 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_one_split()
504 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, in virtqueue_add_desc_split() argument
514 desc[i].flags = cpu_to_virtio16(vq->vdev, flags); in virtqueue_add_desc_split()
515 desc[i].addr = cpu_to_virtio64(vq->vdev, addr); in virtqueue_add_desc_split()
516 desc[i].len = cpu_to_virtio32(vq->vdev, len); in virtqueue_add_desc_split()
524 desc[i].next = cpu_to_virtio16(vq->vdev, next); in virtqueue_add_desc_split()
539 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_split() local
547 START_USE(vq); in virtqueue_add_split()
550 BUG_ON(ctx && vq->indirect); in virtqueue_add_split()
552 if (unlikely(vq->broken)) { in virtqueue_add_split()
553 END_USE(vq); in virtqueue_add_split()
557 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_split()
561 head = vq->free_head; in virtqueue_add_split()
563 if (virtqueue_use_indirect(vq, total_sg)) in virtqueue_add_split()
567 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); in virtqueue_add_split()
579 desc = vq->split.vring.desc; in virtqueue_add_split()
580 extra = vq->split.desc_extra; in virtqueue_add_split()
585 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_split()
587 descs_used, vq->vq.num_free); in virtqueue_add_split()
592 vq->notify(&vq->vq); in virtqueue_add_split()
595 END_USE(vq); in virtqueue_add_split()
604 if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped)) in virtqueue_add_split()
621 if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped)) in virtqueue_add_split()
636 if (!indirect && vring_need_unmap_buffer(vq, &extra[prev])) in virtqueue_add_split()
637 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= in virtqueue_add_split()
643 vq, desc, total_sg * sizeof(struct vring_desc), in virtqueue_add_split()
645 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
648 virtqueue_add_desc_split(_vq, vq->split.vring.desc, in virtqueue_add_split()
649 vq->split.desc_extra, in virtqueue_add_split()
656 vq->vq.num_free -= descs_used; in virtqueue_add_split()
660 vq->free_head = vq->split.desc_extra[head].next; in virtqueue_add_split()
662 vq->free_head = i; in virtqueue_add_split()
665 vq->split.desc_state[head].data = data; in virtqueue_add_split()
667 vq->split.desc_state[head].indir_desc = desc; in virtqueue_add_split()
669 vq->split.desc_state[head].indir_desc = ctx; in virtqueue_add_split()
673 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); in virtqueue_add_split()
674 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
678 virtio_wmb(vq->weak_barriers); in virtqueue_add_split()
679 vq->split.avail_idx_shadow++; in virtqueue_add_split()
680 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
681 vq->split.avail_idx_shadow); in virtqueue_add_split()
682 vq->num_added++; in virtqueue_add_split()
684 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_split()
685 END_USE(vq); in virtqueue_add_split()
689 if (unlikely(vq->num_added == (1 << 16) - 1)) in virtqueue_add_split()
706 i = vring_unmap_one_split(vq, &extra[i]); in virtqueue_add_split()
712 END_USE(vq); in virtqueue_add_split()
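The publish sequence inside virtqueue_add_split() (lines 673-682) is the heart of the split ring: pick an avail slot from the shadow index masked by the power-of-two ring size, store the descriptor head, then barrier before bumping avail->idx so the device never observes the index ahead of the entry. A simplified sketch under those assumptions (publish_head is a made-up name; types are reduced and virtio_wmb() is replaced by a plain release fence):

#include <stdint.h>

struct avail_sketch {
        uint16_t flags;
        uint16_t idx;           /* device reads this */
        uint16_t ring[];        /* descriptor heads */
};

static void publish_head(struct avail_sketch *avail, uint16_t *shadow,
                         uint16_t num, uint16_t head)
{
        /* num is a power of two, so the mask selects the next free slot */
        avail->ring[*shadow & (num - 1)] = head;

        /* entry must be visible before idx moves (virtio_wmb() in the kernel) */
        __atomic_thread_fence(__ATOMIC_RELEASE);

        avail->idx = ++(*shadow);
}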
718 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_split() local
722 START_USE(vq); in virtqueue_kick_prepare_split()
725 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_split()
727 old = vq->split.avail_idx_shadow - vq->num_added; in virtqueue_kick_prepare_split()
728 new = vq->split.avail_idx_shadow; in virtqueue_kick_prepare_split()
729 vq->num_added = 0; in virtqueue_kick_prepare_split()
731 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_split()
732 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_split()
734 if (vq->event) { in virtqueue_kick_prepare_split()
736 vring_avail_event(&vq->split.vring)), in virtqueue_kick_prepare_split()
739 needs_kick = !(vq->split.vring.used->flags & in virtqueue_kick_prepare_split()
743 END_USE(vq); in virtqueue_kick_prepare_split()
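The needs_kick decision at lines 734-739 uses vring_need_event() from include/uapi/linux/virtio_ring.h when VIRTIO_RING_F_EVENT_IDX is negotiated: notify only if the device's event index falls in the window (old, new] of buffers added since the last kick. Unsigned 16-bit wraparound reduces the window test to a single comparison:

#include <stdint.h>

/* same shape as vring_need_event() in include/uapi/linux/virtio_ring.h */
static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx,
                                   uint16_t old)
{
        /* true iff event_idx lies in the half-open window (old, new_idx] */
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}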
747 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, in detach_buf_split() argument
752 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); in detach_buf_split()
755 vq->split.desc_state[head].data = NULL; in detach_buf_split()
757 extra = vq->split.desc_extra; in detach_buf_split()
762 while (vq->split.vring.desc[i].flags & nextflag) { in detach_buf_split()
763 vring_unmap_one_split(vq, &extra[i]); in detach_buf_split()
764 i = vq->split.desc_extra[i].next; in detach_buf_split()
765 vq->vq.num_free++; in detach_buf_split()
768 vring_unmap_one_split(vq, &extra[i]); in detach_buf_split()
769 vq->split.desc_extra[i].next = vq->free_head; in detach_buf_split()
770 vq->free_head = head; in detach_buf_split()
773 vq->vq.num_free++; in detach_buf_split()
775 if (vq->indirect) { in detach_buf_split()
777 vq->split.desc_state[head].indir_desc; in detach_buf_split()
783 len = vq->split.desc_extra[head].len; in detach_buf_split()
785 BUG_ON(!(vq->split.desc_extra[head].flags & in detach_buf_split()
793 if (vq->use_dma_api) { in detach_buf_split()
795 vring_unmap_one_split(vq, &extra[j]); in detach_buf_split()
799 vq->split.desc_state[head].indir_desc = NULL; in detach_buf_split()
801 *ctx = vq->split.desc_state[head].indir_desc; in detach_buf_split()
805 static bool more_used_split(const struct vring_virtqueue *vq) in more_used_split() argument
807 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, in more_used_split()
808 vq->split.vring.used->idx); in more_used_split()
815 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_split() local
820 START_USE(vq); in virtqueue_get_buf_ctx_split()
822 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_split()
823 END_USE(vq); in virtqueue_get_buf_ctx_split()
827 if (!more_used_split(vq)) { in virtqueue_get_buf_ctx_split()
829 END_USE(vq); in virtqueue_get_buf_ctx_split()
834 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_split()
836 last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); in virtqueue_get_buf_ctx_split()
838 vq->split.vring.used->ring[last_used].id); in virtqueue_get_buf_ctx_split()
840 vq->split.vring.used->ring[last_used].len); in virtqueue_get_buf_ctx_split()
842 if (unlikely(i >= vq->split.vring.num)) { in virtqueue_get_buf_ctx_split()
843 BAD_RING(vq, "id %u out of range\n", i); in virtqueue_get_buf_ctx_split()
846 if (unlikely(!vq->split.desc_state[i].data)) { in virtqueue_get_buf_ctx_split()
847 BAD_RING(vq, "id %u is not a head!\n", i); in virtqueue_get_buf_ctx_split()
852 ret = vq->split.desc_state[i].data; in virtqueue_get_buf_ctx_split()
853 detach_buf_split(vq, i, ctx); in virtqueue_get_buf_ctx_split()
854 vq->last_used_idx++; in virtqueue_get_buf_ctx_split()
858 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) in virtqueue_get_buf_ctx_split()
859 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_split()
860 &vring_used_event(&vq->split.vring), in virtqueue_get_buf_ctx_split()
861 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
863 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_split()
865 END_USE(vq); in virtqueue_get_buf_ctx_split()
871 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_split() local
873 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_disable_cb_split()
874 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_cb_split()
880 if (vq->event_triggered) in virtqueue_disable_cb_split()
883 if (vq->event) in virtqueue_disable_cb_split()
885 vring_used_event(&vq->split.vring) = 0x0; in virtqueue_disable_cb_split()
887 vq->split.vring.avail->flags = in virtqueue_disable_cb_split()
889 vq->split.avail_flags_shadow); in virtqueue_disable_cb_split()
895 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_split() local
898 START_USE(vq); in virtqueue_enable_cb_prepare_split()
905 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_prepare_split()
906 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_prepare_split()
907 if (!vq->event) in virtqueue_enable_cb_prepare_split()
908 vq->split.vring.avail->flags = in virtqueue_enable_cb_prepare_split()
910 vq->split.avail_flags_shadow); in virtqueue_enable_cb_prepare_split()
912 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
913 last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare_split()
914 END_USE(vq); in virtqueue_enable_cb_prepare_split()
920 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_split() local
923 vq->split.vring.used->idx); in virtqueue_poll_split()
928 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_split() local
931 START_USE(vq); in virtqueue_enable_cb_delayed_split()
938 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_delayed_split()
939 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_delayed_split()
940 if (!vq->event) in virtqueue_enable_cb_delayed_split()
941 vq->split.vring.avail->flags = in virtqueue_enable_cb_delayed_split()
943 vq->split.avail_flags_shadow); in virtqueue_enable_cb_delayed_split()
946 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed_split()
948 virtio_store_mb(vq->weak_barriers, in virtqueue_enable_cb_delayed_split()
949 &vring_used_event(&vq->split.vring), in virtqueue_enable_cb_delayed_split()
950 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
952 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
953 - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed_split()
954 END_USE(vq); in virtqueue_enable_cb_delayed_split()
958 END_USE(vq); in virtqueue_enable_cb_delayed_split()
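Line 946 is the delayed-interrupt heuristic: set used_event far enough ahead that roughly three quarters of the buffers currently in flight complete before the next interrupt fires. A one-function sketch of that arithmetic (delayed_used_event is a made-up name; the 16-bit wraparound matches the kernel's u16 math):

#include <stdint.h>

static uint16_t delayed_used_event(uint16_t avail_idx_shadow,
                                   uint16_t last_used_idx)
{
        /* in-flight buffers, scaled to 3/4 as at line 946 */
        uint16_t bufs = (uint16_t)(avail_idx_shadow - last_used_idx) * 3 / 4;

        return last_used_idx + bufs;    /* value stored in used_event */
}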
964 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_split() local
968 START_USE(vq); in virtqueue_detach_unused_buf_split()
970 for (i = 0; i < vq->split.vring.num; i++) { in virtqueue_detach_unused_buf_split()
971 if (!vq->split.desc_state[i].data) in virtqueue_detach_unused_buf_split()
974 buf = vq->split.desc_state[i].data; in virtqueue_detach_unused_buf_split()
975 detach_buf_split(vq, i, NULL); in virtqueue_detach_unused_buf_split()
976 vq->split.avail_idx_shadow--; in virtqueue_detach_unused_buf_split()
977 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
978 vq->split.avail_idx_shadow); in virtqueue_detach_unused_buf_split()
979 END_USE(vq); in virtqueue_detach_unused_buf_split()
983 BUG_ON(vq->vq.num_free != vq->split.vring.num); in virtqueue_detach_unused_buf_split()
985 END_USE(vq); in virtqueue_detach_unused_buf_split()
990 struct vring_virtqueue *vq) in virtqueue_vring_init_split() argument
994 vdev = vq->vq.vdev; in virtqueue_vring_init_split()
1000 if (!vq->vq.callback) { in virtqueue_vring_init_split()
1002 if (!vq->event) in virtqueue_vring_init_split()
1008 static void virtqueue_reinit_split(struct vring_virtqueue *vq) in virtqueue_reinit_split() argument
1012 num = vq->split.vring.num; in virtqueue_reinit_split()
1014 vq->split.vring.avail->flags = 0; in virtqueue_reinit_split()
1015 vq->split.vring.avail->idx = 0; in virtqueue_reinit_split()
1018 vq->split.vring.avail->ring[num] = 0; in virtqueue_reinit_split()
1020 vq->split.vring.used->flags = 0; in virtqueue_reinit_split()
1021 vq->split.vring.used->idx = 0; in virtqueue_reinit_split()
1024 *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0; in virtqueue_reinit_split()
1026 virtqueue_init(vq, num); in virtqueue_reinit_split()
1028 virtqueue_vring_init_split(&vq->split, vq); in virtqueue_reinit_split()
1031 static void virtqueue_vring_attach_split(struct vring_virtqueue *vq, in virtqueue_vring_attach_split() argument
1034 vq->split = *vring_split; in virtqueue_vring_attach_split()
1037 vq->free_head = 0; in virtqueue_vring_attach_split()
1139 struct vring_virtqueue *vq; in __vring_new_virtqueue_split() local
1142 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in __vring_new_virtqueue_split()
1143 if (!vq) in __vring_new_virtqueue_split()
1146 vq->packed_ring = false; in __vring_new_virtqueue_split()
1147 vq->vq.callback = callback; in __vring_new_virtqueue_split()
1148 vq->vq.vdev = vdev; in __vring_new_virtqueue_split()
1149 vq->vq.name = name; in __vring_new_virtqueue_split()
1150 vq->vq.index = index; in __vring_new_virtqueue_split()
1151 vq->vq.reset = false; in __vring_new_virtqueue_split()
1152 vq->we_own_ring = false; in __vring_new_virtqueue_split()
1153 vq->notify = notify; in __vring_new_virtqueue_split()
1154 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue_split()
1156 vq->broken = true; in __vring_new_virtqueue_split()
1158 vq->broken = false; in __vring_new_virtqueue_split()
1160 vq->dma_dev = dma_dev; in __vring_new_virtqueue_split()
1161 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue_split()
1163 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue_split()
1165 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue_split()
1168 vq->weak_barriers = false; in __vring_new_virtqueue_split()
1172 kfree(vq); in __vring_new_virtqueue_split()
1176 virtqueue_vring_init_split(vring_split, vq); in __vring_new_virtqueue_split()
1178 virtqueue_init(vq, vring_split->vring.num); in __vring_new_virtqueue_split()
1179 virtqueue_vring_attach_split(vq, vring_split); in __vring_new_virtqueue_split()
1182 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue_split()
1184 return &vq->vq; in __vring_new_virtqueue_split()
1201 struct virtqueue *vq; in vring_create_virtqueue_split() local
1209 vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers, in vring_create_virtqueue_split()
1211 if (!vq) { in vring_create_virtqueue_split()
1216 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_split()
1218 return vq; in vring_create_virtqueue_split()
1224 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_resize_split() local
1229 vq->split.vring_align, in virtqueue_resize_split()
1230 vq->split.may_reduce_num, in virtqueue_resize_split()
1231 vring_dma_dev(vq)); in virtqueue_resize_split()
1239 vring_free(&vq->vq); in virtqueue_resize_split()
1241 virtqueue_vring_init_split(&vring_split, vq); in virtqueue_resize_split()
1243 virtqueue_init(vq, vring_split.vring.num); in virtqueue_resize_split()
1244 virtqueue_vring_attach_split(vq, &vring_split); in virtqueue_resize_split()
1249 vring_free_split(&vring_split, vdev, vring_dma_dev(vq)); in virtqueue_resize_split()
1251 virtqueue_reinit_split(vq); in virtqueue_resize_split()
1269 static void vring_unmap_extra_packed(const struct vring_virtqueue *vq, in vring_unmap_extra_packed() argument
1277 if (!vq->use_dma_api) in vring_unmap_extra_packed()
1280 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_extra_packed()
1285 if (!vring_need_unmap_buffer(vq, extra)) in vring_unmap_extra_packed()
1288 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_extra_packed()
1323 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, in virtqueue_add_indirect_packed() argument
1339 head = vq->packed.next_avail_idx; in virtqueue_add_indirect_packed()
1346 if (unlikely(vq->vq.num_free < 1)) { in virtqueue_add_indirect_packed()
1349 END_USE(vq); in virtqueue_add_indirect_packed()
1354 id = vq->free_head; in virtqueue_add_indirect_packed()
1355 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_indirect_packed()
1359 if (vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_indirect_packed()
1369 if (unlikely(vq->use_dma_api)) { in virtqueue_add_indirect_packed()
1380 addr = vring_map_single(vq, desc, in virtqueue_add_indirect_packed()
1383 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1386 vq->packed.vring.desc[head].addr = cpu_to_le64(addr); in virtqueue_add_indirect_packed()
1387 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * in virtqueue_add_indirect_packed()
1389 vq->packed.vring.desc[head].id = cpu_to_le16(id); in virtqueue_add_indirect_packed()
1391 if (vq->use_dma_api) { in virtqueue_add_indirect_packed()
1392 vq->packed.desc_extra[id].addr = addr; in virtqueue_add_indirect_packed()
1393 vq->packed.desc_extra[id].len = total_sg * in virtqueue_add_indirect_packed()
1395 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1396 vq->packed.avail_used_flags; in virtqueue_add_indirect_packed()
1404 virtio_wmb(vq->weak_barriers); in virtqueue_add_indirect_packed()
1405 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1406 vq->packed.avail_used_flags); in virtqueue_add_indirect_packed()
1409 vq->vq.num_free -= 1; in virtqueue_add_indirect_packed()
1413 if (n >= vq->packed.vring.num) { in virtqueue_add_indirect_packed()
1415 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_indirect_packed()
1416 vq->packed.avail_used_flags ^= in virtqueue_add_indirect_packed()
1420 vq->packed.next_avail_idx = n; in virtqueue_add_indirect_packed()
1421 vq->free_head = vq->packed.desc_extra[id].next; in virtqueue_add_indirect_packed()
1424 vq->packed.desc_state[id].num = 1; in virtqueue_add_indirect_packed()
1425 vq->packed.desc_state[id].data = data; in virtqueue_add_indirect_packed()
1426 vq->packed.desc_state[id].indir_desc = desc; in virtqueue_add_indirect_packed()
1427 vq->packed.desc_state[id].last = id; in virtqueue_add_indirect_packed()
1429 vq->num_added += 1; in virtqueue_add_indirect_packed()
1431 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_indirect_packed()
1432 END_USE(vq); in virtqueue_add_indirect_packed()
1440 vring_unmap_extra_packed(vq, &extra[i]); in virtqueue_add_indirect_packed()
1444 END_USE(vq); in virtqueue_add_indirect_packed()
1458 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_packed() local
1466 START_USE(vq); in virtqueue_add_packed()
1469 BUG_ON(ctx && vq->indirect); in virtqueue_add_packed()
1471 if (unlikely(vq->broken)) { in virtqueue_add_packed()
1472 END_USE(vq); in virtqueue_add_packed()
1476 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_packed()
1480 if (virtqueue_use_indirect(vq, total_sg)) { in virtqueue_add_packed()
1481 err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in virtqueue_add_packed()
1484 END_USE(vq); in virtqueue_add_packed()
1491 head = vq->packed.next_avail_idx; in virtqueue_add_packed()
1492 avail_used_flags = vq->packed.avail_used_flags; in virtqueue_add_packed()
1494 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); in virtqueue_add_packed()
1496 desc = vq->packed.vring.desc; in virtqueue_add_packed()
1500 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_packed()
1502 descs_used, vq->vq.num_free); in virtqueue_add_packed()
1503 END_USE(vq); in virtqueue_add_packed()
1507 id = vq->free_head; in virtqueue_add_packed()
1508 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_packed()
1516 if (vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_packed()
1521 flags = cpu_to_le16(vq->packed.avail_used_flags | in virtqueue_add_packed()
1533 if (unlikely(vq->use_dma_api)) { in virtqueue_add_packed()
1534 vq->packed.desc_extra[curr].addr = premapped ? in virtqueue_add_packed()
1536 vq->packed.desc_extra[curr].len = len; in virtqueue_add_packed()
1537 vq->packed.desc_extra[curr].flags = in virtqueue_add_packed()
1541 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1543 if ((unlikely(++i >= vq->packed.vring.num))) { in virtqueue_add_packed()
1545 vq->packed.avail_used_flags ^= in virtqueue_add_packed()
1553 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_packed()
1556 vq->vq.num_free -= descs_used; in virtqueue_add_packed()
1559 vq->packed.next_avail_idx = i; in virtqueue_add_packed()
1560 vq->free_head = curr; in virtqueue_add_packed()
1563 vq->packed.desc_state[id].num = descs_used; in virtqueue_add_packed()
1564 vq->packed.desc_state[id].data = data; in virtqueue_add_packed()
1565 vq->packed.desc_state[id].indir_desc = ctx; in virtqueue_add_packed()
1566 vq->packed.desc_state[id].last = prev; in virtqueue_add_packed()
1573 virtio_wmb(vq->weak_barriers); in virtqueue_add_packed()
1574 vq->packed.vring.desc[head].flags = head_flags; in virtqueue_add_packed()
1575 vq->num_added += descs_used; in virtqueue_add_packed()
1577 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_packed()
1578 END_USE(vq); in virtqueue_add_packed()
1585 curr = vq->free_head; in virtqueue_add_packed()
1587 vq->packed.avail_used_flags = avail_used_flags; in virtqueue_add_packed()
1592 vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); in virtqueue_add_packed()
1593 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1595 if (i >= vq->packed.vring.num) in virtqueue_add_packed()
1599 END_USE(vq); in virtqueue_add_packed()
1605 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_packed() local
1616 START_USE(vq); in virtqueue_kick_prepare_packed()
1622 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_packed()
1624 old = vq->packed.next_avail_idx - vq->num_added; in virtqueue_kick_prepare_packed()
1625 new = vq->packed.next_avail_idx; in virtqueue_kick_prepare_packed()
1626 vq->num_added = 0; in virtqueue_kick_prepare_packed()
1628 snapshot.u32 = *(u32 *)vq->packed.vring.device; in virtqueue_kick_prepare_packed()
1631 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_packed()
1632 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_packed()
1643 if (wrap_counter != vq->packed.avail_wrap_counter) in virtqueue_kick_prepare_packed()
1644 event_idx -= vq->packed.vring.num; in virtqueue_kick_prepare_packed()
1648 END_USE(vq); in virtqueue_kick_prepare_packed()
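Lines 1628-1644 decode the device's event suppression word for the packed ring: a 16-bit snapshot whose low 15 bits are an event index and whose top bit is a wrap counter. If that counter disagrees with the driver's avail wrap counter, the event index refers to the previous lap and is shifted down by the ring size before the same (old, new] window test as the split ring. A sketch of the decode step (decode_event_idx is an illustrative name):

#include <stdbool.h>
#include <stdint.h>

#define VRING_PACKED_EVENT_F_WRAP_CTR   15

static uint16_t decode_event_idx(uint16_t off_wrap, bool avail_wrap_counter,
                                 uint16_t ring_num)
{
        uint16_t event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
        bool wrap = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;

        if (wrap != avail_wrap_counter)
                event_idx -= ring_num;  /* wraps mod 2^16, like the kernel's u16 */

        return event_idx;
}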
1652 static void detach_buf_packed(struct vring_virtqueue *vq, in detach_buf_packed() argument
1659 state = &vq->packed.desc_state[id]; in detach_buf_packed()
1664 vq->packed.desc_extra[state->last].next = vq->free_head; in detach_buf_packed()
1665 vq->free_head = id; in detach_buf_packed()
1666 vq->vq.num_free += state->num; in detach_buf_packed()
1668 if (unlikely(vq->use_dma_api)) { in detach_buf_packed()
1671 vring_unmap_extra_packed(vq, in detach_buf_packed()
1672 &vq->packed.desc_extra[curr]); in detach_buf_packed()
1673 curr = vq->packed.desc_extra[curr].next; in detach_buf_packed()
1677 if (vq->indirect) { in detach_buf_packed()
1686 if (vq->use_dma_api) { in detach_buf_packed()
1687 len = vq->packed.desc_extra[id].len; in detach_buf_packed()
1693 vring_unmap_extra_packed(vq, &extra[i]); in detach_buf_packed()
1702 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, in is_used_desc_packed() argument
1708 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); in is_used_desc_packed()
1715 static bool more_used_packed(const struct vring_virtqueue *vq) in more_used_packed() argument
1721 last_used_idx = READ_ONCE(vq->last_used_idx); in more_used_packed()
1724 return is_used_desc_packed(vq, last_used, used_wrap_counter); in more_used_packed()
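more_used_packed() reduces to the flag test in is_used_desc_packed() (line 1702): a packed descriptor counts as used once its AVAIL and USED flag bits are equal and both match the driver's current used wrap counter. A standalone sketch with the bit positions from include/uapi/linux/virtio_ring.h (desc_is_used is an illustrative name):

#include <stdbool.h>
#include <stdint.h>

#define VRING_PACKED_DESC_F_AVAIL       7
#define VRING_PACKED_DESC_F_USED        15

static bool desc_is_used(uint16_t flags, bool used_wrap_counter)
{
        bool avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
        bool used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

        /* fresh (avail != used) descriptors and stale laps are filtered out */
        return avail == used && used == used_wrap_counter;
}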
1731 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_packed() local
1736 START_USE(vq); in virtqueue_get_buf_ctx_packed()
1738 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_packed()
1739 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1743 if (!more_used_packed(vq)) { in virtqueue_get_buf_ctx_packed()
1745 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1750 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_packed()
1752 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_get_buf_ctx_packed()
1755 id = le16_to_cpu(vq->packed.vring.desc[last_used].id); in virtqueue_get_buf_ctx_packed()
1756 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); in virtqueue_get_buf_ctx_packed()
1758 if (unlikely(id >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1759 BAD_RING(vq, "id %u out of range\n", id); in virtqueue_get_buf_ctx_packed()
1762 if (unlikely(!vq->packed.desc_state[id].data)) { in virtqueue_get_buf_ctx_packed()
1763 BAD_RING(vq, "id %u is not a head!\n", id); in virtqueue_get_buf_ctx_packed()
1768 ret = vq->packed.desc_state[id].data; in virtqueue_get_buf_ctx_packed()
1769 detach_buf_packed(vq, id, ctx); in virtqueue_get_buf_ctx_packed()
1771 last_used += vq->packed.desc_state[id].num; in virtqueue_get_buf_ctx_packed()
1772 if (unlikely(last_used >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1773 last_used -= vq->packed.vring.num; in virtqueue_get_buf_ctx_packed()
1778 WRITE_ONCE(vq->last_used_idx, last_used); in virtqueue_get_buf_ctx_packed()
1785 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) in virtqueue_get_buf_ctx_packed()
1786 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_packed()
1787 &vq->packed.vring.driver->off_wrap, in virtqueue_get_buf_ctx_packed()
1788 cpu_to_le16(vq->last_used_idx)); in virtqueue_get_buf_ctx_packed()
1790 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_packed()
1792 END_USE(vq); in virtqueue_get_buf_ctx_packed()
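Lines 1771-1778 advance the consumed position: step last_used by the chain length, wrap modulo the ring size, flip the used wrap counter on wraparound, and re-encode both into the single 16-bit last_used_idx (the same encoding as the virtqueue_init() sketch earlier). A sketch of that update (advance_last_used is a made-up name):

#include <stdbool.h>
#include <stdint.h>

#define VRING_PACKED_EVENT_F_WRAP_CTR   15

static uint16_t advance_last_used(uint16_t last_used, bool wrap_counter,
                                  uint16_t chain_len, uint16_t ring_num)
{
        last_used += chain_len;
        if (last_used >= ring_num) {
                last_used -= ring_num;
                wrap_counter ^= 1;      /* new lap, flip the counter */
        }

        return last_used | ((uint16_t)wrap_counter <<
                            VRING_PACKED_EVENT_F_WRAP_CTR);
}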
1798 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_packed() local
1800 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_disable_cb_packed()
1801 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_disable_cb_packed()
1807 if (vq->event_triggered) in virtqueue_disable_cb_packed()
1810 vq->packed.vring.driver->flags = in virtqueue_disable_cb_packed()
1811 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_disable_cb_packed()
1817 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_packed() local
1819 START_USE(vq); in virtqueue_enable_cb_prepare_packed()
1826 if (vq->event) { in virtqueue_enable_cb_prepare_packed()
1827 vq->packed.vring.driver->off_wrap = in virtqueue_enable_cb_prepare_packed()
1828 cpu_to_le16(vq->last_used_idx); in virtqueue_enable_cb_prepare_packed()
1833 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_prepare_packed()
1836 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_prepare_packed()
1837 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_prepare_packed()
1840 vq->packed.vring.driver->flags = in virtqueue_enable_cb_prepare_packed()
1841 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_prepare_packed()
1844 END_USE(vq); in virtqueue_enable_cb_prepare_packed()
1845 return vq->last_used_idx; in virtqueue_enable_cb_prepare_packed()
1850 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_packed() local
1857 return is_used_desc_packed(vq, used_idx, wrap_counter); in virtqueue_poll_packed()
1862 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_packed() local
1866 START_USE(vq); in virtqueue_enable_cb_delayed_packed()
1873 if (vq->event) { in virtqueue_enable_cb_delayed_packed()
1875 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; in virtqueue_enable_cb_delayed_packed()
1876 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_enable_cb_delayed_packed()
1880 if (used_idx >= vq->packed.vring.num) { in virtqueue_enable_cb_delayed_packed()
1881 used_idx -= vq->packed.vring.num; in virtqueue_enable_cb_delayed_packed()
1885 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | in virtqueue_enable_cb_delayed_packed()
1892 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1895 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_delayed_packed()
1896 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_delayed_packed()
1899 vq->packed.vring.driver->flags = in virtqueue_enable_cb_delayed_packed()
1900 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_delayed_packed()
1907 virtio_mb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1909 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_enable_cb_delayed_packed()
1912 if (is_used_desc_packed(vq, used_idx, wrap_counter)) { in virtqueue_enable_cb_delayed_packed()
1913 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1917 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1923 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_packed() local
1927 START_USE(vq); in virtqueue_detach_unused_buf_packed()
1929 for (i = 0; i < vq->packed.vring.num; i++) { in virtqueue_detach_unused_buf_packed()
1930 if (!vq->packed.desc_state[i].data) in virtqueue_detach_unused_buf_packed()
1933 buf = vq->packed.desc_state[i].data; in virtqueue_detach_unused_buf_packed()
1934 detach_buf_packed(vq, i, NULL); in virtqueue_detach_unused_buf_packed()
1935 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1939 BUG_ON(vq->vq.num_free != vq->packed.vring.num); in virtqueue_detach_unused_buf_packed()
1941 END_USE(vq); in virtqueue_detach_unused_buf_packed()
2086 static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq, in virtqueue_vring_attach_packed() argument
2089 vq->packed = *vring_packed; in virtqueue_vring_attach_packed()
2092 vq->free_head = 0; in virtqueue_vring_attach_packed()
2095 static void virtqueue_reinit_packed(struct vring_virtqueue *vq) in virtqueue_reinit_packed() argument
2097 memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes); in virtqueue_reinit_packed()
2098 memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes); in virtqueue_reinit_packed()
2101 memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes); in virtqueue_reinit_packed()
2103 virtqueue_init(vq, vq->packed.vring.num); in virtqueue_reinit_packed()
2104 virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); in virtqueue_reinit_packed()
2117 struct vring_virtqueue *vq; in __vring_new_virtqueue_packed() local
2120 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in __vring_new_virtqueue_packed()
2121 if (!vq) in __vring_new_virtqueue_packed()
2124 vq->vq.callback = callback; in __vring_new_virtqueue_packed()
2125 vq->vq.vdev = vdev; in __vring_new_virtqueue_packed()
2126 vq->vq.name = name; in __vring_new_virtqueue_packed()
2127 vq->vq.index = index; in __vring_new_virtqueue_packed()
2128 vq->vq.reset = false; in __vring_new_virtqueue_packed()
2129 vq->we_own_ring = false; in __vring_new_virtqueue_packed()
2130 vq->notify = notify; in __vring_new_virtqueue_packed()
2131 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue_packed()
2133 vq->broken = true; in __vring_new_virtqueue_packed()
2135 vq->broken = false; in __vring_new_virtqueue_packed()
2137 vq->packed_ring = true; in __vring_new_virtqueue_packed()
2138 vq->dma_dev = dma_dev; in __vring_new_virtqueue_packed()
2139 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue_packed()
2141 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue_packed()
2143 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue_packed()
2146 vq->weak_barriers = false; in __vring_new_virtqueue_packed()
2150 kfree(vq); in __vring_new_virtqueue_packed()
2156 virtqueue_init(vq, vring_packed->vring.num); in __vring_new_virtqueue_packed()
2157 virtqueue_vring_attach_packed(vq, vring_packed); in __vring_new_virtqueue_packed()
2160 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue_packed()
2162 return &vq->vq; in __vring_new_virtqueue_packed()
2179 struct virtqueue *vq; in vring_create_virtqueue_packed() local
2184 vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers, in vring_create_virtqueue_packed()
2186 if (!vq) { in vring_create_virtqueue_packed()
2191 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_packed()
2193 return vq; in vring_create_virtqueue_packed()
2199 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_resize_packed() local
2203 if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq))) in virtqueue_resize_packed()
2210 vring_free(&vq->vq); in virtqueue_resize_packed()
2212 virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback); in virtqueue_resize_packed()
2214 virtqueue_init(vq, vring_packed.vring.num); in virtqueue_resize_packed()
2215 virtqueue_vring_attach_packed(vq, &vring_packed); in virtqueue_resize_packed()
2220 vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq)); in virtqueue_resize_packed()
2222 virtqueue_reinit_packed(vq); in virtqueue_resize_packed()
2227 void (*recycle)(struct virtqueue *vq, void *buf)) in virtqueue_disable_and_recycle() argument
2229 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_and_recycle() local
2230 struct virtio_device *vdev = vq->vq.vdev; in virtqueue_disable_and_recycle()
2234 if (!vq->we_own_ring) in virtqueue_disable_and_recycle()
2255 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_after_reset() local
2256 struct virtio_device *vdev = vq->vq.vdev; in virtqueue_enable_after_reset()
2278 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add() local
2280 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
2334 int virtqueue_add_outbuf(struct virtqueue *vq, in virtqueue_add_outbuf() argument
2339 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp); in virtqueue_add_outbuf()
2357 int virtqueue_add_outbuf_premapped(struct virtqueue *vq, in virtqueue_add_outbuf_premapped() argument
2362 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp); in virtqueue_add_outbuf_premapped()
2379 int virtqueue_add_inbuf(struct virtqueue *vq, in virtqueue_add_inbuf() argument
2384 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp); in virtqueue_add_inbuf()
2402 int virtqueue_add_inbuf_ctx(struct virtqueue *vq, in virtqueue_add_inbuf_ctx() argument
2408 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp); in virtqueue_add_inbuf_ctx()
2427 int virtqueue_add_inbuf_premapped(struct virtqueue *vq, in virtqueue_add_inbuf_premapped() argument
2433 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp); in virtqueue_add_inbuf_premapped()
2445 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_dma_dev() local
2447 if (vq->use_dma_api) in virtqueue_dma_dev()
2448 return vring_dma_dev(vq); in virtqueue_dma_dev()
2467 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare() local
2469 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
2484 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_notify() local
2486 if (unlikely(vq->broken)) in virtqueue_notify()
2490 if (!vq->notify(_vq)) { in virtqueue_notify()
2491 vq->broken = true; in virtqueue_notify()
2510 bool virtqueue_kick(struct virtqueue *vq) in virtqueue_kick() argument
2512 if (virtqueue_kick_prepare(vq)) in virtqueue_kick()
2513 return virtqueue_notify(vq); in virtqueue_kick()
2538 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx() local
2540 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
2561 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb() local
2563 if (vq->packed_ring) in virtqueue_disable_cb()
2584 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare() local
2586 if (vq->event_triggered) in virtqueue_enable_cb_prepare()
2587 vq->event_triggered = false; in virtqueue_enable_cb_prepare()
2589 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
2605 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll() local
2607 if (unlikely(vq->broken)) in virtqueue_poll()
2610 virtio_mb(vq->weak_barriers); in virtqueue_poll()
2611 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
2650 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed() local
2652 if (vq->event_triggered) in virtqueue_enable_cb_delayed()
2653 vq->event_triggered = false; in virtqueue_enable_cb_delayed()
2655 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2670 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf() local
2672 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2677 static inline bool more_used(const struct vring_virtqueue *vq) in more_used() argument
2679 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); in more_used()
2692 struct vring_virtqueue *vq = to_vvq(_vq); in vring_interrupt() local
2694 if (!more_used(vq)) { in vring_interrupt()
2695 pr_debug("virtqueue interrupt with no work for %p\n", vq); in vring_interrupt()
2699 if (unlikely(vq->broken)) { in vring_interrupt()
2701 dev_warn_once(&vq->vq.vdev->dev, in vring_interrupt()
2710 if (vq->event) in vring_interrupt()
2711 data_race(vq->event_triggered = true); in vring_interrupt()
2713 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); in vring_interrupt()
2714 if (vq->vq.callback) in vring_interrupt()
2715 vq->vq.callback(&vq->vq); in vring_interrupt()
2796 void (*recycle)(struct virtqueue *vq, void *buf), in virtqueue_resize() argument
2797 void (*recycle_done)(struct virtqueue *vq)) in virtqueue_resize() argument
2799 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_resize() local
2802 if (num > vq->vq.num_max) in virtqueue_resize()
2808 if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num) in virtqueue_resize()
2817 if (vq->packed_ring) in virtqueue_resize()
2842 void (*recycle)(struct virtqueue *vq, void *buf), in virtqueue_reset() argument
2843 void (*recycle_done)(struct virtqueue *vq)) in virtqueue_reset() argument
2845 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_reset() local
2854 if (vq->packed_ring) in virtqueue_reset()
2855 virtqueue_reinit_packed(vq); in virtqueue_reset()
2857 virtqueue_reinit_split(vq); in virtqueue_reset()
2870 bool (*notify)(struct virtqueue *vq), in vring_new_virtqueue() argument
2871 void (*callback)(struct virtqueue *vq), in vring_new_virtqueue() argument
2896 struct vring_virtqueue *vq = to_vvq(_vq); in vring_free() local
2898 if (vq->we_own_ring) { in vring_free()
2899 if (vq->packed_ring) { in vring_free()
2900 vring_free_queue(vq->vq.vdev, in vring_free()
2901 vq->packed.ring_size_in_bytes, in vring_free()
2902 vq->packed.vring.desc, in vring_free()
2903 vq->packed.ring_dma_addr, in vring_free()
2904 vring_dma_dev(vq)); in vring_free()
2906 vring_free_queue(vq->vq.vdev, in vring_free()
2907 vq->packed.event_size_in_bytes, in vring_free()
2908 vq->packed.vring.driver, in vring_free()
2909 vq->packed.driver_event_dma_addr, in vring_free()
2910 vring_dma_dev(vq)); in vring_free()
2912 vring_free_queue(vq->vq.vdev, in vring_free()
2913 vq->packed.event_size_in_bytes, in vring_free()
2914 vq->packed.vring.device, in vring_free()
2915 vq->packed.device_event_dma_addr, in vring_free()
2916 vring_dma_dev(vq)); in vring_free()
2918 kfree(vq->packed.desc_state); in vring_free()
2919 kfree(vq->packed.desc_extra); in vring_free()
2921 vring_free_queue(vq->vq.vdev, in vring_free()
2922 vq->split.queue_size_in_bytes, in vring_free()
2923 vq->split.vring.desc, in vring_free()
2924 vq->split.queue_dma_addr, in vring_free()
2925 vring_dma_dev(vq)); in vring_free()
2928 if (!vq->packed_ring) { in vring_free()
2929 kfree(vq->split.desc_state); in vring_free()
2930 kfree(vq->split.desc_extra); in vring_free()
2936 struct vring_virtqueue *vq = to_vvq(_vq); in vring_del_virtqueue() local
2938 spin_lock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2940 spin_unlock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2944 kfree(vq); in vring_del_virtqueue()
2950 struct vring_virtqueue *vq = to_vvq(_vq); in vring_notification_data() local
2953 if (vq->packed_ring) in vring_notification_data()
2954 next = (vq->packed.next_avail_idx & in vring_notification_data()
2956 vq->packed.avail_wrap_counter << in vring_notification_data()
2959 next = vq->split.avail_idx_shadow; in vring_notification_data()
3004 const struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_vring_size() local
3006 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; in virtqueue_get_vring_size()
3015 struct vring_virtqueue *vq = to_vvq(_vq); in __virtqueue_break() local
3018 WRITE_ONCE(vq->broken, true); in __virtqueue_break()
3027 struct vring_virtqueue *vq = to_vvq(_vq); in __virtqueue_unbreak() local
3030 WRITE_ONCE(vq->broken, false); in __virtqueue_unbreak()
3036 const struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_is_broken() local
3038 return READ_ONCE(vq->broken); in virtqueue_is_broken()
3052 struct vring_virtqueue *vq = to_vvq(_vq); in virtio_break_device() local
3055 WRITE_ONCE(vq->broken, true); in virtio_break_device()
3074 struct vring_virtqueue *vq = to_vvq(_vq); in __virtio_unbreak_device() local
3077 WRITE_ONCE(vq->broken, false); in __virtio_unbreak_device()
3085 const struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_desc_addr() local
3087 BUG_ON(!vq->we_own_ring); in virtqueue_get_desc_addr()
3089 if (vq->packed_ring) in virtqueue_get_desc_addr()
3090 return vq->packed.ring_dma_addr; in virtqueue_get_desc_addr()
3092 return vq->split.queue_dma_addr; in virtqueue_get_desc_addr()
3098 const struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_avail_addr() local
3100 BUG_ON(!vq->we_own_ring); in virtqueue_get_avail_addr()
3102 if (vq->packed_ring) in virtqueue_get_avail_addr()
3103 return vq->packed.driver_event_dma_addr; in virtqueue_get_avail_addr()
3105 return vq->split.queue_dma_addr + in virtqueue_get_avail_addr()
3106 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); in virtqueue_get_avail_addr()
3112 const struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_used_addr() local
3114 BUG_ON(!vq->we_own_ring); in virtqueue_get_used_addr()
3116 if (vq->packed_ring) in virtqueue_get_used_addr()
3117 return vq->packed.device_event_dma_addr; in virtqueue_get_used_addr()
3119 return vq->split.queue_dma_addr + in virtqueue_get_used_addr()
3120 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); in virtqueue_get_used_addr()
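The split-ring branches at lines 3105-3106 and 3119-3120 work because the whole split ring lives in one contiguous DMA allocation: the avail and used rings sit at fixed byte offsets from the descriptor table, so their bus addresses are the queue's DMA address plus the same offsets the CPU-side pointers show. A sketch of that arithmetic (ring_part_dma is an illustrative name; types simplified):

#include <stdint.h>

typedef uint64_t dma_addr_t;

static dma_addr_t ring_part_dma(dma_addr_t queue_dma_addr,
                                const void *desc, const void *part)
{
        /* same byte offset on the bus as between the CPU-side pointers */
        return queue_dma_addr + ((const char *)part - (const char *)desc);
}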
3125 const struct vring *virtqueue_get_vring(const struct virtqueue *vq) in virtqueue_get_vring() argument
3127 return &to_vvq(vq)->split.vring; in virtqueue_get_vring()
3149 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_dma_map_single_attrs() local
3151 if (!vq->use_dma_api) { in virtqueue_dma_map_single_attrs()
3156 return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs); in virtqueue_dma_map_single_attrs()
3175 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_dma_unmap_single_attrs() local
3177 if (!vq->use_dma_api) in virtqueue_dma_unmap_single_attrs()
3180 dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs); in virtqueue_dma_unmap_single_attrs()
3193 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_dma_mapping_error() local
3195 if (!vq->use_dma_api) in virtqueue_dma_mapping_error()
3198 return dma_mapping_error(vring_dma_dev(vq), addr); in virtqueue_dma_mapping_error()
3214 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_dma_need_sync() local
3216 if (!vq->use_dma_api) in virtqueue_dma_need_sync()
3219 return dma_need_sync(vring_dma_dev(vq), addr); in virtqueue_dma_need_sync()
3240 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_dma_sync_single_range_for_cpu() local
3241 struct device *dev = vring_dma_dev(vq); in virtqueue_dma_sync_single_range_for_cpu()
3243 if (!vq->use_dma_api) in virtqueue_dma_sync_single_range_for_cpu()
3266 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_dma_sync_single_range_for_device() local
3267 struct device *dev = vring_dma_dev(vq); in virtqueue_dma_sync_single_range_for_device()
3269 if (!vq->use_dma_api) in virtqueue_dma_sync_single_range_for_device()