/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define	VQ_RING_DESC_CHAIN_END 32768

#define	VQASSERT(_vq, _exp, _msg, ...)				\
	KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	    ##__VA_ARGS__))

#define	VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
	VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	    "invalid ring index: %d, max: %d", (_idx),		\
	    (_vq)->vq_nentries)

#define	VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
	VQASSERT((_vq), (_vq)->vq_desc_head_idx ==		\
	    VQ_RING_DESC_CHAIN_END, "full ring terminated "	\
	    "incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr(struct virtqueue *vq)
{

	if (vq->vq_intrhand == NULL ||
	    vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	vq->vq_intrhand(vq->vq_intrhand_arg);

	return (1);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq)
{
	uint16_t ndesc, avail_idx;

	/*
	 * Request the next interrupt be postponed until at least half
	 * of the available descriptors have been consumed.
	 */
	avail_idx = vq->vq_ring.avail->idx;
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx) / 2;

	return (vq_ring_enable_interrupt(vq, ndesc));
}

void
virtqueue_disable_intr(struct virtqueue *vq)
{

	/*
	 * Note this is only considered a hint to the host.
	 */
	if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0)
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}

void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		cpu_spinwait();

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	wmb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
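
/*
 * Illustrative usage sketch only, not part of this file's API: the
 * identifiers 'sc', 'hdr', 'buf', 'buflen', 'cookie', 'len', and
 * 'process_completion' below are hypothetical driver-owned objects. A
 * driver typically builds a scatter/gather list, enqueues it with a
 * per-request cookie, and notifies the host; completed cookies are later
 * recovered from the interrupt handler with virtqueue_dequeue():
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *	int error;
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, hdr, sizeof(*hdr));	(device-readable segment)
 *	sglist_append(&sg, buf, buflen);	(device-writable segment)
 *	error = virtqueue_enqueue(sc->vq, cookie, &sg, 1, 1);
 *	if (error == 0)
 *		virtqueue_notify(sc->vq);
 *
 *	In the interrupt handler:
 *	while ((cookie = virtqueue_dequeue(sc->vq, &len)) != NULL)
 *		process_completion(cookie, len);
 */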