/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define	VQ_RING_DESC_CHAIN_END 32768

#define	VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))
#define	VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define	VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq->vq_ring.avail->idx;
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
		    vq->vq_nentries - 1;
	} else
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

/*
 * Enqueue a descriptor chain for 'sg': the first 'readable' segments are
 * read-only to the host, the remaining 'writable' segments are write-only.
 * 'cookie' identifies the chain and is returned by virtqueue_dequeue().
 */
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}

/*
 * Dequeue the next completed chain, returning its cookie or NULL if the
 * used ring is empty. If 'len' is not NULL, it is set to the number of
 * bytes the host wrote into the writable segments.
 */
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		cpu_spinwait();

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx,
	    vring_used_event(&vq->vq_ring),
	    vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	wmb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
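
/*
 * Illustrative usage sketch (comment only, not part of this file): roughly
 * how a device driver could submit one host-readable buffer and reap its
 * completion with the API above. The buffer, its length, and the virtqueue
 * pointer ("buf", "buflen", "vq") are assumed to come from the hypothetical
 * caller; they are not defined here.
 *
 *	struct sglist_seg segs[1];
 *	struct sglist sg;
 *	uint32_t len;
 *	int error;
 *
 *	sglist_init(&sg, 1, segs);
 *	error = sglist_append(&sg, buf, buflen);
 *	if (error == 0)
 *		error = virtqueue_enqueue(vq, buf, &sg, 1, 0);
 *	if (error == 0) {
 *		virtqueue_notify(vq);
 *		buf = virtqueue_poll(vq, &len);
 *	}
 */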