/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

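/*
 * For illustration only (a sketch, not part of the API): with out = 2
 * and in = 1, vring_add_indirect() consumes a single ring slot whose
 * descriptor points at the three-entry table allocated above:
 *
 *	vring.desc[head]: addr  = virt_to_phys(desc),
 *			  len   = 3 * sizeof(struct vring_desc),
 *			  flags = VRING_DESC_F_INDIRECT
 *	desc[0]: device-readable, flags = NEXT, next = 1
 *	desc[1]: device-readable, flags = NEXT, next = 2
 *	desc[2]: device-writable, chain ends here
 *
 * This is why num_free is decremented by exactly one in the indirect
 * case, regardless of out + in.
 */
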
/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg readable by other side
 * @in: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with 0.1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->vq.num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

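/*
 * Example usage (a minimal sketch, not taken from an in-tree driver):
 * queue a single device-writable buffer "buf" of "len" bytes on "vq",
 * using a hypothetical per-request token "req":
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_buf(vq, &sg, 0, 1, req, GFP_ATOMIC);
 *	if (err < 0)
 *		return err;	(e.g. -ENOSPC when the ring is full)
 *	virtqueue_kick(vq);
 */
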
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

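/*
 * Example of the split kick (a sketch; "lock" is a hypothetical driver
 * lock serializing virtqueue operations).  Only the prepare half needs
 * the lock; the notify, which may be an expensive exit to the host,
 * does not:
 *
 *	bool kick;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	virtqueue_add_buf(vq, sg, out, in, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */
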
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

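/*
 * Example (a sketch): a driver's virtqueue callback typically drains
 * all used buffers in a loop; complete_request() is a hypothetical
 * helper that finishes the I/O identified by the token:
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			complete_request(token, len);
 *	}
 */
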
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq->weak_barriers);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

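/*
 * Example (a sketch): the return value of virtqueue_enable_cb() closes
 * the race between draining the queue and re-enabling interrupts.  A
 * driver can loop until the enable "sticks"; process() is a
 * hypothetical helper:
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		while ((token = virtqueue_get_buf(vq, &len)))
 *			process(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 */
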
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

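/*
 * Example (a sketch of what a transport might do; my_notify and
 * my_callback are hypothetical, and "align" is transport-specific,
 * e.g. PAGE_SIZE).  The caller allocates the ring pages itself, sized
 * per vring_size():
 *
 *	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *				get_order(vring_size(num, align)));
 *	struct virtqueue *vq = vring_new_virtqueue(0, num, align, vdev,
 *				true, pages, my_notify, my_callback,
 *				"requestq");
 */
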
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");