/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)                                          \
        do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)
#define END_USE(_vq) \
        do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&_vq->vq.vdev->dev,                     \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
        void *data;                     /* Data for callback. */
        struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
};

struct vring_virtqueue {
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Can we use weak barriers? */
        bool weak_barriers;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Host supports indirect buffers */
        bool indirect;

        /* Host publishes avail event idx */
        bool event;

        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* Last written value to avail->flags */
        u16 avail_flags_shadow;

        /* Last written value to avail->idx in guest byte order */
        u16 avail_idx_shadow;

        /* How to notify other side. FIXME: commonalize hcalls! */
        bool (*notify)(struct virtqueue *vq);

        /* DMA, allocation, and size information */
        bool we_own_ring;
        size_t queue_size_in_bytes;
        dma_addr_t queue_dma_addr;

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;

        /* Figure out if their kicks are too delayed. */
        bool last_add_time_valid;
        ktime_t last_add_time;
#endif

        /* Per-descriptor state. */
        struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
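
/*
 * Rough sketch of the split ring memory layout managed below (see struct
 * vring and vring_init()); offsets are from vring.desc and the used ring
 * starts on the transport's vring_align boundary:
 *
 *      +--------------------+  <- vring.desc
 *      | descriptor table   |  num entries of struct vring_desc
 *      +--------------------+  <- vring.avail
 *      | available ring     |  guest -> host: indices of chain heads
 *      +--------------------+  <- vring.used (vring_align boundary)
 *      | used ring          |  host -> guest: head id + bytes written
 *      +--------------------+
 *
 * Free descriptors are chained through desc[].next, starting at free_head.
 */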

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU.  If the quirk is absent, just use the DMA API.
 *
 * If the quirk is present, the interaction between virtio and the DMA API
 * is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
        if (!virtio_has_iommu_quirk(vdev))
                return true;

        /* Otherwise, we are left to guess. */
        /*
         * In theory, it's possible to have a buggy QEMU-supplied
         * emulated Q35 IOMMU and Xen enabled at the same time.  On
         * such a configuration, virtio has never worked and will
         * not work without an even larger kludge.  Instead, enable
         * the DMA API if we're a Xen guest, which at least allows
         * all of the sensible Xen configurations to work correctly.
         */
        if (xen_domain())
                return true;

        return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
        return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
                                   struct scatterlist *sg,
                                   enum dma_data_direction direction)
{
        if (!vring_use_dma_api(vq->vq.vdev))
                return (dma_addr_t)sg_phys(sg);

        /*
         * We can't use dma_map_sg, because we don't use scatterlists in
         * the way it expects (we don't guarantee that the scatterlist
         * will exist for the lifetime of the mapping).
         */
        return dma_map_page(vring_dma_dev(vq),
                            sg_page(sg), sg->offset, sg->length,
                            direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
                                   void *cpu_addr, size_t size,
                                   enum dma_data_direction direction)
{
        if (!vring_use_dma_api(vq->vq.vdev))
                return (dma_addr_t)virt_to_phys(cpu_addr);

        return dma_map_single(vring_dma_dev(vq),
                              cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
                            struct vring_desc *desc)
{
        u16 flags;

        if (!vring_use_dma_api(vq->vq.vdev))
                return;

        flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

        if (flags & VRING_DESC_F_INDIRECT) {
                dma_unmap_single(vring_dma_dev(vq),
                                 virtio64_to_cpu(vq->vq.vdev, desc->addr),
                                 virtio32_to_cpu(vq->vq.vdev, desc->len),
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
                dma_unmap_page(vring_dma_dev(vq),
                               virtio64_to_cpu(vq->vq.vdev, desc->addr),
                               virtio32_to_cpu(vq->vq.vdev, desc->len),
                               (flags & VRING_DESC_F_WRITE) ?
                               DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
                               dma_addr_t addr)
{
        if (!vring_use_dma_api(vq->vq.vdev))
                return 0;

        return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
                                         unsigned int total_sg, gfp_t gfp)
{
        struct vring_desc *desc;
        unsigned int i;

        /*
         * We require lowmem mappings for the descriptors because
         * otherwise virt_to_phys will give us bogus addresses in the
         * virtqueue.
         */
        gfp &= ~__GFP_HIGHMEM;

        desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
        if (!desc)
                return NULL;

        for (i = 0; i < total_sg; i++)
                desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
        return desc;
}
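
/*
 * Note on the two descriptor layouts built by virtqueue_add() below: in the
 * direct case every sg entry takes one slot in the ring's descriptor table,
 * chained via desc[].next; in the indirect case the whole chain lives in the
 * table from alloc_indirect() and only a single ring slot is consumed,
 * flagged VRING_DESC_F_INDIRECT and pointing at that table.
 */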

static inline int virtqueue_add(struct virtqueue *_vq,
                                struct scatterlist *sgs[],
                                unsigned int total_sg,
                                unsigned int out_sgs,
                                unsigned int in_sgs,
                                void *data,
                                gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct scatterlist *sg;
        struct vring_desc *desc;
        unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
        int head;
        bool indirect;

        START_USE(vq);

        BUG_ON(data == NULL);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return -EIO;
        }

#ifdef DEBUG
        {
                ktime_t now = ktime_get();

                /* No kick or get, with .1 second between?  Warn. */
                if (vq->last_add_time_valid)
                        WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
                                > 100);
                vq->last_add_time = now;
                vq->last_add_time_valid = true;
        }
#endif

        BUG_ON(total_sg > vq->vring.num);
        BUG_ON(total_sg == 0);

        head = vq->free_head;

        /* If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && total_sg > 1 && vq->vq.num_free)
                desc = alloc_indirect(_vq, total_sg, gfp);
        else
                desc = NULL;

        if (desc) {
                /* Use a single buffer which doesn't continue */
                indirect = true;
                /* Set up rest to use this indirect table. */
                i = 0;
                descs_used = 1;
        } else {
                indirect = false;
                desc = vq->vring.desc;
                i = head;
                descs_used = total_sg;
        }

        if (vq->vq.num_free < descs_used) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         descs_used, vq->vq.num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out_sgs)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
                        if (vring_mapping_error(vq, addr))
                                goto unmap_release;

                        desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
                        desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
                        desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
                        prev = i;
                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
                }
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
                        if (vring_mapping_error(vq, addr))
                                goto unmap_release;

                        desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
                        desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
                        desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
                        prev = i;
                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
                }
        }
        /* Last one doesn't continue. */
        desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

        if (indirect) {
                /* Now that the indirect table is filled in, map it. */
                dma_addr_t addr = vring_map_single(
                        vq, desc, total_sg * sizeof(struct vring_desc),
                        DMA_TO_DEVICE);
                if (vring_mapping_error(vq, addr))
                        goto unmap_release;

                vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
                vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

                vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
        }

        /* We're using some buffers from the free list. */
        vq->vq.num_free -= descs_used;

        /* Update free pointer */
        if (indirect)
                vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
        else
                vq->free_head = i;

        /* Store token and indirect buffer state. */
        vq->desc_state[head].data = data;
        if (indirect)
                vq->desc_state[head].indir_desc = desc;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
        avail = vq->avail_idx_shadow & (vq->vring.num - 1);
        vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb(vq->weak_barriers);
        vq->avail_idx_shadow++;
        vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
        vq->num_added++;

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        /* This is very unlikely, but theoretically possible.  Kick
         * just in case. */
        if (unlikely(vq->num_added == (1 << 16) - 1))
                virtqueue_kick(_vq);

        return 0;

unmap_release:
        err_idx = i;
        i = head;

        for (n = 0; n < total_sg; n++) {
                if (i == err_idx)
                        break;
                vring_unmap_one(vq, &desc[i]);
                i = vq->vring.desc[i].next;
        }

        vq->vq.num_free += total_sg;

        if (indirect)
                kfree(desc);

        return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
                      struct scatterlist *sgs[],
                      unsigned int out_sgs,
                      unsigned int in_sgs,
                      void *data,
                      gfp_t gfp)
{
        unsigned int i, total_sg = 0;

        /* Count them first. */
        for (i = 0; i < out_sgs + in_sgs; i++) {
                struct scatterlist *sg;
                for (sg = sgs[i]; sg; sg = sg_next(sg))
                        total_sg++;
        }
        return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
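
/*
 * Example (illustrative only): queueing a request with two device-readable
 * parts followed by one device-writable part, roughly in the style of a
 * block driver.  The request layout and the names req, buf and len here
 * are hypothetical.
 *
 *      struct scatterlist hdr, data, status, *sgs[3];
 *      int err;
 *
 *      sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *      sg_init_one(&data, buf, len);
 *      sg_init_one(&status, &req->status, sizeof(req->status));
 *      sgs[0] = &hdr;
 *      sgs[1] = &data;
 *      sgs[2] = &status;
 *
 *      err = virtqueue_add_sgs(vq, sgs, 2, 1, req, GFP_ATOMIC);
 *      if (!err)
 *              virtqueue_kick(vq);
 */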

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
                         struct scatterlist *sg, unsigned int num,
                         void *data,
                         gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
                        struct scatterlist *sg, unsigned int num,
                        void *data,
                        gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
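
/*
 * Example (illustrative only): handing the device a single buffer to fill,
 * as a network driver might when refilling its receive queue.  The names
 * rx_vq, buf and buf_len are hypothetical.
 *
 *      struct scatterlist sg;
 *      int err;
 *
 *      sg_init_one(&sg, buf, buf_len);
 *      err = virtqueue_add_inbuf(rx_vq, &sg, 1, buf, GFP_ATOMIC);
 *      if (!err)
 *              virtqueue_kick(rx_vq);
 */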

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *      if (virtqueue_kick_prepare(vq))
 *              virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 new, old;
        bool needs_kick;

        START_USE(vq);
        /* We need to expose available array entries before checking avail
         * event. */
        virtio_mb(vq->weak_barriers);

        old = vq->avail_idx_shadow - vq->num_added;
        new = vq->avail_idx_shadow;
        vq->num_added = 0;

#ifdef DEBUG
        if (vq->last_add_time_valid) {
                WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
                                              vq->last_add_time)) > 100);
        }
        vq->last_add_time_valid = false;
#endif

        if (vq->event) {
                needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
                                              new, old);
        } else {
                needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
        }
        END_USE(vq);
        return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (unlikely(vq->broken))
                return false;

        /* Prod other side to tell it about changes. */
        if (!vq->notify(_vq)) {
                vq->broken = true;
                return false;
        }
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
        if (virtqueue_kick_prepare(vq))
                return virtqueue_notify(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
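
/*
 * Example (illustrative only): using the split kick so that the potentially
 * slow notification runs outside the driver's queue lock; only the prepare
 * half needs to be serialized.  The lock, sg and token names here are
 * hypothetical.
 *
 *      bool kick;
 *
 *      spin_lock_irqsave(&q->lock, flags);
 *      err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *      kick = virtqueue_kick_prepare(vq);
 *      spin_unlock_irqrestore(&q->lock, flags);
 *      if (kick)
 *              virtqueue_notify(vq);
 */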

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i, j;
        u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

        /* Clear data ptr. */
        vq->desc_state[head].data = NULL;

        /* Put back on free list: unmap first-level descriptors and find end */
        i = head;

        while (vq->vring.desc[i].flags & nextflag) {
                vring_unmap_one(vq, &vq->vring.desc[i]);
                i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
                vq->vq.num_free++;
        }

        vring_unmap_one(vq, &vq->vring.desc[i]);
        vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
        vq->free_head = head;

        /* Plus final descriptor */
        vq->vq.num_free++;

        /* Free the indirect table, if any, now that it's unmapped. */
        if (vq->desc_state[head].indir_desc) {
                struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
                u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

                BUG_ON(!(vq->vring.desc[head].flags &
                         cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
                BUG_ON(len == 0 || len % sizeof(struct vring_desc));

                for (j = 0; j < len / sizeof(struct vring_desc); j++)
                        vring_unmap_one(vq, &indir_desc[j]);

                kfree(vq->desc_state[head].indir_desc);
                vq->desc_state[head].indir_desc = NULL;
        }
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;
        u16 last_used;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host. */
        virtio_rmb(vq->weak_barriers);

        last_used = (vq->last_used_idx & (vq->vring.num - 1));
        i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
        *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->desc_state[i].data)) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->desc_state[i].data;
        detach_buf(vq, i);
        vq->last_used_idx++;
        /* If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call. */
        if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
                virtio_store_mb(vq->weak_barriers,
                                &vring_used_event(&vq->vring),
                                cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
        vq->last_add_time_valid = false;
#endif

        END_USE(vq);
        return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
                vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
                vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
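
/*
 * Example (illustrative only): the usual completion pattern from a driver's
 * virtqueue callback, draining used buffers with callbacks suppressed and
 * re-checking after re-enabling to close the race with a late completion.
 * complete_request() is a hypothetical driver function.
 *
 *      unsigned int len;
 *      void *token;
 *
 *      do {
 *              virtqueue_disable_cb(vq);
 *              while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *                      complete_request(token, len);
 *      } while (!virtqueue_enable_cb(vq));
 */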

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 last_used_idx;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
                vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
                vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
        vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
        END_USE(vq);
        return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        virtio_mb(vq->weak_barriers);
        return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
        unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
        return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 bufs;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
                vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
                vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
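
        /*
         * Worked example for the threshold below: with 16 buffers still
         * outstanding (avail_idx_shadow - last_used_idx == 16), bufs is 12,
         * so used_event is set 12 entries ahead and the device need not
         * interrupt us until roughly three quarters of the backlog has been
         * consumed.
         */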
        /* TODO: tune this threshold */
        bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

        virtio_store_mb(vq->weak_barriers,
                        &vring_used_event(&vq->vring),
                        cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

        if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->vring.num; i++) {
                if (!vq->desc_state[i].data)
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->desc_state[i].data;
                detach_buf(vq, i);
                vq->avail_idx_shadow--;
                vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->vq.num_free != vq->vring.num);

        END_USE(vq);
        return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
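
/*
 * Example (illustrative only): a transport wiring vring_interrupt() up as
 * its interrupt handler, with the virtqueue itself as the cookie; the irq
 * variable here is hypothetical.
 *
 *      err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *                        dev_name(&vdev->dev), vq);
 */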

struct virtqueue *__vring_new_virtqueue(unsigned int index,
                                        struct vring vring,
                                        struct virtio_device *vdev,
                                        bool weak_barriers,
                                        bool (*notify)(struct virtqueue *),
                                        void (*callback)(struct virtqueue *),
                                        const char *name)
{
        unsigned int i;
        struct vring_virtqueue *vq;

        vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
                     GFP_KERNEL);
        if (!vq)
                return NULL;

        vq->vring = vring;
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
        vq->vq.num_free = vring.num;
        vq->vq.index = index;
        vq->we_own_ring = false;
        vq->queue_dma_addr = 0;
        vq->queue_size_in_bytes = 0;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->avail_flags_shadow = 0;
        vq->avail_idx_shadow = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
        vq->in_use = false;
        vq->last_add_time_valid = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        /* No callback?  Tell other side not to bother us. */
        if (!callback) {
                vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
                vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
        }

        /* Put everything in free lists. */
        vq->free_head = 0;
        for (i = 0; i < vring.num - 1; i++)
                vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
        memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
                               dma_addr_t *dma_handle, gfp_t flag)
{
        if (vring_use_dma_api(vdev)) {
                return dma_alloc_coherent(vdev->dev.parent, size,
                                          dma_handle, flag);
        } else {
                void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
                if (queue) {
                        phys_addr_t phys_addr = virt_to_phys(queue);
                        *dma_handle = (dma_addr_t)phys_addr;

                        /*
                         * Sanity check: make sure we didn't truncate
                         * the address.  The only arches I can find that
                         * have 64-bit phys_addr_t but 32-bit dma_addr_t
                         * are certain non-highmem MIPS and x86
                         * configurations, but these configurations
                         * should never allocate physical pages above 32
                         * bits, so this is fine.  Just in case, throw a
                         * warning and abort if we end up with an
                         * unrepresentable address.
                         */
                        if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
                                free_pages_exact(queue, PAGE_ALIGN(size));
                                return NULL;
                        }
                }
                return queue;
        }
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
                             void *queue, dma_addr_t dma_handle)
{
        if (vring_use_dma_api(vdev)) {
                dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
        } else {
                free_pages_exact(queue, PAGE_ALIGN(size));
        }
}

struct virtqueue *vring_create_virtqueue(
        unsigned int index,
        unsigned int num,
        unsigned int vring_align,
        struct virtio_device *vdev,
        bool weak_barriers,
        bool may_reduce_num,
        bool (*notify)(struct virtqueue *),
        void (*callback)(struct virtqueue *),
        const char *name)
{
        struct virtqueue *vq;
        void *queue = NULL;
        dma_addr_t dma_addr;
        size_t queue_size_in_bytes;
        struct vring vring;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        /* TODO: allocate each queue chunk individually */
        for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
                queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
                                          &dma_addr,
                                          GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
                if (queue)
                        break;
        }

        if (!num)
                return NULL;

        if (!queue) {
                /* Try to get a single page. You are my only hope! */
                queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
                                          &dma_addr, GFP_KERNEL|__GFP_ZERO);
        }
        if (!queue)
                return NULL;

        queue_size_in_bytes = vring_size(num, vring_align);
        vring_init(&vring, num, queue, vring_align);

        vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
                                   notify, callback, name);
        if (!vq) {
                vring_free_queue(vdev, queue_size_in_bytes, queue,
                                 dma_addr);
                return NULL;
        }

        to_vvq(vq)->queue_dma_addr = dma_addr;
        to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
        to_vvq(vq)->we_own_ring = true;

        return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
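
/*
 * Example (illustrative only): a transport creating a queue and then telling
 * the device where the ring ended up.  The alignment value and the helpers
 * my_notify() and my_set_ring_addr() are hypothetical.
 *
 *      vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *                                  true, true, my_notify, callback, name);
 *      if (!vq)
 *              return ERR_PTR(-ENOMEM);
 *
 *      my_set_ring_addr(vdev, virtqueue_get_desc_addr(vq),
 *                       virtqueue_get_avail_addr(vq),
 *                       virtqueue_get_used_addr(vq));
 */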

struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      void *pages,
                                      bool (*notify)(struct virtqueue *vq),
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name)
{
        struct vring vring;

        vring_init(&vring, num, pages, vring_align);
        return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
                                     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (vq->we_own_ring) {
                vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
                                 vq->vring.desc, vq->queue_dma_addr);
        }
        list_del(&_vq->list);
        kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                case VIRTIO_RING_F_EVENT_IDX:
                        break;
                case VIRTIO_F_VERSION_1:
                        break;
                case VIRTIO_F_IOMMU_PLATFORM:
                        break;
                default:
                        /* We don't understand this bit. */
                        __virtio_clear_bit(vdev, i);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);
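
/*
 * Example (illustrative only): a transport's finalize_features hook letting
 * the ring code clear transport feature bits it does not understand; the
 * surrounding function name is hypothetical.
 *
 *      static int my_finalize_features(struct virtio_device *vdev)
 *      {
 *              vring_transport_features(vdev);
 *              return 0;
 *      }
 */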

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
        struct virtqueue *_vq;

        list_for_each_entry(_vq, &dev->vqs, list) {
                struct vring_virtqueue *vq = to_vvq(_vq);
                vq->broken = true;
        }
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        BUG_ON(!vq->we_own_ring);

        return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        BUG_ON(!vq->we_own_ring);

        return vq->queue_dma_addr +
               ((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        BUG_ON(!vq->we_own_ring);

        return vq->queue_dma_addr +
               ((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
        return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");