/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/device.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(vq, fmt...) \
	do { dev_err(&vq->vq.vdev->dev, fmt); BUG(); } while(0)
/* Callers are supposed to serialize virtqueue operations themselves; in
 * DEBUG builds we record __LINE__ of the current user so a reentrant call
 * panics with the location of the conflicting use. */
#define START_USE(vq) \
	do { if ((vq)->in_use) panic("in_use = %i\n", (vq)->in_use); (vq)->in_use = __LINE__; mb(); } while(0)
#define END_USE(vq) \
	do { BUG_ON(!(vq)->in_use); (vq)->in_use = 0; mb(); } while(0)
#else
/* Production builds don't crash: mark the ring broken and refuse further
 * work (vring_get_buf returns NULL once ->broken is set). */
#define BAD_RING(vq, fmt...) \
	do { dev_err(&vq->vq.vdev->dev, fmt); (vq)->broken = true; } while(0)
#define START_USE(vq)
#define END_USE(vq)
#endif

/* Driver-private state wrapped around the generic struct virtqueue.
 * The descriptor table doubles as a singly-linked free list threaded
 * through desc[].next, headed by free_head. */
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Expose a scatterlist of buffers to the other side.
 *
 * @_vq: the virtqueue
 * @sg: scatterlist holding @out readable entries followed by @in writable ones
 * @out: number of entries the other side will read from
 * @in: number of entries the other side will write into
 * @data: non-NULL token returned by vring_get_buf when the buffer is used
 *
 * Returns 0 on success, -ENOSPC if fewer than out+in descriptors are free.
 * The entry is placed in the available ring but avail->idx is not bumped
 * until vring_kick, so the other side does not see it yet. */
static int vring_add_buf(struct virtqueue *_vq,
			 struct scatterlist sg[],
			 unsigned int out,
			 unsigned int in,
			 void *data)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, head, uninitialized_var(prev);

	BUG_ON(data == NULL);
	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	START_USE(vq);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* We notify *even if* VRING_USED_F_NO_NOTIFY is set here. */
		vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	/* Walk the free list via desc[].next, filling descriptors in place.
	 * Device-readable (out) entries first, then device-writable (in)
	 * ones, each chained to the next with VRING_DESC_F_NEXT. */
	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);
	return 0;
}

/* Publish all buffers added since the last kick by bumping avail->idx,
 * then notify the other side unless it asked us not to. */
static void vring_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}

/* Return the descriptor chain starting at @head to the free list and
 * clear its token.  Bumps num_free once per descriptor in the chain. */
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;
	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	/* Splice the whole chain onto the head of the free list. */
	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

/* True if the other side has consumed buffers we haven't reaped yet.
 * Both indices are free-running u16s, so inequality (not <) is the test. */
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/* Reap one used buffer, if any.
 *
 * @len is set to the number of bytes the other side wrote into the buffer.
 * Returns the token passed to vring_add_buf, or NULL if the ring is broken
 * or no buffer has been used yet. */
static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* last_used_idx is free-running; modulus maps it into the ring. */
	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	/* Validate the id the other side handed back before trusting it. */
	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	END_USE(vq);
	return ret;
}

/* Hint to the other side that we don't want used-buffer interrupts.
 * Advisory only; a callback may still arrive after this returns. */
static void vring_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

/* Re-enable used-buffer callbacks.  Returns false if buffers were already
 * pending (caller should poll vring_get_buf instead of waiting). */
static bool vring_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* Flag write must be visible before we re-check used->idx, or we
	 * could miss an update and sleep forever. */
	mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

/* Interrupt handler: dispatch to the virtqueue's callback if there is
 * work, otherwise report the IRQ as not ours. */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

static struct virtqueue_ops vring_vq_ops = {
	.add_buf = vring_add_buf,
	.get_buf = vring_get_buf,
	.kick = vring_kick,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
};

/* Create a virtqueue over caller-provided ring memory.
 *
 * @num: ring size; must be a power of 2 (the avail/used index arithmetic
 *	 relies on it)
 * @vdev: the virtio device this queue belongs to
 * @pages: page-aligned memory for the ring layout (caller-owned; presumably
 *	   zeroed, since the last descriptor's .next is left untouched below
 *	   — TODO confirm against callers)
 * @notify: how to prod the other side
 * @callback: invoked from vring_interrupt on used buffers; NULL to disable
 *
 * Returns the new virtqueue, or NULL on bad @num or allocation failure. */
struct virtqueue *vring_new_virtqueue(unsigned int num,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *))
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* data[] is a flexible array member: one token slot per descriptor. */
	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, PAGE_SIZE);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.vq_ops = &vring_vq_ops;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
#ifdef DEBUG
	vq->in_use = false;
#endif

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++)
		vq->vring.desc[i].next = i+1;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

/* Free a virtqueue created by vring_new_virtqueue.  The ring memory
 * (@pages) is caller-owned and is NOT freed here. */
void vring_del_virtqueue(struct virtqueue *vq)
{
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

MODULE_LICENSE("GPL");