xref: /linux/drivers/virtio/virtio_ring.c (revision 36239c6704b71da7fb8e2a9429e159a84d0c5a3e)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio does not use). */
#define virtio_mb() smp_mb()
#define virtio_rmb() smp_rmb()
#define virtio_wmb() smp_wmb()
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb() mb()
#define virtio_rmb() rmb()
#define virtio_wmb() wmb()
#endif
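
/*
 * How these barriers are used below (a condensed summary of
 * virtqueue_add_buf_gfp()/virtqueue_kick() and virtqueue_get_buf()): the
 * guest fills in descriptors and available-ring entries, issues
 * virtio_wmb(), and only then publishes the new avail->idx; conversely it
 * issues virtio_rmb() after seeing a new used->idx and before reading the
 * used ring entries the host just exposed.  Roughly, on the producer side:
 *
 *	vq->vring.avail->ring[avail] = head;	// fill the entry
 *	virtio_wmb();				// entry visible before idx
 *	vq->vring.avail->idx += vq->num_added;	// publish to the host
 */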

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
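
/*
 * Resulting layout, roughly: the main ring consumes a single descriptor
 * marked VRING_DESC_F_INDIRECT whose addr/len point at the kmalloc'ed table
 * built above, while the table holds the (out + in) entries chained via
 * their next fields:
 *
 *	vring.desc[head]     addr = virt_to_phys(desc),
 *	                     len = (out + in) * sizeof(struct vring_desc),
 *	                     flags = VRING_DESC_F_INDIRECT
 *	desc[0 .. out-1]     device-readable buffers, F_NEXT
 *	desc[out .. out+in-1] device-writable buffers, F_NEXT|F_WRITE
 *	desc[out+in-1]       last entry, F_NEXT cleared
 */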

int virtqueue_add_buf_gfp(struct virtqueue *_vq,
			  struct scatterlist sg[],
			  unsigned int out,
			  unsigned int in,
			  void *data,
			  gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, head, uninitialized_var(prev);

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (head != vq->vring.num)
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* If we're indirect, we can fit many (assuming not OOM). */
	if (vq->indirect)
		return vq->num_free ? vq->vring.num : 0;
	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
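
/*
 * A minimal caller sketch (hypothetical driver code, not part of this file):
 * one device-readable header followed by one device-writable buffer, where
 * "hdr", "buf", "buf_len" and "token" stand in for whatever the driver
 * actually tracks.  A negative return here can only be -ENOSPC (ring full).
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], hdr, sizeof(*hdr));		// out: host reads
 *	sg_set_buf(&sg[1], buf, buf_len);		// in: host writes
 *	if (virtqueue_add_buf_gfp(vq, sg, 1, 1, token, GFP_ATOMIC) < 0)
 *		return -ENOSPC;
 *	virtqueue_kick(vq);
 */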

void virtqueue_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	virtio_mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
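
/*
 * Note on batching: num_added accumulates across virtqueue_add_buf_gfp()
 * calls (avail->idx is untouched until here), so a driver may queue several
 * buffers and publish them with a single kick, e.g. (illustrative,
 * "have_more_requests" is a hypothetical helper):
 *
 *	while (have_more_requests() &&
 *	       virtqueue_add_buf_gfp(vq, sg, out, in, tok, GFP_ATOMIC) >= 0)
 *		;				// queue as many as fit
 *	virtqueue_kick(vq);			// one publish/notify for all
 */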

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb();

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
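
/*
 * Typical consumer sketch (hypothetical driver callback): drain everything
 * the host has marked used, getting back the token passed to add_buf and the
 * host-written length for each buffer ("complete_request" is a hypothetical
 * helper):
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */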

void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	virtio_mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
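
/*
 * The return value above lets callers close the "more work arrived while
 * callbacks were suppressed" race.  A common polling pattern (illustrative;
 * "complete_request" is a hypothetical helper):
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		while ((token = virtqueue_get_buf(vq, &len)))
 *			complete_request(token, len);
 *	} while (!virtqueue_enable_cb(vq));	// false: host added more, loop
 */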

void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
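
/*
 * Intended for teardown: once the device has been reset so the host no
 * longer touches the ring, a driver can reclaim any buffers it added but the
 * host never used, e.g. (illustrative; "free_buffer" is a hypothetical
 * helper):
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_buffer(buf);
 */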

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
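
/*
 * Transports invoke this from their interrupt path with the virtqueue as the
 * opaque argument; for instance (a sketch of the caller side, not something
 * defined in this file) a transport might register it directly:
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 */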

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
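
/*
 * Sketch of how a transport might allocate and wire up a ring (assumptions
 * about the caller; "notify_fn", "cb_fn" and the PAGE_SIZE alignment are
 * placeholders).  vring_size() from virtio_ring.h gives the footprint for a
 * given entry count and alignment:
 *
 *	size_t size = PAGE_ALIGN(vring_size(num, PAGE_SIZE));
 *	void *pages = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq;
 *
 *	if (!pages)
 *		return NULL;
 *	vq = vring_new_virtqueue(num, PAGE_SIZE, vdev, pages,
 *				 notify_fn, cb_fn, "requests");
 */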

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
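
/*
 * Meant to be called from a transport's feature negotiation code so that
 * only ring features this file understands are kept.  A sketch of such a
 * caller (an assumption about the transport, not defined here):
 *
 *	static void foo_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);	// drop unknown ring bits
 *		// ...then tell the device which features were accepted...
 *	}
 */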

MODULE_LICENSE("GPL");