xref: /linux/drivers/block/virtio_blk.c (revision 913df4453f85f1fe79b35ecf3c9a0c0b707d22a2)
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>

#define PART_BITS 4

static int major, index;

struct virtio_blk
{
	spinlock_t lock;

	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	mempool_t *pool;

	/* What the host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[/*sg_elems*/];
};

struct virtblk_req
{
	struct list_head list;
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
};

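/*
 * Virtqueue callback: runs when the host signals that one or more requests
 * have completed.  Each token returned by get_buf() is the virtblk_req we
 * queued in do_req(); by the time we see it here the host has already filled
 * in vbr->status (and, for SCSI requests, the sense buffer and in_hdr).
 */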
static void blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
		int error;

		switch (vbr->status) {
		case VIRTIO_BLK_S_OK:
			error = 0;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			error = -ENOTTY;
			break;
		default:
			error = -EIO;
			break;
		}

		if (blk_pc_request(vbr->req)) {
			vbr->req->resid_len = vbr->in_hdr.residual;
			vbr->req->sense_len = vbr->in_hdr.sense_len;
			vbr->req->errors = vbr->in_hdr.errors;
		}

		__blk_end_request_all(vbr->req, error);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
}

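/*
 * Build and queue the descriptor chain for one request.  The scatterlist is
 * laid out in the order the device expects: out_hdr, then (for SCSI packet
 * commands) the command block, then the data pages mapped from the request,
 * then (for SCSI) the sense buffer and in_hdr, and finally the status byte.
 * Everything up to and including the data of a WRITE is passed as "out"
 * buffers; the rest is passed as "in" buffers for the host to fill.
 */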
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out = 0, in = 0;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_BLOCK_PC:
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_LINUX_BLOCK:
		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		}
		/*FALLTHRU*/
	default:
		/* We don't put anything else in the queue. */
		BUG();
	}

	if (blk_barrier_rq(vbr->req))
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the status byte we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (blk_pc_request(vbr->req))
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

	if (blk_pc_request(vbr->req)) {
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
	}

	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	if (num) {
		if (rq_data_dir(vbr->req) == WRITE) {
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
			out += num;
		} else {
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
			in += num;
		}
	}

	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}

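/*
 * The request_fn for our queue: pull requests off the block layer queue and
 * hand them to do_req() until it fails (no free mempool entry or no room in
 * the ring), in which case the queue is stopped and later restarted from
 * blk_done().  The host is kicked once per batch rather than per request.
 */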
static void do_virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = q->queuedata;
	struct request *req;
	unsigned int issued = 0;

	while ((req = blk_peek_request(q)) != NULL) {
		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
		blk_start_request(req);
		issued++;
	}

	if (issued)
		vblk->vq->vq_ops->kick(vblk->vq);
}

/* Return the ATA identify data provided by the host (HDIO_GET_IDENTITY). */
static int virtblk_identify(struct gendisk *disk, void *argp)
{
	struct virtio_blk *vblk = disk->private_data;
	void *opaque;
	int err = -ENOMEM;

	opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (!opaque)
		goto out;

	err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
		offsetof(struct virtio_blk_config, identify), opaque,
		VIRTIO_BLK_ID_BYTES);

	if (err)
		goto out_kfree;

	if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
		err = -EFAULT;

out_kfree:
	kfree(opaque);
out:
	return err;
}

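/*
 * prepare_flush_fn handed to blk_queue_ordered(): rewrites the block layer's
 * flush requests into REQ_LB_OP_FLUSH commands, which do_req() translates
 * into VIRTIO_BLK_T_FLUSH for the host.
 */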
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
{
	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_FLUSH;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;
	void __user *argp = (void __user *)data;

	if (cmd == HDIO_GET_IDENTITY)
		return virtblk_identify(disk, argp);

	/*
	 * Only allow the generic SCSI ioctls if the host supports them.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);

	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

static const struct block_device_operations virtblk_fops = {
	.locked_ioctl = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

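/*
 * Each disk gets 1 << PART_BITS (16) minor numbers: one for the whole disk
 * and the rest for partitions, so virtio disk N starts at minor N * 16.
 */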
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

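/*
 * Probe: read seg_max from config space, allocate the driver state with an
 * sg table big enough for seg_max data segments plus the header and status
 * entries, set up the single request virtqueue and the request queue, apply
 * the feature-dependent limits (barriers, read-only, block size, ...) and
 * register the disk.
 */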
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	int err;
	u64 cap;
	u32 v;
	u32 blk_size, sg_elems;

	if (index_to_minor(index) >= 1 << MINORBITS)
		return -ENOSPC;

	/* We need to know how many segments before we allocate. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&sg_elems);
	if (err)
		sg_elems = 1;

	/* We need an extra sg element at the head and one at the tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;
	sg_init_table(vblk->sg, vblk->sg_elems);

	/* We expect one virtqueue, for output. */
	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_mempool;
	}

	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!vblk->disk->queue) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	vblk->disk->queue->queuedata = vblk;
	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);

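	/*
	 * Name the disk using the same base-26 scheme as sd: index 0..25
	 * map to vda..vdz, 26 maps to vdaa, 701 to vdzz and 702 to vdaaa.
	 */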
	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(vblk->disk->disk_name, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 =  index % 26;
		sprintf(vblk->disk->disk_name, "vd%c%c%c",
			'a' + m1, 'a' + m2, 'a' + m3);
	}

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	index++;

	/* If flush or barriers are supported, tell the block layer the queue is ordered */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
				  virtblk_prepare_flush);
	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &cap, sizeof(cap));

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
	blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_sectors(vblk->disk->queue, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(vblk->disk->queue, v);
	else
		blk_queue_max_segment_size(vblk->disk->queue, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_logical_block_size(vblk->disk->queue, blk_size);

	add_disk(vblk->disk);
	return 0;

out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}

static void __devexit virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Nothing should be pending. */
	BUG_ON(!list_empty(&vblk->reqs));

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
	vdev->config->del_vqs(vdev);
	kfree(vblk);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY, VIRTIO_BLK_F_FLUSH
};

/*
 * virtio_blk causes spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
static struct virtio_driver __refdata virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtblk_probe,
	.remove =	__devexit_p(virtblk_remove),
};

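/*
 * Passing 0 to register_blkdev() asks the block layer for a dynamically
 * allocated major number, which is then shared by all virtio disks.
 */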
static int __init init(void)
{
	major = register_blkdev(0, "virtblk");
	if (major < 0)
		return major;
	return register_virtio_driver(&virtio_blk);
}

static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");