//#define DEBUG
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>

/* Enough for the request headers plus MAX_PHYS_SEGMENTS of data. */
#define VIRTIO_MAX_SG	(3+MAX_PHYS_SEGMENTS)

/* Disks are named vda, vdb, ...; this is the next letter to use. */
static unsigned char virtblk_index = 'a';

struct virtio_blk
{
	spinlock_t lock;

	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	mempool_t *pool;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[VIRTIO_MAX_SG];
};

struct virtblk_req
{
	struct list_head list;
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_blk_inhdr in_hdr;
};

static bool blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
		int uptodate;
		switch (vbr->in_hdr.status) {
		case VIRTIO_BLK_S_OK:
			uptodate = 1;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			uptodate = -ENOTTY;
			break;
		default:
			uptodate = 0;
			break;
		}

		end_dequeued_request(vbr->req, uptodate);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
	return true;
}

static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out, in;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
	if (blk_fs_request(vbr->req)) {
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = vbr->req->sector;
		vbr->out_hdr.ioprio = vbr->req->ioprio;
	} else if (blk_pc_request(vbr->req)) {
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = vbr->req->ioprio;
	} else {
		/* We don't put anything else in the queue. */
		BUG();
	}

	if (blk_barrier_rq(vbr->req))
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	/* This init could be done at vblk creation time */
	sg_init_table(vblk->sg, VIRTIO_MAX_SG);

	/* Buffer layout: out_hdr first, then the data segments, then
	 * in_hdr (the status byte the host writes back). */
	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
	sg_set_buf(&vblk->sg[num+1], &vbr->in_hdr, sizeof(vbr->in_hdr));

	/* For a write the data segments are host-readable (out);
	 * for a read they are host-writable (in). */
	if (rq_data_dir(vbr->req) == WRITE) {
		vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
		out = 1 + num;
		in = 1;
	} else {
		vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
		out = 1;
		in = 1 + num;
	}

	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}

static void do_virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = NULL;
	struct request *req;
	unsigned int issued = 0;

	while ((req = elv_next_request(q)) != NULL) {
		vblk = req->rq_disk->private_data;
		BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg));

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(req);
		issued++;
	}

	if (issued)
		vblk->vq->vq_ops->kick(vblk->vq);
}

static int virtblk_ioctl(struct inode *inode, struct file *filp,
			 unsigned cmd, unsigned long data)
{
	return scsi_cmd_ioctl(filp, inode->i_bdev->bd_disk->queue,
			      inode->i_bdev->bd_disk, cmd,
			      (void __user *)data);
}

static struct block_device_operations virtblk_fops = {
	.ioctl = virtblk_ioctl,
	.owner = THIS_MODULE,
};

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	int err, major;
	void *token;
	unsigned int len;
	u64 cap;
	u32 v;

	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;

	/* We expect one virtqueue, for output. */
	vblk->vq = vdev->config->find_vq(vdev, blk_done);
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		err = major;
		goto out_mempool;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << 4);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_unregister_blkdev;
	}

	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!vblk->disk->queue) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	sprintf(vblk->disk->disk_name, "vd%c", virtblk_index++);
	vblk->disk->major = major;
	vblk->disk->first_minor = 0;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;

	/* If barriers are supported, tell block layer that queue is ordered */
	token = vdev->config->find(vdev, VIRTIO_CONFIG_BLK_F, &len);
	if (virtio_use_bit(vdev, token, len, VIRTIO_BLK_F_BARRIER))
		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);

	err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_CAPACITY, &cap);
	if (err) {
		dev_err(&vdev->dev, "Bad/missing capacity in config\n");
		goto out_put_disk;
	}

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_SIZE_MAX, &v);
	if (!err)
		blk_queue_max_segment_size(vblk->disk->queue, v);
	else if (err != -ENOENT) {
		dev_err(&vdev->dev, "Bad SIZE_MAX in config\n");
		goto out_put_disk;
	}

	err = virtio_config_val(vdev, VIRTIO_CONFIG_BLK_F_SEG_MAX, &v);
	if (!err)
		blk_queue_max_hw_segments(vblk->disk->queue, v);
	else if (err != -ENOENT) {
		dev_err(&vdev->dev, "Bad SEG_MAX in config\n");
		goto out_put_disk;
	}

	add_disk(vblk->disk);
	return 0;

out_put_disk:
	put_disk(vblk->disk);
out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vq(vblk->vq);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int major = vblk->disk->major;

	BUG_ON(!list_empty(&vblk->reqs));
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	unregister_blkdev(major, "virtblk");
	mempool_destroy(vblk->pool);
	kfree(vblk);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_blk = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = __devexit_p(virtblk_remove),
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_blk);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");