/* xref: /linux/drivers/block/xen-blkfront.c (revision a1087ef6abedf0bfd60e5e3fddf33192cb2c1325) */
/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
	struct blkif_request req;
	unsigned long request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	unsigned int feature_flush;
	int is_ready;
};

static DEFINE_SPINLOCK(blkif_io_lock);

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK      256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))

#define DEV_NAME	"xvd"	/* name in /dev */

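/*
 * Free slots in the shadow ring are chained through the otherwise
 * unused req.id field, with info->shadow_free pointing at the head
 * of the chain; one shadow entry exists per ring slot.
 */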
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

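/*
 * Reserve @nr contiguous minors starting at @minor in the global
 * bitmap, growing the bitmap first when the range extends past its
 * current end.  Returns 0 on success or -EBUSY if any minor in the
 * range is already taken.
 */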
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		for (; minor < end; ++minor)
			__set_bit(minor, minors);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	for (; minor < end; ++minor)
		__clear_bit(minor, minors);
	spin_unlock(&minor_lock);
}

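/*
 * Run by the grant table code once enough grant references have been
 * freed; defer the actual queue restart to process context.
 */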
static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/*
	 * We don't have real geometry info, but let's at least return
	 * values consistent with the size of the device.
	 */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (req->cmd_flags & REQ_HARDBARRIER)
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
		fsect = sg->offset >> 9;
		lsect = fsect + (sg->length >> 9) - 1;
		/* Install a grant reference. */
		ref = gnttab_claim_grant_reference(&gref_head);
		BUG_ON(ref == -ENOSPC);

		gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req));

		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
		ring_req->seg[i] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}

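/*
 * Push newly queued requests to the shared ring and, if the backend
 * needs waking, signal it over the event channel.
 */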
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

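/*
 * Configure the request queue so that any request the block layer
 * hands us is guaranteed to fit in a single blkif ring slot.
 */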
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;

	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_max_hw_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}

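/* Propagate the current barrier/flush capability to the request queue. */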
static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_flush ? "enabled" : "disabled");
}

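/*
 * Allocate and initialise the gendisk for this vbd, deriving the
 * xvd* name and minor range from the Xen device id (classic or
 * extended encoding).
 */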
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err = -ENODEV;
	unsigned int offset;
	int minor;
	int nr_parts;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n",
		       info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		minor = BLKIF_MINOR(info->vdevice);
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	offset = minor / nr_parts;

	if (nr_minors > 1) {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
				'a' + ((offset / 26)-1), 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
				'a' + offset,
				minor & (nr_parts - 1));
		else
			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
				'a' + ((offset / 26) - 1),
				'a' + (offset % 26),
				minor & (nr_parts - 1));
	}

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		/* The disk was never added, so put it rather than del it. */
		put_disk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

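/*
 * Tear down the gendisk and request queue: quiesce I/O, cancel and
 * flush pending gnttab callback work, then release the minors.
 */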
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&blkif_io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

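/* Restart the queue if ring space is available; called with blkif_io_lock held. */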
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

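/*
 * Disconnect from the backend and release the ring page, grant
 * reference, event channel and irq.  With @suspend set, the shadow
 * state is kept so that blkif_recover() can reissue in-flight
 * requests after resume.
 */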
static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

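/* Revoke the grant references a completed request was using. */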
static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}

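/*
 * Event channel interrupt handler: consume responses from the shared
 * ring, complete the corresponding block layer requests and recycle
 * their shadow slots.
 */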
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		req  = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}

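/*
 * Allocate the shared ring page, grant the backend access to it and
 * bind an event channel for ring notifications.
 */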
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
						"%s: HVM does not support vbd %d as xen block device\n",
						__func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}

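/*
 * After a resume, requests that were in flight when we suspended were
 * lost; replay them from the shadow copy onto the fresh ring,
 * re-granting the backend access to their data pages.
 */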
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir(
					(struct request *)
					info->shadow[req->id].request));
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&blkif_io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&blkif_io_lock);

	return 0;
}

/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}

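/*
 * Handle a Closing request from the backend: release the gendisk if
 * nobody holds the device open, otherwise just switch to Closing and
 * let blkif_release() finish the teardown later.
 */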
static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}

/*
 * Invoked when the backend is finally 'ready' (and has provided the
 * details about the physical device: #sectors, sector size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;
	int barrier;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &barrier,
			    NULL);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	info->feature_flush = 0;

	/*
	 * The driver doesn't properly handle empty flushes, so let's
	 * disable barrier support for now.
	 */
#if 0
	if (!err && barrier)
		info->feature_flush = REQ_FLUSH;
#endif

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/*
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		blkfront_closing(info);
		break;
	}
}

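/*
 * Device teardown.  If the bdev is still open, we only detach from the
 * xenbus device and leave the final cleanup to blkif_release().
 */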
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}

static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

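/*
 * Fail the open with -ERESTARTSYS if the underlying xenbus device has
 * already gone away or been closed under us.
 */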
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}

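/*
 * On last close, carry out any teardown that blkfront_closing() or
 * blkfront_remove() had to defer while the device was still open.
 * The bdev reference taken here is dropped at 'out', after its last
 * use, rather than immediately after bdget_disk().
 */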
static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	bdput(bdev);
	mutex_unlock(&blkfront_mutex);
	return 0;
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};

static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};

static int __init xlblk_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);

static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");