/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
        BLKIF_STATE_DISCONNECTED,
        BLKIF_STATE_CONNECTED,
        BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
        struct blkif_request req;
        struct request *request;
        unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
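
/*
 * Sizing note, for orientation (numbers illustrative, for 4 KiB pages):
 * __CONST_RING_SIZE() rounds (PAGE_SIZE - sizeof(sring header)) /
 * sizeof(union blkif_sring_entry) down to a power of two, which works
 * out to 32 request slots per ring, i.e. at most 32 requests in flight
 * per virtual block device.
 */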

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
        spinlock_t io_lock;
        struct mutex mutex;
        struct xenbus_device *xbdev;
        struct gendisk *gd;
        int vdevice;
        blkif_vdev_t handle;
        enum blkif_state connected;
        int ring_ref;
        struct blkif_front_ring ring;
        struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int evtchn, irq;
        struct request_queue *rq;
        struct work_struct work;
        struct gnttab_free_callback callback;
        struct blk_shadow shadow[BLK_RING_SIZE];
        unsigned long shadow_free;
        unsigned int feature_flush;
        unsigned int flush_op;
        unsigned int feature_discard:1;
        unsigned int feature_secdiscard:1;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
        int is_ready;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
        (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF 0

#define PARTS_PER_DISK 16
#define PARTS_PER_EXT_DISK 256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME "xvd" /* name in /dev */

static int get_id_from_freelist(struct blkfront_info *info)
{
        unsigned long free = info->shadow_free;
        BUG_ON(free >= BLK_RING_SIZE);
        info->shadow_free = info->shadow[free].req.u.rw.id;
        info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
        return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
{
        info->shadow[id].req.u.rw.id = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
}
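
/*
 * The shadow free list threads unused slots together through the otherwise
 * idle req.u.rw.id field, so no extra storage is needed.  For illustration
 * (slot numbers hypothetical): with shadow_free == 3 and
 * shadow[3].req.u.rw.id == 7, get_id_from_freelist() returns 3 and leaves
 * shadow_free == 7; a later add_id_to_freelist(info, 3) pushes slot 3 back
 * onto the head of the list.
 */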

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
        unsigned int end = minor + nr;
        int rc;

        if (end > nr_minors) {
                unsigned long *bitmap, *old;

                bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
                                 GFP_KERNEL);
                if (bitmap == NULL)
                        return -ENOMEM;

                spin_lock(&minor_lock);
                if (end > nr_minors) {
                        old = minors;
                        memcpy(bitmap, minors,
                               BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
                        minors = bitmap;
                        nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
                } else
                        old = bitmap;
                spin_unlock(&minor_lock);
                kfree(old);
        }

        spin_lock(&minor_lock);
        if (find_next_bit(minors, end, minor) >= end) {
                bitmap_set(minors, minor, nr);
                rc = 0;
        } else
                rc = -EBUSY;
        spin_unlock(&minor_lock);

        return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
        unsigned int end = minor + nr;

        BUG_ON(end > nr_minors);
        spin_lock(&minor_lock);
        bitmap_clear(minors, minor, nr);
        spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
        struct blkfront_info *info = (struct blkfront_info *)arg;
        schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
        /* We don't have real geometry info, but let's at least return
           values consistent with the size of the device */
        sector_t nsect = get_capacity(bd->bd_disk);
        sector_t cylinders = nsect;

        hg->heads = 0xff;
        hg->sectors = 0x3f;
        sector_div(cylinders, hg->heads * hg->sectors);
        hg->cylinders = cylinders;
        if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
                hg->cylinders = 0xffff;
        return 0;
}
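
/*
 * Worked example (numbers illustrative): a 16 GiB disk has
 * nsect = 33554432 sectors; with 255 heads and 63 sectors per track this
 * yields 33554432 / (255 * 63) = 2088 cylinders, comfortably below the
 * 0xffff clamp applied to very large devices above.
 */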

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
                       unsigned command, unsigned long argument)
{
        struct blkfront_info *info = bdev->bd_disk->private_data;
        int i;

        dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
                command, (long)argument);

        switch (command) {
        case CDROMMULTISESSION:
                dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
                for (i = 0; i < sizeof(struct cdrom_multisession); i++)
                        if (put_user(0, (char __user *)(argument + i)))
                                return -EFAULT;
                return 0;

        case CDROM_GET_CAPABILITY: {
                struct gendisk *gd = info->gd;
                if (gd->flags & GENHD_FL_CD)
                        return 0;
                return -EINVAL;
        }

        default:
                /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
                  command);*/
                return -EINVAL; /* same return as native Linux */
        }

        return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req)
{
        struct blkfront_info *info = req->rq_disk->private_data;
        unsigned long buffer_mfn;
        struct blkif_request *ring_req;
        unsigned long id;
        unsigned int fsect, lsect;
        int i, ref;
        grant_ref_t gref_head;
        struct scatterlist *sg;

        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
                return 1;

        if (gnttab_alloc_grant_references(
                BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
                gnttab_request_free_callback(
                        &info->callback,
                        blkif_restart_queue_callback,
                        info,
                        BLKIF_MAX_SEGMENTS_PER_REQUEST);
                return 1;
        }

        /* Fill out a communications ring structure. */
        ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
        id = get_id_from_freelist(info);
        info->shadow[id].request = req;

        ring_req->u.rw.id = id;
        ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
        ring_req->u.rw.handle = info->handle;

        ring_req->operation = rq_data_dir(req) ?
                BLKIF_OP_WRITE : BLKIF_OP_READ;

        if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
                /*
                 * Ideally we can do an unordered flush-to-disk.  In case the
                 * backend only supports barriers, use that.  A barrier request
                 * is a superset of FUA, so we can implement it the same
                 * way.  (It's also a FLUSH+FUA, since it is
                 * guaranteed ordered WRT previous writes.)
                 */
                ring_req->operation = info->flush_op;
        }

        if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
                /* id, sector_number and handle are set above. */
                ring_req->operation = BLKIF_OP_DISCARD;
                ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
                if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
                        ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
                else
                        ring_req->u.discard.flag = 0;
        } else {
                ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
                                                           info->sg);
                BUG_ON(ring_req->u.rw.nr_segments >
                       BLKIF_MAX_SEGMENTS_PER_REQUEST);

                for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
                        buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
                        fsect = sg->offset >> 9;
                        lsect = fsect + (sg->length >> 9) - 1;
                        /* install a grant reference. */
                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);

                        gnttab_grant_foreign_access_ref(
                                        ref,
                                        info->xbdev->otherend_id,
                                        buffer_mfn,
                                        rq_data_dir(req));

                        info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
                        ring_req->u.rw.seg[i] =
                                        (struct blkif_request_segment) {
                                                .gref       = ref,
                                                .first_sect = fsect,
                                                .last_sect  = lsect };
                }
        }

        info->ring.req_prod_pvt++;

        /* Keep a private copy so we can reissue requests when recovering. */
        info->shadow[id].req = *ring_req;

        gnttab_free_grant_references(gref_head);

        return 0;
}
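
/*
 * Layout note (illustrative): each ring request carries at most
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) segments, and each segment is a
 * (gref, first_sect, last_sect) triple addressing at most one page, so
 * a single request can move up to 11 * 4 KiB = 44 KiB on 4 KiB pages.
 * blk_queue_max_segments() below is what keeps merged requests within
 * this bound.
 */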

static inline void flush_requests(struct blkfront_info *info)
{
        int notify;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

        if (notify)
                notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
        struct blkfront_info *info = NULL;
        struct request *req;
        int queued;

        pr_debug("Entered do_blkif_request\n");

        queued = 0;

        while ((req = blk_peek_request(rq)) != NULL) {
                info = req->rq_disk->private_data;

                if (RING_FULL(&info->ring))
                        goto wait;

                blk_start_request(req);

                if ((req->cmd_type != REQ_TYPE_FS) ||
                    ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
                    !info->flush_op)) {
                        __blk_end_request_all(req, -EIO);
                        continue;
                }

                pr_debug("do_blk_req %p: cmd %p, sec %lx, "
                         "(%u/%u) buffer:%p [%s]\n",
                         req, req->cmd, (unsigned long)blk_rq_pos(req),
                         blk_rq_cur_sectors(req), blk_rq_sectors(req),
                         req->buffer, rq_data_dir(req) ? "write" : "read");

                if (blkif_queue_request(req)) {
                        blk_requeue_request(rq, req);
wait:
                        /* Avoid pointless unplugs. */
                        blk_stop_queue(rq);
                        break;
                }

                queued++;
        }

        if (queued != 0)
                flush_requests(info);
}

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
        struct request_queue *rq;
        struct blkfront_info *info = gd->private_data;

        rq = blk_init_queue(do_blkif_request, &info->io_lock);
        if (rq == NULL)
                return -1;

        queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

        if (info->feature_discard) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
                blk_queue_max_discard_sectors(rq, get_capacity(gd));
                rq->limits.discard_granularity = info->discard_granularity;
                rq->limits.discard_alignment = info->discard_alignment;
                if (info->feature_secdiscard)
                        queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
        }

        /* Hard sector size and max sectors impersonate the equiv. hardware. */
        blk_queue_logical_block_size(rq, sector_size);
        blk_queue_max_hw_sectors(rq, 512);

        /* Each segment in a request is up to an aligned page in size. */
        blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
        blk_queue_max_segment_size(rq, PAGE_SIZE);

        /* Ensure a merged request will fit in a single I/O ring slot. */
        blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

        /* Make sure buffer addresses are sector-aligned. */
        blk_queue_dma_alignment(rq, 511);

        /* Make sure we don't use bounce buffers. */
        blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

        gd->queue = rq;

        return 0;
}


static void xlvbd_flush(struct blkfront_info *info)
{
        blk_queue_flush(info->rq, info->feature_flush);
        printk(KERN_INFO "blkfront: %s: %s: %s\n",
               info->gd->disk_name,
               info->flush_op == BLKIF_OP_WRITE_BARRIER ?
                "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
                "flush diskcache" : "barrier or flush"),
               info->feature_flush ? "enabled" : "disabled");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
        int major;
        major = BLKIF_MAJOR(vdevice);
        *minor = BLKIF_MINOR(vdevice);
        switch (major) {
                case XEN_IDE0_MAJOR:
                        *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
                        *minor = ((*minor / 64) * PARTS_PER_DISK) +
                                EMULATED_HD_DISK_MINOR_OFFSET;
                        break;
                case XEN_IDE1_MAJOR:
                        *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
                        *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
                                EMULATED_HD_DISK_MINOR_OFFSET;
                        break;
                case XEN_SCSI_DISK0_MAJOR:
                        *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
                        *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
                        break;
                case XEN_SCSI_DISK1_MAJOR:
                case XEN_SCSI_DISK2_MAJOR:
                case XEN_SCSI_DISK3_MAJOR:
                case XEN_SCSI_DISK4_MAJOR:
                case XEN_SCSI_DISK5_MAJOR:
                case XEN_SCSI_DISK6_MAJOR:
                case XEN_SCSI_DISK7_MAJOR:
                        *offset = (*minor / PARTS_PER_DISK) +
                                ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
                                EMULATED_SD_DISK_NAME_OFFSET;
                        *minor = *minor +
                                ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
                                EMULATED_SD_DISK_MINOR_OFFSET;
                        break;
                case XEN_SCSI_DISK8_MAJOR:
                case XEN_SCSI_DISK9_MAJOR:
                case XEN_SCSI_DISK10_MAJOR:
                case XEN_SCSI_DISK11_MAJOR:
                case XEN_SCSI_DISK12_MAJOR:
                case XEN_SCSI_DISK13_MAJOR:
                case XEN_SCSI_DISK14_MAJOR:
                case XEN_SCSI_DISK15_MAJOR:
                        *offset = (*minor / PARTS_PER_DISK) +
                                ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
                                EMULATED_SD_DISK_NAME_OFFSET;
                        *minor = *minor +
                                ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
                                EMULATED_SD_DISK_MINOR_OFFSET;
                        break;
                case XENVBD_MAJOR:
                        *offset = *minor / PARTS_PER_DISK;
                        break;
                default:
                        printk(KERN_WARNING "blkfront: your disk configuration is "
                                        "incorrect, please use an xvd device instead\n");
                        return -ENODEV;
        }
        return 0;
}

static char *encode_disk_name(char *ptr, unsigned int n)
{
        if (n >= 26)
                ptr = encode_disk_name(ptr, n / 26 - 1);
        *ptr = 'a' + n % 26;
        return ptr + 1;
}
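
/*
 * Naming example: encode_disk_name() turns a disk offset into the xvd
 * suffix using a bijective base-26 scheme: offset 0 -> "a" (xvda),
 * 25 -> "z" (xvdz), 26 -> "aa" (xvdaa), 27 -> "ab" (xvdab), and so on.
 */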

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
                               struct blkfront_info *info,
                               u16 vdisk_info, u16 sector_size)
{
        struct gendisk *gd;
        int nr_minors = 1;
        int err;
        unsigned int offset;
        int minor;
        int nr_parts;
        char *ptr;

        BUG_ON(info->gd != NULL);
        BUG_ON(info->rq != NULL);

        if ((info->vdevice>>EXT_SHIFT) > 1) {
                /* this is above the extended range; something is wrong */
                printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
                return -ENODEV;
        }

        if (!VDEV_IS_EXTENDED(info->vdevice)) {
                err = xen_translate_vdev(info->vdevice, &minor, &offset);
                if (err)
                        return err;
                nr_parts = PARTS_PER_DISK;
        } else {
                minor = BLKIF_MINOR_EXT(info->vdevice);
                nr_parts = PARTS_PER_EXT_DISK;
                offset = minor / nr_parts;
                if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
                        printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
                                        "emulated IDE disks,\n\t choose an xvd device name "
                                        "from xvde on\n", info->vdevice);
        }
        if (minor >> MINORBITS) {
                pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
                        info->vdevice, minor);
                return -ENODEV;
        }

        if ((minor % nr_parts) == 0)
                nr_minors = nr_parts;

        err = xlbd_reserve_minors(minor, nr_minors);
        if (err)
                goto out;
        err = -ENODEV;

        gd = alloc_disk(nr_minors);
        if (gd == NULL)
                goto release;

        strcpy(gd->disk_name, DEV_NAME);
        ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
        BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
        if (nr_minors > 1)
                *ptr = 0;
        else
                snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
                         "%d", minor & (nr_parts - 1));

        gd->major = XENVBD_MAJOR;
        gd->first_minor = minor;
        gd->fops = &xlvbd_block_fops;
        gd->private_data = info;
        gd->driverfs_dev = &(info->xbdev->dev);
        set_capacity(gd, capacity);

        if (xlvbd_init_blk_queue(gd, sector_size)) {
                del_gendisk(gd);
                goto release;
        }

        info->rq = gd->queue;
        info->gd = gd;

        xlvbd_flush(info);

        if (vdisk_info & VDISK_READONLY)
                set_disk_ro(gd, 1);

        if (vdisk_info & VDISK_REMOVABLE)
                gd->flags |= GENHD_FL_REMOVABLE;

        if (vdisk_info & VDISK_CDROM)
                gd->flags |= GENHD_FL_CD;

        return 0;

 release:
        xlbd_release_minors(minor, nr_minors);
 out:
        return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
        unsigned int minor, nr_minors;
        unsigned long flags;

        if (info->rq == NULL)
                return;

        spin_lock_irqsave(&info->io_lock, flags);

        /* No more blkif_request(). */
        blk_stop_queue(info->rq);

        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
        spin_unlock_irqrestore(&info->io_lock, flags);

        /* Flush gnttab callback work. Must be done with no locks held. */
        flush_work_sync(&info->work);

        del_gendisk(info->gd);

        minor = info->gd->first_minor;
        nr_minors = info->gd->minors;
        xlbd_release_minors(minor, nr_minors);

        blk_cleanup_queue(info->rq);
        info->rq = NULL;

        put_disk(info->gd);
        info->gd = NULL;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
        if (!RING_FULL(&info->ring)) {
                /* Re-enable calldowns. */
                blk_start_queue(info->rq);
                /* Kick things off immediately. */
                do_blkif_request(info->rq);
        }
}

static void blkif_restart_queue(struct work_struct *work)
{
        struct blkfront_info *info = container_of(work, struct blkfront_info, work);

        spin_lock_irq(&info->io_lock);
        if (info->connected == BLKIF_STATE_CONNECTED)
                kick_pending_request_queues(info);
        spin_unlock_irq(&info->io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
        /* Prevent new requests being issued until we fix things up. */
        spin_lock_irq(&info->io_lock);
        info->connected = suspend ?
                BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
        /* No more blkif_request(). */
        if (info->rq)
                blk_stop_queue(info->rq);
        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
        spin_unlock_irq(&info->io_lock);

        /* Flush gnttab callback work. Must be done with no locks held. */
        flush_work_sync(&info->work);

        /* Free resources associated with old device channel. */
        if (info->ring_ref != GRANT_INVALID_REF) {
                gnttab_end_foreign_access(info->ring_ref, 0,
                                          (unsigned long)info->ring.sring);
                info->ring_ref = GRANT_INVALID_REF;
                info->ring.sring = NULL;
        }
        if (info->irq)
                unbind_from_irqhandler(info->irq, info);
        info->evtchn = info->irq = 0;

}

static void blkif_completion(struct blk_shadow *s)
{
        int i;
        /* Do not call this for a BLKIF_OP_DISCARD request: nr_segments
         * shares its location with the discard flag, so the segment count
         * is not valid there. */
        for (i = 0; i < s->req.u.rw.nr_segments; i++)
                gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
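
/*
 * The response loop below follows the standard Xen ring protocol: consume
 * responses up to rsp_prod, then either set rsp_event to request another
 * notification, or re-check via RING_FINAL_CHECK_FOR_RESPONSES() to close
 * the race with a backend that produced more responses in the meantime.
 */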

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
        struct request *req;
        struct blkif_response *bret;
        RING_IDX i, rp;
        unsigned long flags;
        struct blkfront_info *info = (struct blkfront_info *)dev_id;
        int error;

        spin_lock_irqsave(&info->io_lock, flags);

        if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
                spin_unlock_irqrestore(&info->io_lock, flags);
                return IRQ_HANDLED;
        }

 again:
        rp = info->ring.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */

        for (i = info->ring.rsp_cons; i != rp; i++) {
                unsigned long id;

                bret = RING_GET_RESPONSE(&info->ring, i);
                id = bret->id;
                req = info->shadow[id].request;

                if (bret->operation != BLKIF_OP_DISCARD)
                        blkif_completion(&info->shadow[id]);

                add_id_to_freelist(info, id);

                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
                                printk(KERN_WARNING "blkfront: %s: discard op failed\n",
                                       info->gd->disk_name);
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
                                queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
                        }
                        __blk_end_request_all(req, error);
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
                                       info->gd->disk_name,
                                       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
                                       "barrier" : "flush disk cache");
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
                                       info->gd->disk_name,
                                       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
                                       "barrier" : "flush disk cache");
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(error)) {
                                if (error == -EOPNOTSUPP)
                                        error = 0;
                                info->feature_flush = 0;
                                info->flush_op = 0;
                                xlvbd_flush(info);
                        }
                        /* fall through */
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                        if (unlikely(bret->status != BLKIF_RSP_OKAY))
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);

                        __blk_end_request_all(req, error);
                        break;
                default:
                        BUG();
                }
        }

        info->ring.rsp_cons = i;

        if (i != info->ring.req_prod_pvt) {
                int more_to_do;
                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
                if (more_to_do)
                        goto again;
        } else
                info->ring.sring->rsp_event = i + 1;

        kick_pending_request_queues(info);

        spin_unlock_irqrestore(&info->io_lock, flags);

        return IRQ_HANDLED;
}


static int setup_blkring(struct xenbus_device *dev,
                         struct blkfront_info *info)
{
        struct blkif_sring *sring;
        int err;

        info->ring_ref = GRANT_INVALID_REF;

        sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
        if (!sring) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

        sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
        if (err < 0) {
                free_page((unsigned long)sring);
                info->ring.sring = NULL;
                goto fail;
        }
        info->ring_ref = err;

        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err)
                goto fail;

        err = bind_evtchn_to_irqhandler(info->evtchn,
                                        blkif_interrupt,
                                        IRQF_SAMPLE_RANDOM, "blkif", info);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err,
                                 "bind_evtchn_to_irqhandler failed");
                goto fail;
        }
        info->irq = err;

        return 0;
fail:
        blkif_free(info, 0);
        return err;
}
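
/*
 * After the handshake below, the frontend's xenstore directory contains,
 * for example (paths and values illustrative; domid and handle vary):
 *
 *   /local/domain/<domid>/device/vbd/<handle>/ring-ref      = "8"
 *   /local/domain/<domid>/device/vbd/<handle>/event-channel = "15"
 *   /local/domain/<domid>/device/vbd/<handle>/protocol      = "x86_64-abi"
 *
 * which the backend reads in order to map the shared ring and bind the
 * event channel from its side.
 */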

/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
                           struct blkfront_info *info)
{
        const char *message = NULL;
        struct xenbus_transaction xbt;
        int err;

        /* Create shared ring, alloc event channel. */
        err = setup_blkring(dev, info);
        if (err)
                goto out;

again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto destroy_blkring;
        }

        err = xenbus_printf(xbt, dev->nodename,
                            "ring-ref", "%u", info->ring_ref);
        if (err) {
                message = "writing ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename,
                            "event-channel", "%u", info->evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
                            XEN_IO_PROTO_ABI_NATIVE);
        if (err) {
                message = "writing protocol";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto destroy_blkring;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        if (message)
                xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
        blkif_free(info, 0);
 out:
        return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
{
        int err, vdevice, i;
        struct blkfront_info *info;

        /* FIXME: Use dynamic device id if this is not set. */
        err = xenbus_scanf(XBT_NIL, dev->nodename,
                           "virtual-device", "%i", &vdevice);
        if (err != 1) {
                /* go looking in the extended area instead */
                err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
                                   "%i", &vdevice);
                if (err != 1) {
                        xenbus_dev_fatal(dev, err, "reading virtual-device");
                        return err;
                }
        }

        if (xen_hvm_domain()) {
                char *type;
                int len;
                /* no unplug has been done: do not hook devices != xen vbds */
                if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
                        int major;

                        if (!VDEV_IS_EXTENDED(vdevice))
                                major = BLKIF_MAJOR(vdevice);
                        else
                                major = XENVBD_MAJOR;

                        if (major != XENVBD_MAJOR) {
                                printk(KERN_INFO
                                                "%s: HVM does not support vbd %d as xen block device\n",
                                                __FUNCTION__, vdevice);
                                return -ENODEV;
                        }
                }
                /* do not create a PV cdrom device if we are an HVM guest */
                type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
                if (IS_ERR(type))
                        return -ENODEV;
                if (strncmp(type, "cdrom", 5) == 0) {
                        kfree(type);
                        return -ENODEV;
                }
                kfree(type);
        }
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
                return -ENOMEM;
        }

        mutex_init(&info->mutex);
        spin_lock_init(&info->io_lock);
        info->xbdev = dev;
        info->vdevice = vdevice;
        info->connected = BLKIF_STATE_DISCONNECTED;
        INIT_WORK(&info->work, blkif_restart_queue);

        for (i = 0; i < BLK_RING_SIZE; i++)
                info->shadow[i].req.u.rw.id = i+1;
        info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
        dev_set_drvdata(&dev->dev, info);

        err = talk_to_blkback(dev, info);
        if (err) {
                kfree(info);
                dev_set_drvdata(&dev->dev, NULL);
                return err;
        }

        return 0;
}
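
/*
 * vdevice encoding, for reference (values illustrative): a classic
 * "virtual-device" of 51712 is (202 << 8) | 0, i.e. XENVBD_MAJOR with
 * minor 0, and names xvda; 51728 carries minor 16 and names xvdb.
 * Extended "virtual-device-ext" values set bit 28 and allow 256 minors
 * per disk, so (1 << 28) | (1 * 256) also names xvdb.
 */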

static int blkif_recover(struct blkfront_info *info)
{
        int i;
        struct blkif_request *req;
        struct blk_shadow *copy;
        int j;

        /* Stage 1: Make a safe copy of the shadow state. */
        copy = kmalloc(sizeof(info->shadow),
                       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
        if (!copy)
                return -ENOMEM;
        memcpy(copy, info->shadow, sizeof(info->shadow));

        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
        for (i = 0; i < BLK_RING_SIZE; i++)
                info->shadow[i].req.u.rw.id = i+1;
        info->shadow_free = info->ring.req_prod_pvt;
        info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

        /* Stage 3: Find pending requests and requeue them. */
        for (i = 0; i < BLK_RING_SIZE; i++) {
                /* Not in use? */
                if (!copy[i].request)
                        continue;

                /* Grab a request slot and copy shadow state into it. */
                req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
                *req = copy[i].req;

                /* We get a new request id, and must reset the shadow state. */
                req->u.rw.id = get_id_from_freelist(info);
                memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));

                if (req->operation != BLKIF_OP_DISCARD) {
                /* Rewrite any grant references invalidated by susp/resume. */
                        for (j = 0; j < req->u.rw.nr_segments; j++)
                                gnttab_grant_foreign_access_ref(
                                        req->u.rw.seg[j].gref,
                                        info->xbdev->otherend_id,
                                        pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
                                        rq_data_dir(info->shadow[req->u.rw.id].request));
                }
                info->shadow[req->u.rw.id].req = *req;

                info->ring.req_prod_pvt++;
        }

        kfree(copy);

        xenbus_switch_state(info->xbdev, XenbusStateConnected);

        spin_lock_irq(&info->io_lock);

        /* Now safe for us to use the shared ring */
        info->connected = BLKIF_STATE_CONNECTED;

        /* Send off requeued requests */
        flush_requests(info);

        /* Kick any other new requests queued since we resumed */
        kick_pending_request_queues(info);

        spin_unlock_irq(&info->io_lock);

        return 0;
}
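
/*
 * Why stage 3 re-grants: after a suspend/resume or migration the frontend
 * may face a different backend domain, so grant entries naming the old
 * otherend_id must be re-issued against the new one before the shadowed
 * requests are replayed onto the fresh ring.
 */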

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
        struct blkfront_info *info = dev_get_drvdata(&dev->dev);
        int err;

        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

        err = talk_to_blkback(dev, info);
        if (info->connected == BLKIF_STATE_SUSPENDED && !err)
                err = blkif_recover(info);

        return err;
}

static void
blkfront_closing(struct blkfront_info *info)
{
        struct xenbus_device *xbdev = info->xbdev;
        struct block_device *bdev = NULL;

        mutex_lock(&info->mutex);

        if (xbdev->state == XenbusStateClosing) {
                mutex_unlock(&info->mutex);
                return;
        }

        if (info->gd)
                bdev = bdget_disk(info->gd, 0);

        mutex_unlock(&info->mutex);

        if (!bdev) {
                xenbus_frontend_closed(xbdev);
                return;
        }

        mutex_lock(&bdev->bd_mutex);

        if (bdev->bd_openers) {
                xenbus_dev_error(xbdev, -EBUSY,
                                 "Device in use; refusing to close");
                xenbus_switch_state(xbdev, XenbusStateClosing);
        } else {
                xlvbd_release_gendisk(info);
                xenbus_frontend_closed(xbdev);
        }

        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
}

static void blkfront_setup_discard(struct blkfront_info *info)
{
        int err;
        char *type;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
        unsigned int discard_secure;

        type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
        if (IS_ERR(type))
                return;

        info->feature_secdiscard = 0;
        if (strncmp(type, "phy", 3) == 0) {
                err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "discard-granularity", "%u", &discard_granularity,
                        "discard-alignment", "%u", &discard_alignment,
                        NULL);
                if (!err) {
                        info->feature_discard = 1;
                        info->discard_granularity = discard_granularity;
                        info->discard_alignment = discard_alignment;
                }
                err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "discard-secure", "%d", &discard_secure,
                            NULL);
                if (!err)
                        info->feature_secdiscard = discard_secure;

        } else if (strncmp(type, "file", 4) == 0)
                info->feature_discard = 1;

        kfree(type);
}

/*
 * Invoked when the backend is finally 'ready' (and has produced the details
 * about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
        unsigned long long sectors;
        unsigned long sector_size;
        unsigned int binfo;
        int err;
        int barrier, flush, discard;

        switch (info->connected) {
        case BLKIF_STATE_CONNECTED:
                /*
                 * Potentially, the back-end may be signalling
                 * a capacity change; update the capacity.
                 */
                err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
                                   "sectors", "%Lu", &sectors);
                if (XENBUS_EXIST_ERR(err))
                        return;
                printk(KERN_INFO "Setting capacity to %Lu\n",
                       sectors);
                set_capacity(info->gd, sectors);
                revalidate_disk(info->gd);

                /* fall through */
        case BLKIF_STATE_SUSPENDED:
                return;

        default:
                break;
        }

        dev_dbg(&info->xbdev->dev, "%s:%s.\n",
                __func__, info->xbdev->otherend);

        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "sectors", "%llu", &sectors,
                            "info", "%u", &binfo,
                            "sector-size", "%lu", &sector_size,
                            NULL);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err,
                                 "reading backend fields at %s",
                                 info->xbdev->otherend);
                return;
        }

        info->feature_flush = 0;
        info->flush_op = 0;

        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "feature-barrier", "%d", &barrier,
                            NULL);

        /*
         * If there's no "feature-barrier" defined, then it means
         * we're dealing with a very old backend which writes
         * synchronously; nothing to do.
         *
         * If there are barriers, then we use flush.
         */
        if (!err && barrier) {
                info->feature_flush = REQ_FLUSH | REQ_FUA;
                info->flush_op = BLKIF_OP_WRITE_BARRIER;
        }
        /*
         * And if there is "feature-flush-cache" use that above
         * barriers.
         */
        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "feature-flush-cache", "%d", &flush,
                            NULL);

        if (!err && flush) {
                info->feature_flush = REQ_FLUSH;
                info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
        }

        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "feature-discard", "%d", &discard,
                            NULL);

        if (!err && discard)
                blkfront_setup_discard(info);

        err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
                                 info->xbdev->otherend);
                return;
        }

        xenbus_switch_state(info->xbdev, XenbusStateConnected);

        /* Kick pending requests. */
        spin_lock_irq(&info->io_lock);
        info->connected = BLKIF_STATE_CONNECTED;
        kick_pending_request_queues(info);
        spin_unlock_irq(&info->io_lock);

        add_disk(info->gd);

        info->is_ready = 1;
}
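
/*
 * Flush negotiation summary (mirrors the probing above): if the backend
 * advertises "feature-flush-cache", REQ_FLUSH requests are mapped to
 * BLKIF_OP_FLUSH_DISKCACHE; otherwise, if it advertises "feature-barrier",
 * REQ_FLUSH | REQ_FUA map to BLKIF_OP_WRITE_BARRIER; with neither, the
 * backend is assumed to write synchronously and flush requests are failed
 * early in do_blkif_request().
 */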

/**
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
        struct blkfront_info *info = dev_get_drvdata(&dev->dev);

        dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitWait:
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;

        case XenbusStateConnected:
                blkfront_connect(info);
                break;

        case XenbusStateClosing:
                blkfront_closing(info);
                break;
        }
}

static int blkfront_remove(struct xenbus_device *xbdev)
{
        struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
        struct block_device *bdev = NULL;
        struct gendisk *disk;

        dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

        blkif_free(info, 0);

        mutex_lock(&info->mutex);

        disk = info->gd;
        if (disk)
                bdev = bdget_disk(disk, 0);

        info->xbdev = NULL;
        mutex_unlock(&info->mutex);

        if (!bdev) {
                kfree(info);
                return 0;
        }

        /*
         * The xbdev was removed before we reached the Closed
         * state. See if it's safe to remove the disk. If the bdev
         * isn't closed yet, we let release take care of it.
         */

        mutex_lock(&bdev->bd_mutex);
        info = disk->private_data;

        dev_warn(disk_to_dev(disk),
                 "%s was hot-unplugged, %d stale handles\n",
                 xbdev->nodename, bdev->bd_openers);

        if (info && !bdev->bd_openers) {
                xlvbd_release_gendisk(info);
                disk->private_data = NULL;
                kfree(info);
        }

        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);

        return 0;
}

static int blkfront_is_ready(struct xenbus_device *dev)
{
        struct blkfront_info *info = dev_get_drvdata(&dev->dev);

        return info->is_ready && info->xbdev;
}

static int blkif_open(struct block_device *bdev, fmode_t mode)
{
        struct gendisk *disk = bdev->bd_disk;
        struct blkfront_info *info;
        int err = 0;

        mutex_lock(&blkfront_mutex);

        info = disk->private_data;
        if (!info) {
                /* xbdev gone */
                err = -ERESTARTSYS;
                goto out;
        }

        mutex_lock(&info->mutex);

        if (!info->gd)
                /* xbdev is closed */
                err = -ERESTARTSYS;

        mutex_unlock(&info->mutex);

out:
        mutex_unlock(&blkfront_mutex);
        return err;
}

static int blkif_release(struct gendisk *disk, fmode_t mode)
{
        struct blkfront_info *info = disk->private_data;
        struct block_device *bdev;
        struct xenbus_device *xbdev;

        mutex_lock(&blkfront_mutex);

        bdev = bdget_disk(disk, 0);

        if (bdev->bd_openers)
                goto out;

        /*
         * Check if we have been instructed to close. We will have
         * deferred this request, because the bdev was still open.
         */

        mutex_lock(&info->mutex);
        xbdev = info->xbdev;

        if (xbdev && xbdev->state == XenbusStateClosing) {
                /* pending switch to state closed */
                dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
                xlvbd_release_gendisk(info);
                xenbus_frontend_closed(info->xbdev);
        }

        mutex_unlock(&info->mutex);

        if (!xbdev) {
                /* sudden device removal */
                dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
                xlvbd_release_gendisk(info);
                disk->private_data = NULL;
                kfree(info);
        }

out:
        bdput(bdev);
        mutex_unlock(&blkfront_mutex);
        return 0;
}

static const struct block_device_operations xlvbd_block_fops =
{
        .owner = THIS_MODULE,
        .open = blkif_open,
        .release = blkif_release,
        .getgeo = blkif_getgeo,
        .ioctl = blkif_ioctl,
};


static const struct xenbus_device_id blkfront_ids[] = {
        { "vbd" },
        { "" }
};

static DEFINE_XENBUS_DRIVER(blkfront, ,
        .probe = blkfront_probe,
        .remove = blkfront_remove,
        .resume = blkfront_resume,
        .otherend_changed = blkback_changed,
        .is_ready = blkfront_is_ready,
);

static int __init xlblk_init(void)
{
        int ret;

        if (!xen_domain())
                return -ENODEV;

        if (xen_hvm_domain() && !xen_platform_pci_unplug)
                return -ENODEV;

        if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
                printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
                       XENVBD_MAJOR, DEV_NAME);
                return -ENODEV;
        }

        ret = xenbus_register_frontend(&blkfront_driver);
        if (ret) {
                unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
                return ret;
        }

        return 0;
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
        xenbus_unregister_driver(&blkfront_driver);
        unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
        kfree(minors);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");