/*
 * XenBSD block device driver
 *
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <machine/vmparam.h>
#include <sys/bus_dma.h>

#include <machine/_inttypes.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>

#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus/xenbusvar.h>

#include <geom/geom_disk.h>

#include <dev/xen/blkfront/block.h>

#include "xenbus_if.h"

/* prototypes */
static void xb_free_command(struct xb_command *cm);
static void xb_startio(struct xb_softc *sc);
static void blkfront_connect(struct xb_softc *);
static void blkfront_closing(device_t);
static int blkfront_detach(device_t);
static int setup_blkring(struct xb_softc *);
static void blkif_int(void *);
static void blkfront_initialize(struct xb_softc *);
static int blkif_completion(struct xb_command *);
static void blkif_free(struct xb_softc *);
static void blkif_queue_cb(void *, bus_dma_segment_t *, int, int);

static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");

#define GRANT_INVALID_REF 0

/* Control whether runtime update of vbds is enabled. */
#define ENABLE_VBD_UPDATE 0

#if ENABLE_VBD_UPDATE
static void vbd_update(void);
#endif

#define BLKIF_STATE_DISCONNECTED 0
#define BLKIF_STATE_CONNECTED    1
#define BLKIF_STATE_SUSPENDED    2

#ifdef notyet
static char *blkif_state_name[] = {
	[BLKIF_STATE_DISCONNECTED] = "disconnected",
	[BLKIF_STATE_CONNECTED]    = "connected",
	[BLKIF_STATE_SUSPENDED]    = "closed",
};

static char * blkif_status_name[] = {
	[BLKIF_INTERFACE_STATUS_CLOSED]       = "closed",
	[BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
	[BLKIF_INTERFACE_STATUS_CONNECTED]    = "connected",
	[BLKIF_INTERFACE_STATUS_CHANGED]      = "changed",
};
#endif

#if 0
#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

static int blkif_open(struct disk *dp);
static int blkif_close(struct disk *dp);
static int blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td);
static int blkif_queue_request(struct xb_softc *sc, struct xb_command *cm);
static void xb_strategy(struct bio *bp);

// In order to quiesce the device during kernel dumps, outstanding requests to
// DOM0 for disk reads/writes need to be accounted for.
static int xb_dump(void *, void *, vm_offset_t, off_t, size_t);

/* XXX move to xb_vbd.c when VBD update support is added */
#define MAX_VBDS 64

#define XBD_SECTOR_SIZE		512	/* XXX: assume for now */
#define XBD_SECTOR_SHFT		9

/*
 * Translate Linux major/minor to an appropriate name and unit
 * number. For HVM guests, this allows us to use the same drive names
 * with blkfront as the emulated drives, easing transition slightly.
 */
static void
blkfront_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
{
	static struct vdev_info {
		int major;
		int shift;
		int base;
		const char *name;
	} info[] = {
		{3,	6,	0,	"ad"},	/* ide0 */
		{22,	6,	2,	"ad"},	/* ide1 */
		{33,	6,	4,	"ad"},	/* ide2 */
		{34,	6,	6,	"ad"},	/* ide3 */
		{56,	6,	8,	"ad"},	/* ide4 */
		{57,	6,	10,	"ad"},	/* ide5 */
		{88,	6,	12,	"ad"},	/* ide6 */
		{89,	6,	14,	"ad"},	/* ide7 */
		{90,	6,	16,	"ad"},	/* ide8 */
		{91,	6,	18,	"ad"},	/* ide9 */

		{8,	4,	0,	"da"},	/* scsi disk0 */
		{65,	4,	16,	"da"},	/* scsi disk1 */
		{66,	4,	32,	"da"},	/* scsi disk2 */
		{67,	4,	48,	"da"},	/* scsi disk3 */
		{68,	4,	64,	"da"},	/* scsi disk4 */
		{69,	4,	80,	"da"},	/* scsi disk5 */
		{70,	4,	96,	"da"},	/* scsi disk6 */
		{71,	4,	112,	"da"},	/* scsi disk7 */
		{128,	4,	128,	"da"},	/* scsi disk8 */
		{129,	4,	144,	"da"},	/* scsi disk9 */
		{130,	4,	160,	"da"},	/* scsi disk10 */
		{131,	4,	176,	"da"},	/* scsi disk11 */
		{132,	4,	192,	"da"},	/* scsi disk12 */
		{133,	4,	208,	"da"},	/* scsi disk13 */
		{134,	4,	224,	"da"},	/* scsi disk14 */
		{135,	4,	240,	"da"},	/* scsi disk15 */

		{202,	4,	0,	"xbd"},	/* xbd */

		{0,	0,	0,	NULL},
	};
	int major = vdevice >> 8;
	int minor = vdevice & 0xff;
	int i;

	if (vdevice & (1 << 28)) {
		*unit = (vdevice & ((1 << 28) - 1)) >> 8;
		*name = "xbd";
		return;
	}

	for (i = 0; info[i].major; i++) {
		if (info[i].major == major) {
			*unit = info[i].base + (minor >> info[i].shift);
			*name = info[i].name;
			return;
		}
	}

	*unit = minor >> 4;
	*name = "xbd";
}

int
xlvbd_add(struct xb_softc *sc, blkif_sector_t sectors,
    int vdevice, uint16_t vdisk_info, unsigned long sector_size)
{
	int unit, error = 0;
	const char *name;

	blkfront_vdevice_to_unit(vdevice, &unit, &name);

	sc->xb_unit = unit;

	if (strcmp(name, "xbd"))
		device_printf(sc->xb_dev, "attaching as %s%d\n", name, unit);

	sc->xb_disk = disk_alloc();
	sc->xb_disk->d_unit = sc->xb_unit;
	sc->xb_disk->d_open = blkif_open;
	sc->xb_disk->d_close = blkif_close;
	sc->xb_disk->d_ioctl = blkif_ioctl;
	sc->xb_disk->d_strategy = xb_strategy;
	sc->xb_disk->d_dump = xb_dump;
	sc->xb_disk->d_name = name;
	sc->xb_disk->d_drv1 = sc;
	sc->xb_disk->d_sectorsize = sector_size;

	sc->xb_disk->d_mediasize = sectors * sector_size;
	sc->xb_disk->d_maxsize = sc->max_request_size;
	sc->xb_disk->d_flags = 0;
	disk_create(sc->xb_disk, DISK_VERSION_00);

	return error;
}

/************************ end VBD support *****************/
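
/*
 * Rough request life cycle in this driver (derived from the code below):
 * GEOM hands a bio to xb_strategy(), which queues it and calls
 * xb_startio().  xb_startio() pairs bios with free xb_command slots
 * (xb_bio_command), maps them for DMA (blkif_queue_request), and the
 * busdma callback blkif_queue_cb() fills shared-ring entries and grants
 * the backend access to the data pages.  The backend signals completion
 * through the event channel; blkif_int() reaps responses and finishes
 * each bio in xb_bio_complete().
 */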

/*
 * Read/write routine for a buffer.  Finds the proper unit, places it on
 * the sortq and kicks the controller.
 */
static void
xb_strategy(struct bio *bp)
{
	struct xb_softc *sc = (struct xb_softc *)bp->bio_disk->d_drv1;

	/* bogus disk? */
	if (sc == NULL) {
		bp->bio_error = EINVAL;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	mtx_lock(&sc->xb_io_lock);

	xb_enqueue_bio(sc, bp);
	xb_startio(sc);

	mtx_unlock(&sc->xb_io_lock);
	return;
}

static void
xb_bio_complete(struct xb_softc *sc, struct xb_command *cm)
{
	struct bio *bp;

	bp = cm->bp;

	if (unlikely(cm->status != BLKIF_RSP_OKAY)) {
		disk_err(bp, "disk error", -1, 0);
		printf(" status: %x\n", cm->status);
		bp->bio_flags |= BIO_ERROR;
	}

	if (bp->bio_flags & BIO_ERROR)
		bp->bio_error = EIO;
	else
		bp->bio_resid = 0;

	xb_free_command(cm);
	biodone(bp);
}

// Quiesce the disk writes for a dump file before allowing the next buffer.
static void
xb_quiesce(struct xb_softc *sc)
{
	int mtd;

	// While there are outstanding requests
	while (!TAILQ_EMPTY(&sc->cm_busy)) {
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->ring, mtd);
		if (mtd) {
			/* Received request completions, update queue. */
			blkif_int(sc);
		}
		if (!TAILQ_EMPTY(&sc->cm_busy)) {
			/*
			 * Still pending requests, wait for the disk i/o
			 * to complete.
			 */
			HYPERVISOR_yield();
		}
	}
}

/* Kernel dump function for a paravirtualized disk device */
static void
xb_dump_complete(struct xb_command *cm)
{

	xb_enqueue_complete(cm);
}

static int
xb_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp = arg;
	struct xb_softc *sc = (struct xb_softc *) dp->d_drv1;
	struct xb_command *cm;
	size_t chunk;
	int sbp;
	int rc = 0;

	if (length <= 0)
		return (rc);

	xb_quiesce(sc);	/* All quiet on the western front. */

	/*
	 * If this lock is held, then this module is failing, and a
	 * successful kernel dump is highly unlikely anyway.
	 */
	mtx_lock(&sc->xb_io_lock);

	/* Split the 64KB block as needed */
	for (sbp = 0; length > 0; sbp++) {
		cm = xb_dequeue_free(sc);
		if (cm == NULL) {
			mtx_unlock(&sc->xb_io_lock);
			device_printf(sc->xb_dev, "dump: no more commands?\n");
			return (EBUSY);
		}

		if (gnttab_alloc_grant_references(sc->max_request_segments,
		    &cm->gref_head) != 0) {
			xb_free_command(cm);
			mtx_unlock(&sc->xb_io_lock);
			device_printf(sc->xb_dev, "no more grant allocs?\n");
			return (EBUSY);
		}

		chunk = length > sc->max_request_size
		    ? sc->max_request_size : length;
		cm->data = virtual;
		cm->datalen = chunk;
		cm->operation = BLKIF_OP_WRITE;
		cm->sector_number = offset / dp->d_sectorsize;
		cm->cm_complete = xb_dump_complete;

		xb_enqueue_ready(cm);

		length -= chunk;
		offset += chunk;
		virtual = (char *) virtual + chunk;
	}

	/* Tell DOM0 to do the I/O */
	xb_startio(sc);
	mtx_unlock(&sc->xb_io_lock);

	/* Poll for the completion. */
	xb_quiesce(sc);	/* All quiet on the eastern front */

	/* If there were any errors, bail out... */
	while ((cm = xb_dequeue_complete(sc)) != NULL) {
		if (cm->status != BLKIF_RSP_OKAY) {
			device_printf(sc->xb_dev,
			    "Dump I/O failed at sector %jd\n",
			    cm->sector_number);
			rc = EIO;
		}
		xb_free_command(cm);
	}

	return (rc);
}

static int
blkfront_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vbd")) {
		device_set_desc(dev, "Virtual Block Device");
		device_quiet(dev);
		return (0);
	}

	return (ENXIO);
}

static void
xb_setup_sysctl(struct xb_softc *xb)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;

	sysctl_ctx = device_get_sysctl_ctx(xb->xb_dev);
	if (sysctl_ctx == NULL)
		return;

	sysctl_tree = device_get_sysctl_tree(xb->xb_dev);
	if (sysctl_tree == NULL)
		return;

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_requests", CTLFLAG_RD, &xb->max_requests, -1,
	    "maximum outstanding requests (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_request_segments", CTLFLAG_RD,
	    &xb->max_request_segments, 0,
	    "maximum number of pages per request (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_request_size", CTLFLAG_RD,
	    &xb->max_request_size, 0,
	    "maximum size in bytes of a request (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "ring_pages", CTLFLAG_RD,
	    &xb->ring_pages, 0,
	    "communication channel pages (negotiated)");
}

/*
 * Attach: the XenStore node supplies the backend directory and virtual
 * device number.  The event channel and shared ring entries are set up
 * later, once the backend has published its protocol capabilities; we
 * watch the backend state to learn when that happens.
 */
static int
blkfront_attach(device_t dev)
{
	struct xb_softc *sc;
	const char *name;
	uint32_t vdevice;
	int error;
	int i;
	int unit;

	/* FIXME: Use dynamic device id if this is not set. */
	error = xs_scanf(XST_NIL, xenbus_get_node(dev),
	    "virtual-device", NULL, "%" PRIu32, &vdevice);
	if (error) {
		xenbus_dev_fatal(dev, error, "reading virtual-device");
		device_printf(dev, "Couldn't determine virtual device.\n");
		return (error);
	}

	blkfront_vdevice_to_unit(vdevice, &unit, &name);
	if (!strcmp(name, "xbd"))
		device_set_unit(dev, unit);

	sc = device_get_softc(dev);
	mtx_init(&sc->xb_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
	xb_initq_free(sc);
	xb_initq_busy(sc);
	xb_initq_ready(sc);
	xb_initq_complete(sc);
	xb_initq_bio(sc);
	for (i = 0; i < XBF_MAX_RING_PAGES; i++)
		sc->ring_ref[i] = GRANT_INVALID_REF;

	sc->xb_dev = dev;
	sc->vdevice = vdevice;
	sc->connected = BLKIF_STATE_DISCONNECTED;

	xb_setup_sysctl(sc);

	/* Wait for backend device to publish its protocol capabilities. */
	xenbus_set_state(dev, XenbusStateInitialising);

	return (0);
}
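
/*
 * Quiesce the device before a save/suspend: mark the interface suspended so
 * no new requests are issued, then wait up to 30 seconds for in-flight
 * commands to drain.  Return EBUSY (and restore the previous state) if they
 * do not.
 */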
static int
blkfront_suspend(device_t dev)
{
	struct xb_softc *sc = device_get_softc(dev);
	int retval;
	int saved_state;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xb_io_lock);
	saved_state = sc->connected;
	sc->connected = BLKIF_STATE_SUSPENDED;

	/* Wait for outstanding I/O to drain. */
	retval = 0;
	while (TAILQ_EMPTY(&sc->cm_busy) == 0) {
		if (msleep(&sc->cm_busy, &sc->xb_io_lock,
		    PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
			retval = EBUSY;
			break;
		}
	}
	mtx_unlock(&sc->xb_io_lock);

	if (retval != 0)
		sc->connected = saved_state;

	return (retval);
}

static int
blkfront_resume(device_t dev)
{
	struct xb_softc *sc = device_get_softc(dev);

	DPRINTK("blkfront_resume: %s\n", xenbus_get_node(dev));

	blkif_free(sc);
	blkfront_initialize(sc);
	return (0);
}

static void
blkfront_initialize(struct xb_softc *sc)
{
	const char *otherend_path;
	const char *node_path;
	uint32_t max_ring_page_order;
	int error;
	int i;

	if (xenbus_get_state(sc->xb_dev) != XenbusStateInitialising) {
		/* Initialization has already been performed. */
		return;
	}

	/*
	 * Protocol defaults valid even if negotiation for a
	 * setting fails.
	 */
	max_ring_page_order = 0;
	sc->ring_pages = 1;
	sc->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
	sc->max_request_size = XBF_SEGS_TO_SIZE(sc->max_request_segments);
	sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);

	/*
	 * Protocol negotiation.
	 *
	 * \note xs_gather() returns on the first encountered error, so
	 *       we must use independent calls in order to guarantee
	 *       we don't miss information in a sparsely populated back-end
	 *       tree.
	 *
	 * \note xs_scanf() does not update variables for unmatched
	 *       fields.
	 */
	otherend_path = xenbus_get_otherend_path(sc->xb_dev);
	node_path = xenbus_get_node(sc->xb_dev);

	/* Support both backend schemes for relaying ring page limits. */
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-ring-page-order", NULL, "%" PRIu32,
	    &max_ring_page_order);
	sc->ring_pages = 1 << max_ring_page_order;
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-ring-pages", NULL, "%" PRIu32,
	    &sc->ring_pages);
	if (sc->ring_pages < 1)
		sc->ring_pages = 1;

	sc->max_requests = BLKIF_MAX_RING_REQUESTS(sc->ring_pages * PAGE_SIZE);
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-requests", NULL, "%" PRIu32,
	    &sc->max_requests);

	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-request-segments", NULL, "%" PRIu32,
	    &sc->max_request_segments);

	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-request-size", NULL, "%" PRIu32,
	    &sc->max_request_size);

	if (sc->ring_pages > XBF_MAX_RING_PAGES) {
		device_printf(sc->xb_dev, "Back-end specified ring-pages of "
		    "%u limited to front-end limit of %zu.\n",
		    sc->ring_pages, XBF_MAX_RING_PAGES);
		sc->ring_pages = XBF_MAX_RING_PAGES;
	}

	if (powerof2(sc->ring_pages) == 0) {
		uint32_t new_page_limit;

		new_page_limit = 0x01 << (fls(sc->ring_pages) - 1);
		device_printf(sc->xb_dev, "Back-end specified ring-pages of "
		    "%u is not a power of 2.  Limited to %u.\n",
		    sc->ring_pages, new_page_limit);
		sc->ring_pages = new_page_limit;
	}
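
	/*
	 * Clamp the remaining backend-advertised limits to the maximum
	 * values this front-end implementation supports.
	 */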
	if (sc->max_requests > XBF_MAX_REQUESTS) {
		device_printf(sc->xb_dev, "Back-end specified max_requests of "
		    "%u limited to front-end limit of %u.\n",
		    sc->max_requests, XBF_MAX_REQUESTS);
		sc->max_requests = XBF_MAX_REQUESTS;
	}

	if (sc->max_request_segments > XBF_MAX_SEGMENTS_PER_REQUEST) {
		device_printf(sc->xb_dev, "Back-end specified "
		    "max_request_segments of %u limited to "
		    "front-end limit of %u.\n",
		    sc->max_request_segments,
		    XBF_MAX_SEGMENTS_PER_REQUEST);
		sc->max_request_segments = XBF_MAX_SEGMENTS_PER_REQUEST;
	}

	if (sc->max_request_size > XBF_MAX_REQUEST_SIZE) {
		device_printf(sc->xb_dev, "Back-end specified "
		    "max_request_size of %u limited to front-end "
		    "limit of %u.\n", sc->max_request_size,
		    XBF_MAX_REQUEST_SIZE);
		sc->max_request_size = XBF_MAX_REQUEST_SIZE;
	}

	if (sc->max_request_size > XBF_SEGS_TO_SIZE(sc->max_request_segments)) {
		device_printf(sc->xb_dev, "Back-end specified "
		    "max_request_size of %u limited to front-end "
		    "limit of %u.  (Too few segments.)\n",
		    sc->max_request_size,
		    XBF_SEGS_TO_SIZE(sc->max_request_segments));
		sc->max_request_size =
		    XBF_SEGS_TO_SIZE(sc->max_request_segments);
	}

	sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);

	/* Allocate data structures based on negotiated values. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    512, PAGE_SIZE,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    sc->max_request_size,
	    sc->max_request_segments,
	    PAGE_SIZE,				/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    busdma_lock_mutex,			/* lockfunc */
	    &sc->xb_io_lock,			/* lockarg */
	    &sc->xb_io_dmat);
	if (error != 0) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "Cannot allocate parent DMA tag\n");
		return;
	}

	/* Per-transaction data allocation. */
	sc->shadow = malloc(sizeof(*sc->shadow) * sc->max_requests,
	    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
	if (sc->shadow == NULL) {
		bus_dma_tag_destroy(sc->xb_io_dmat);
		xenbus_dev_fatal(sc->xb_dev, ENOMEM,
		    "Cannot allocate request structures\n");
		return;
	}

	for (i = 0; i < sc->max_requests; i++) {
		struct xb_command *cm;

		cm = &sc->shadow[i];
		cm->sg_refs = malloc(sizeof(grant_ref_t)
		    * sc->max_request_segments,
		    M_XENBLOCKFRONT, M_NOWAIT);
		if (cm->sg_refs == NULL)
			break;
		cm->id = i;
		cm->cm_sc = sc;
		if (bus_dmamap_create(sc->xb_io_dmat, 0, &cm->map) != 0)
			break;
		xb_free_command(cm);
	}

	if (setup_blkring(sc) != 0)
		return;
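
	/*
	 * Publish the negotiated parameters (ring geometry, request limits,
	 * event channel, and protocol ABI) in our XenStore node for the
	 * backend to consume.
	 */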
	/* Support both backend schemes for relaying ring page limits. */
	error = xs_printf(XST_NIL, node_path,
	    "num-ring-pages", "%u", sc->ring_pages);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/num-ring-pages",
		    node_path);
		return;
	}
	error = xs_printf(XST_NIL, node_path,
	    "ring-page-order", "%u", fls(sc->ring_pages) - 1);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/ring-page-order",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-requests", "%u", sc->max_requests);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/max-requests",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-request-segments", "%u", sc->max_request_segments);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/max-request-segments",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-request-size", "%u", sc->max_request_size);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/max-request-size",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path, "event-channel",
	    "%u", irq_to_evtchn_port(sc->irq));
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/event-channel",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "writing %s/protocol",
		    node_path);
		return;
	}

	xenbus_set_state(sc->xb_dev, XenbusStateInitialised);
}

static int
setup_blkring(struct xb_softc *sc)
{
	blkif_sring_t *sring;
	uintptr_t sring_page_addr;
	int error;
	int i;

	sring = malloc(sc->ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
	    M_NOWAIT|M_ZERO);
	if (sring == NULL) {
		xenbus_dev_fatal(sc->xb_dev, ENOMEM, "allocating shared ring");
		return (ENOMEM);
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&sc->ring, sring, sc->ring_pages * PAGE_SIZE);

	for (i = 0, sring_page_addr = (uintptr_t)sring;
	     i < sc->ring_pages;
	     i++, sring_page_addr += PAGE_SIZE) {

		error = xenbus_grant_ring(sc->xb_dev,
		    (vtomach(sring_page_addr) >> PAGE_SHIFT), &sc->ring_ref[i]);
		if (error) {
			xenbus_dev_fatal(sc->xb_dev, error,
			    "granting ring_ref(%d)", i);
			return (error);
		}
	}
	if (sc->ring_pages == 1) {
		error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
		    "ring-ref", "%u", sc->ring_ref[0]);
		if (error) {
			xenbus_dev_fatal(sc->xb_dev, error,
			    "writing %s/ring-ref",
			    xenbus_get_node(sc->xb_dev));
			return (error);
		}
	} else {
		for (i = 0; i < sc->ring_pages; i++) {
			char ring_ref_name[] = "ring_refXX";

			snprintf(ring_ref_name, sizeof(ring_ref_name),
			    "ring-ref%u", i);
			error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
			    ring_ref_name, "%u", sc->ring_ref[i]);
			if (error) {
				xenbus_dev_fatal(sc->xb_dev, error,
				    "writing %s/%s",
				    xenbus_get_node(sc->xb_dev),
				    ring_ref_name);
				return (error);
			}
		}
	}

	error = bind_listening_port_to_irqhandler(
	    xenbus_get_otherend_id(sc->xb_dev),
	    "xbd", (driver_intr_t *)blkif_int, sc,
	    INTR_TYPE_BIO | INTR_MPSAFE, &sc->irq);
	if (error) {
		xenbus_dev_fatal(sc->xb_dev, error,
		    "bind_evtchn_to_irqhandler failed");
		return (error);
	}

	return (0);
}
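
/*
 * Front-end XenbusState transitions driven by this file:
 *
 *   blkfront_attach()     -> XenbusStateInitialising
 *   blkfront_initialize() -> XenbusStateInitialised (ring, event channel,
 *                            and limits published)
 *   blkfront_connect()    -> XenbusStateConnected (disk created)
 *   blkfront_closing()    -> XenbusStateClosing, then XenbusStateClosed
 */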

/**
 * Callback received when the backend's state changes.
 */
static void
blkfront_backend_changed(device_t dev, XenbusState backend_state)
{
	struct xb_softc *sc = device_get_softc(dev);

	DPRINTK("backend_state=%d\n", backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	case XenbusStateInitialised:
		blkfront_initialize(sc);
		break;

	case XenbusStateConnected:
		blkfront_initialize(sc);
		blkfront_connect(sc);
		break;

	case XenbusStateClosing:
		if (sc->users > 0)
			xenbus_dev_error(dev, -EBUSY,
			    "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		break;
	}
}

/*
** Invoked when the backend is finally 'ready' (and has published
** the details about the physical device - #sectors, size, etc).
*/
static void
blkfront_connect(struct xb_softc *sc)
{
	device_t dev = sc->xb_dev;
	unsigned long sectors, sector_size;
	unsigned long feature_barrier;
	unsigned int binfo;
	int err;

	if ((sc->connected == BLKIF_STATE_CONNECTED) ||
	    (sc->connected == BLKIF_STATE_SUSPENDED))
		return;

	DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));

	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
	    "sectors", "%lu", &sectors,
	    "info", "%u", &binfo,
	    "sector-size", "%lu", &sector_size,
	    NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
		    "reading backend fields at %s",
		    xenbus_get_otherend_path(dev));
		return;
	}
	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
	    "feature-barrier", "%lu", &feature_barrier,
	    NULL);
	if (err == 0 && feature_barrier != 0)
		sc->xb_flags |= XB_BARRIER;

	if (sc->xb_disk == NULL) {
		device_printf(dev, "%juMB <%s> at %s",
		    (uintmax_t) sectors / (1048576 / sector_size),
		    device_get_desc(dev),
		    xenbus_get_node(dev));
		bus_print_child_footer(device_get_parent(dev), dev);

		xlvbd_add(sc, sectors, sc->vdevice, binfo, sector_size);
	}

	(void)xenbus_set_state(dev, XenbusStateConnected);

	/* Kick pending requests. */
	mtx_lock(&sc->xb_io_lock);
	sc->connected = BLKIF_STATE_CONNECTED;
	xb_startio(sc);
	sc->xb_flags |= XB_READY;
	mtx_unlock(&sc->xb_io_lock);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void
blkfront_closing(device_t dev)
{
	struct xb_softc *sc = device_get_softc(dev);

	xenbus_set_state(dev, XenbusStateClosing);

	DPRINTK("blkfront_closing: %s removed\n", xenbus_get_node(dev));

	if (sc->xb_disk != NULL) {
		disk_destroy(sc->xb_disk);
		sc->xb_disk = NULL;
	}

	xenbus_set_state(dev, XenbusStateClosed);
}

static int
blkfront_detach(device_t dev)
{
	struct xb_softc *sc = device_get_softc(dev);

	DPRINTK("blkfront_remove: %s removed\n", xenbus_get_node(dev));

	blkif_free(sc);
	mtx_destroy(&sc->xb_io_lock);

	return 0;
}

static inline void
flush_requests(struct xb_softc *sc)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->ring, notify);

	if (notify)
		notify_remote_via_irq(sc->irq);
}

static void
blkif_restart_queue_callback(void *arg)
{
	struct xb_softc *sc = arg;

	mtx_lock(&sc->xb_io_lock);

	xb_startio(sc);

	mtx_unlock(&sc->xb_io_lock);
}

static int
blkif_open(struct disk *dp)
{
	struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL) {
		printf("xb%d: not found\n", dp->d_unit);
		return (ENXIO);
	}

	sc->xb_flags |= XB_OPEN;
	sc->users++;
	return (0);
}

static int
blkif_close(struct disk *dp)
{
	struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);
	sc->xb_flags &= ~XB_OPEN;
	if (--(sc->users) == 0) {
		/*
		 * Check whether we have been instructed to close.  We will
		 * have ignored this request initially, as the device was
		 * still mounted.
		 */
		if (xenbus_get_otherend_state(sc->xb_dev) == XenbusStateClosing)
			blkfront_closing(sc->xb_dev);
	}
	return (0);
}

static int
blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
{
	struct xb_softc *sc = (struct xb_softc *)dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);

	return (ENOTTY);
}

static void
xb_free_command(struct xb_command *cm)
{

	KASSERT((cm->cm_flags & XB_ON_XBQ_MASK) == 0,
	    ("Freeing command that is still on a queue\n"));

	cm->cm_flags = 0;
	cm->bp = NULL;
	cm->cm_complete = NULL;
	xb_enqueue_free(cm);
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into.  this should be a
 *   virtual address in the guest os.
 */
static struct xb_command *
xb_bio_command(struct xb_softc *sc)
{
	struct xb_command *cm;
	struct bio *bp;

	if (unlikely(sc->connected != BLKIF_STATE_CONNECTED))
		return (NULL);

	bp = xb_dequeue_bio(sc);
	if (bp == NULL)
		return (NULL);

	if ((cm = xb_dequeue_free(sc)) == NULL) {
		xb_requeue_bio(sc, bp);
		return (NULL);
	}

	if (gnttab_alloc_grant_references(sc->max_request_segments,
	    &cm->gref_head) != 0) {
		gnttab_request_free_callback(&sc->callback,
		    blkif_restart_queue_callback, sc,
		    sc->max_request_segments);
		xb_requeue_bio(sc, bp);
		xb_enqueue_free(cm);
		sc->xb_flags |= XB_FROZEN;
		return (NULL);
	}

	cm->bp = bp;
	cm->data = bp->bio_data;
	cm->datalen = bp->bio_bcount;
	cm->operation = (bp->bio_cmd == BIO_READ) ? BLKIF_OP_READ :
	    BLKIF_OP_WRITE;
	cm->sector_number = (blkif_sector_t)bp->bio_pblkno;

	return (cm);
}

static int
blkif_queue_request(struct xb_softc *sc, struct xb_command *cm)
{
	int error;

	error = bus_dmamap_load(sc->xb_io_dmat, cm->map, cm->data, cm->datalen,
	    blkif_queue_cb, cm, 0);
	if (error == EINPROGRESS) {
		printf("EINPROGRESS\n");
		sc->xb_flags |= XB_FROZEN;
		cm->cm_flags |= XB_CMD_FROZEN;
		return (0);
	}

	return (error);
}

static void
blkif_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct xb_softc *sc;
	struct xb_command *cm;
	blkif_request_t *ring_req;
	struct blkif_request_segment *sg;
	struct blkif_request_segment *last_block_sg;
	grant_ref_t *sg_ref;
	vm_paddr_t buffer_ma;
	uint64_t fsect, lsect;
	int ref;
	int op;
	int block_segs;

	cm = arg;
	sc = cm->cm_sc;

	//printf("%s: Start\n", __func__);
	if (error) {
		printf("error %d in blkif_queue_cb\n", error);
		cm->bp->bio_error = EIO;
		biodone(cm->bp);
		xb_free_command(cm);
		return;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&sc->ring, sc->ring.req_prod_pvt);
	sc->ring.req_prod_pvt++;
	ring_req->id = cm->id;
	ring_req->operation = cm->operation;
	ring_req->sector_number = cm->sector_number;
	ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xb_disk;
	ring_req->nr_segments = nsegs;
	cm->nseg = nsegs;

	block_segs    = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
	sg            = ring_req->seg;
	last_block_sg = sg + block_segs;
	sg_ref        = cm->sg_refs;
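
	/*
	 * The request header holds at most
	 * BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK segments; any remaining
	 * segments spill into follow-on "segment block" ring slots, each of
	 * which consumes an additional ring entry (hence the
	 * max_request_blocks check in xb_startio()).
	 */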
	while (1) {

		while (sg < last_block_sg) {
			buffer_ma = segs->ds_addr;
			fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
			lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;

			KASSERT(lsect <= 7, ("XEN disk driver data cannot "
			    "cross a page boundary"));

			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&cm->gref_head);

			/*
			 * GNTTAB_LIST_END == 0xffffffff, but it is private
			 * to gnttab.c.
			 */
			KASSERT(ref != ~0, ("grant_reference failed"));

			gnttab_grant_foreign_access_ref(
			    ref,
			    xenbus_get_otherend_id(sc->xb_dev),
			    buffer_ma >> PAGE_SHIFT,
			    ring_req->operation == BLKIF_OP_WRITE);

			*sg_ref = ref;
			*sg = (struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
			sg++;
			sg_ref++;
			segs++;
			nsegs--;
		}
		block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
		if (block_segs == 0)
			break;

		sg = BLKRING_GET_SEG_BLOCK(&sc->ring, sc->ring.req_prod_pvt);
		sc->ring.req_prod_pvt++;
		last_block_sg = sg + block_segs;
	}

	if (cm->operation == BLKIF_OP_READ)
		op = BUS_DMASYNC_PREREAD;
	else if (cm->operation == BLKIF_OP_WRITE)
		op = BUS_DMASYNC_PREWRITE;
	else
		op = 0;
	bus_dmamap_sync(sc->xb_io_dmat, cm->map, op);

	gnttab_free_grant_references(cm->gref_head);

	xb_enqueue_busy(cm);

	/*
	 * This flag means that we're probably executing in the busdma swi
	 * instead of in the startio context, so an explicit flush is needed.
	 */
	if (cm->cm_flags & XB_CMD_FROZEN)
		flush_requests(sc);

	//printf("%s: Done\n", __func__);
	return;
}

/*
 * Dequeue buffers and place them in the shared communication ring.
 * Return when no more requests can be accepted or all buffers have
 * been queued.
 *
 * Signal XEN once the ring has been filled out.
 */
static void
xb_startio(struct xb_softc *sc)
{
	struct xb_command *cm;
	int error, queued = 0;

	mtx_assert(&sc->xb_io_lock, MA_OWNED);

	if (sc->connected != BLKIF_STATE_CONNECTED)
		return;

	while (RING_FREE_REQUESTS(&sc->ring) >= sc->max_request_blocks) {
		if (sc->xb_flags & XB_FROZEN)
			break;

		cm = xb_dequeue_ready(sc);

		if (cm == NULL)
			cm = xb_bio_command(sc);

		if (cm == NULL)
			break;

		if ((error = blkif_queue_request(sc, cm)) != 0) {
			printf("blkif_queue_request returned %d\n", error);
			break;
		}
		queued++;
	}

	if (queued != 0)
		flush_requests(sc);
}
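
/*
 * Interrupt handler for the shared ring: reap completed responses, sync and
 * unload the associated DMA maps, hand each result back to its originating
 * bio or completion callback, and restart any queued I/O.
 */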
static void
blkif_int(void *xsc)
{
	struct xb_softc *sc = xsc;
	struct xb_command *cm;
	blkif_response_t *bret;
	RING_IDX i, rp;
	int op;

	mtx_lock(&sc->xb_io_lock);

	if (unlikely(sc->connected == BLKIF_STATE_DISCONNECTED)) {
		mtx_unlock(&sc->xb_io_lock);
		return;
	}

 again:
	rp = sc->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = sc->ring.rsp_cons; i != rp;) {
		bret = RING_GET_RESPONSE(&sc->ring, i);
		cm   = &sc->shadow[bret->id];

		xb_remove_busy(cm);
		i += blkif_completion(cm);

		if (cm->operation == BLKIF_OP_READ)
			op = BUS_DMASYNC_POSTREAD;
		else if (cm->operation == BLKIF_OP_WRITE)
			op = BUS_DMASYNC_POSTWRITE;
		else
			op = 0;
		bus_dmamap_sync(sc->xb_io_dmat, cm->map, op);
		bus_dmamap_unload(sc->xb_io_dmat, cm->map);

		/*
		 * If commands are completing then resources are probably
		 * being freed as well.  It's a cheap assumption even when
		 * wrong.
		 */
		sc->xb_flags &= ~XB_FROZEN;

		/*
		 * Directly call the i/o complete routine to save an
		 * indirection in the common case.
		 */
		cm->status = bret->status;
		if (cm->bp)
			xb_bio_complete(sc, cm);
		else if (cm->cm_complete)
			(cm->cm_complete)(cm);
		else
			xb_free_command(cm);
	}

	sc->ring.rsp_cons = i;

	if (i != sc->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else {
		sc->ring.sring->rsp_event = i + 1;
	}

	xb_startio(sc);

	if (unlikely(sc->connected == BLKIF_STATE_SUSPENDED))
		wakeup(&sc->cm_busy);

	mtx_unlock(&sc->xb_io_lock);
}

static void
blkif_free(struct xb_softc *sc)
{
	uint8_t *sring_page_ptr;
	int i;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xb_io_lock);
	sc->connected = BLKIF_STATE_DISCONNECTED;
	mtx_unlock(&sc->xb_io_lock);

	/* Free resources associated with old device channel. */
	if (sc->ring.sring != NULL) {
		sring_page_ptr = (uint8_t *)sc->ring.sring;
		for (i = 0; i < sc->ring_pages; i++) {
			if (sc->ring_ref[i] != GRANT_INVALID_REF) {
				gnttab_end_foreign_access_ref(sc->ring_ref[i]);
				sc->ring_ref[i] = GRANT_INVALID_REF;
			}
			sring_page_ptr += PAGE_SIZE;
		}
		free(sc->ring.sring, M_XENBLOCKFRONT);
		sc->ring.sring = NULL;
	}

	if (sc->shadow) {

		for (i = 0; i < sc->max_requests; i++) {
			struct xb_command *cm;

			cm = &sc->shadow[i];
			if (cm->sg_refs != NULL) {
				free(cm->sg_refs, M_XENBLOCKFRONT);
				cm->sg_refs = NULL;
			}

			bus_dmamap_destroy(sc->xb_io_dmat, cm->map);
		}
		free(sc->shadow, M_XENBLOCKFRONT);
		sc->shadow = NULL;

		bus_dma_tag_destroy(sc->xb_io_dmat);

		xb_initq_free(sc);
		xb_initq_ready(sc);
		xb_initq_complete(sc);
	}

	if (sc->irq) {
		unbind_from_irqhandler(sc->irq);
		sc->irq = 0;
	}
}

static int
blkif_completion(struct xb_command *s)
{
	//printf("%s: Req %p(%d)\n", __func__, s, s->nseg);
	gnttab_end_foreign_access_references(s->nseg, s->sg_refs);
	return (BLKIF_SEGS_TO_BLOCKS(s->nseg));
}

/* ** Driver registration ** */
static device_method_t blkfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		blkfront_probe),
	DEVMETHOD(device_attach,	blkfront_attach),
	DEVMETHOD(device_detach,	blkfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	blkfront_suspend),
	DEVMETHOD(device_resume,	blkfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, blkfront_backend_changed),

	{ 0, 0 }
};

static driver_t blkfront_driver = {
	"xbd",
	blkfront_methods,
	sizeof(struct xb_softc),
};
devclass_t blkfront_devclass;

DRIVER_MODULE(xbd, xenbusb_front, blkfront_driver, blkfront_devclass, 0, 0);