/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <machine/vmparam.h>
#include <sys/bus_dma.h>

#include <machine/_inttypes.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>

#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus/xenbusvar.h>

#include <geom/geom_disk.h>

#include <dev/xen/blkfront/block.h>

#include "xenbus_if.h"

/* prototypes */
static void xbd_free_command(struct xbd_command *cm);
static void xbd_startio(struct xbd_softc *sc);
static void xbd_connect(struct xbd_softc *);
static void xbd_closing(device_t);
static int xbd_detach(device_t);
static int xbd_setup_ring(struct xbd_softc *);
static void xbd_int(void *);
static void xbd_initialize(struct xbd_softc *);
static int xbd_completion(struct xbd_command *);
static void xbd_free(struct xbd_softc *);
static void xbd_queue_cb(void *, bus_dma_segment_t *, int, int);

static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");

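/*
 * Sentinel grant reference value; ring page references are initialized to
 * this in xbd_attach() and reset to it once the grant has been ended in
 * xbd_free().
 */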
#define GRANT_INVALID_REF 0

/* Control whether runtime update of vbds is enabled. */
#define ENABLE_VBD_UPDATE 0

#if ENABLE_VBD_UPDATE
static void vbd_update(void);
#endif

#define XBD_STATE_DISCONNECTED 0
#define XBD_STATE_CONNECTED    1
#define XBD_STATE_SUSPENDED    2

#ifdef notyet
static char *xbd_state_name[] = {
	[XBD_STATE_DISCONNECTED] = "disconnected",
	[XBD_STATE_CONNECTED]    = "connected",
	[XBD_STATE_SUSPENDED]    = "closed",
};

static char * xbd_status_name[] = {
	[BLKIF_INTERFACE_STATUS_CLOSED]       = "closed",
	[BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
	[BLKIF_INTERFACE_STATUS_CONNECTED]    = "connected",
	[BLKIF_INTERFACE_STATUS_CHANGED]      = "changed",
};
#endif

#if 0
#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

static int xbd_open(struct disk *dp);
static int xbd_close(struct disk *dp);
static int xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td);
static int xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm);
static void xbd_strategy(struct bio *bp);

/*
 * In order to quiesce the device during kernel dumps, outstanding requests to
 * DOM0 for disk reads/writes need to be accounted for.
 */
static int xbd_dump(void *, void *, vm_offset_t, off_t, size_t);

/* XXX move to xbd_vbd.c when VBD update support is added */
#define MAX_VBDS 64

#define XBD_SECTOR_SIZE 512	/* XXX: assume for now */
#define XBD_SECTOR_SHFT 9

/*
 * Translate Linux major/minor to an appropriate name and unit
 * number. For HVM guests, this allows us to use the same drive names
 * with blkfront as the emulated drives, easing transition slightly.
 */
static void
xbd_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
{
	static struct vdev_info {
		int major;
		int shift;
		int base;
		const char *name;
	} info[] = {
		{3,	6,	0,	"ada"},	/* ide0 */
		{22,	6,	2,	"ada"},	/* ide1 */
		{33,	6,	4,	"ada"},	/* ide2 */
		{34,	6,	6,	"ada"},	/* ide3 */
		{56,	6,	8,	"ada"},	/* ide4 */
		{57,	6,	10,	"ada"},	/* ide5 */
		{88,	6,	12,	"ada"},	/* ide6 */
		{89,	6,	14,	"ada"},	/* ide7 */
		{90,	6,	16,	"ada"},	/* ide8 */
		{91,	6,	18,	"ada"},	/* ide9 */

		{8,	4,	0,	"da"},	/* scsi disk0 */
		{65,	4,	16,	"da"},	/* scsi disk1 */
		{66,	4,	32,	"da"},	/* scsi disk2 */
		{67,	4,	48,	"da"},	/* scsi disk3 */
		{68,	4,	64,	"da"},	/* scsi disk4 */
		{69,	4,	80,	"da"},	/* scsi disk5 */
		{70,	4,	96,	"da"},	/* scsi disk6 */
		{71,	4,	112,	"da"},	/* scsi disk7 */
		{128,	4,	128,	"da"},	/* scsi disk8 */
		{129,	4,	144,	"da"},	/* scsi disk9 */
		{130,	4,	160,	"da"},	/* scsi disk10 */
		{131,	4,	176,	"da"},	/* scsi disk11 */
		{132,	4,	192,	"da"},	/* scsi disk12 */
		{133,	4,	208,	"da"},	/* scsi disk13 */
		{134,	4,	224,	"da"},	/* scsi disk14 */
		{135,	4,	240,	"da"},	/* scsi disk15 */

		{202,	4,	0,	"xbd"},	/* xbd */

		{0,	0,	0,	NULL},
	};
	int major = vdevice >> 8;
	int minor = vdevice & 0xff;
	int i;

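	/*
	 * Bit 28 set selects the extended vdevice encoding: the disk
	 * index is carried in bits 27..8 instead of the legacy Linux
	 * major/minor split handled by the table above.  For example,
	 * (1 << 28) | (8 << 8) decodes to xbd8, while 0x300 (major 3,
	 * minor 0) maps to ada0.
	 */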
	if (vdevice & (1 << 28)) {
		*unit = (vdevice & ((1 << 28) - 1)) >> 8;
		*name = "xbd";
		return;
	}

	for (i = 0; info[i].major; i++) {
		if (info[i].major == major) {
			*unit = info[i].base + (minor >> info[i].shift);
			*name = info[i].name;
			return;
		}
	}

	*unit = minor >> 4;
	*name = "xbd";
}

int
xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
    int vdevice, uint16_t vdisk_info, unsigned long sector_size)
{
	int unit, error = 0;
	const char *name;

	xbd_vdevice_to_unit(vdevice, &unit, &name);

	sc->xbd_unit = unit;

	if (strcmp(name, "xbd"))
		device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit);

	sc->xbd_disk = disk_alloc();
	sc->xbd_disk->d_unit = sc->xbd_unit;
	sc->xbd_disk->d_open = xbd_open;
	sc->xbd_disk->d_close = xbd_close;
	sc->xbd_disk->d_ioctl = xbd_ioctl;
	sc->xbd_disk->d_strategy = xbd_strategy;
	sc->xbd_disk->d_dump = xbd_dump;
	sc->xbd_disk->d_name = name;
	sc->xbd_disk->d_drv1 = sc;
	sc->xbd_disk->d_sectorsize = sector_size;

	sc->xbd_disk->d_mediasize = sectors * sector_size;
	sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
	sc->xbd_disk->d_flags = 0;
	disk_create(sc->xbd_disk, DISK_VERSION);

	return error;
}

/************************ end VBD support *****************/

/*
 * Read/write routine for a buffer.  Finds the proper unit, places the
 * buffer on the sort queue, and kicks the controller.
 */
static void
xbd_strategy(struct bio *bp)
{
	struct xbd_softc *sc = bp->bio_disk->d_drv1;

	/* bogus disk? */
	if (sc == NULL) {
		bp->bio_error = EINVAL;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	mtx_lock(&sc->xbd_io_lock);

	xbd_enqueue_bio(sc, bp);
	xbd_startio(sc);

	mtx_unlock(&sc->xbd_io_lock);
	return;
}

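/*
 * Complete a command that originated from a struct bio: propagate any
 * backend error status, recycle the command, and finish the bio.
 */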
static void
xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
{
	struct bio *bp;

	bp = cm->cm_bp;

	if (unlikely(cm->cm_status != BLKIF_RSP_OKAY)) {
		disk_err(bp, "disk error", -1, 0);
		printf(" status: %x\n", cm->cm_status);
		bp->bio_flags |= BIO_ERROR;
	}

	if (bp->bio_flags & BIO_ERROR)
		bp->bio_error = EIO;
	else
		bp->bio_resid = 0;

	xbd_free_command(cm);
	biodone(bp);
}

/* Quiesce the disk writes for a dump file before allowing the next buffer. */
static void
xbd_quiesce(struct xbd_softc *sc)
{
	int mtd;

	/* While there are outstanding requests */
	while (!TAILQ_EMPTY(&sc->xbd_cm_busy)) {
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd);
		if (mtd) {
			/* Received request completions, update queue. */
			xbd_int(sc);
		}
		if (!TAILQ_EMPTY(&sc->xbd_cm_busy)) {
			/*
			 * Still pending requests, wait for the disk i/o
			 * to complete.
			 */
			HYPERVISOR_yield();
		}
	}
}

/* Kernel dump function for a paravirtualized disk device */
static void
xbd_dump_complete(struct xbd_command *cm)
{

	xbd_enqueue_complete(cm);
}

static int
xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp = arg;
	struct xbd_softc *sc = dp->d_drv1;
	struct xbd_command *cm;
	size_t chunk;
	int sbp;
	int rc = 0;

	if (length <= 0)
		return (rc);

	xbd_quiesce(sc);	/* All quiet on the western front. */

	/*
	 * If this lock is held, then this module is failing, and a
	 * successful kernel dump is highly unlikely anyway.
	 */
	mtx_lock(&sc->xbd_io_lock);

	/* Split the 64KB block as needed */
	for (sbp = 0; length > 0; sbp++) {
		cm = xbd_dequeue_free(sc);
		if (cm == NULL) {
			mtx_unlock(&sc->xbd_io_lock);
			device_printf(sc->xbd_dev, "dump: no more commands?\n");
			return (EBUSY);
		}

		if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
		    &cm->cm_gref_head) != 0) {
			xbd_free_command(cm);
			mtx_unlock(&sc->xbd_io_lock);
			device_printf(sc->xbd_dev, "no more grant allocs?\n");
			return (EBUSY);
		}

		chunk = length > sc->xbd_max_request_size ?
		    sc->xbd_max_request_size : length;
		cm->cm_data = virtual;
		cm->cm_datalen = chunk;
		cm->cm_operation = BLKIF_OP_WRITE;
		cm->cm_sector_number = offset / dp->d_sectorsize;
		cm->cm_complete = xbd_dump_complete;

		xbd_enqueue_ready(cm);

		length -= chunk;
		offset += chunk;
		virtual = (char *) virtual + chunk;
	}

	/* Tell DOM0 to do the I/O */
	xbd_startio(sc);
	mtx_unlock(&sc->xbd_io_lock);

	/* Poll for the completion. */
	xbd_quiesce(sc);	/* All quiet on the eastern front */

	/* If there were any errors, bail out... */
	while ((cm = xbd_dequeue_complete(sc)) != NULL) {
		if (cm->cm_status != BLKIF_RSP_OKAY) {
			device_printf(sc->xbd_dev,
			    "Dump I/O failed at sector %jd\n",
			    cm->cm_sector_number);
			rc = EIO;
		}
		xbd_free_command(cm);
	}

	return (rc);
}


static int
xbd_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vbd")) {
		device_set_desc(dev, "Virtual Block Device");
		device_quiet(dev);
		return (0);
	}

	return (ENXIO);
}

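/*
 * Export the negotiated transport limits (requests, segments per request,
 * request size, and ring pages) as read-only sysctls under the device's
 * sysctl tree.
 */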
static void
xbd_setup_sysctl(struct xbd_softc *xbd)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;

	sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev);
	if (sysctl_ctx == NULL)
		return;

	sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev);
	if (sysctl_tree == NULL)
		return;

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1,
	    "maximum outstanding requests (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_request_segments", CTLFLAG_RD,
	    &xbd->xbd_max_request_segments, 0,
	    "maximum number of pages per requests (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0,
	    "maximum size in bytes of a request (negotiated)");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0,
	    "communication channel pages (negotiated)");
}

/*
 * Attach: determine the virtual device from the XenStore, name the unit,
 * set up the softc and its command queues, and then signal Initialising so
 * that the backend will publish its protocol capabilities.
 */
static int
xbd_attach(device_t dev)
{
	struct xbd_softc *sc;
	const char *name;
	uint32_t vdevice;
	int error;
	int i;
	int unit;

	/* FIXME: Use dynamic device id if this is not set. */
	error = xs_scanf(XST_NIL, xenbus_get_node(dev),
	    "virtual-device", NULL, "%" PRIu32, &vdevice);
	if (error) {
		xenbus_dev_fatal(dev, error, "reading virtual-device");
		device_printf(dev, "Couldn't determine virtual device.\n");
		return (error);
	}

	xbd_vdevice_to_unit(vdevice, &unit, &name);
	if (!strcmp(name, "xbd"))
		device_set_unit(dev, unit);

	sc = device_get_softc(dev);
	mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
	xbd_initq_free(sc);
	xbd_initq_busy(sc);
	xbd_initq_ready(sc);
	xbd_initq_complete(sc);
	xbd_initq_bio(sc);
	for (i = 0; i < XBD_MAX_RING_PAGES; i++)
		sc->xbd_ring_ref[i] = GRANT_INVALID_REF;

	sc->xbd_dev = dev;
	sc->xbd_vdevice = vdevice;
	sc->xbd_connected = XBD_STATE_DISCONNECTED;

	xbd_setup_sysctl(sc);

	/* Wait for backend device to publish its protocol capabilities. */
	xenbus_set_state(dev, XenbusStateInitialising);

	return (0);
}

static int
xbd_suspend(device_t dev)
{
	struct xbd_softc *sc = device_get_softc(dev);
	int retval;
	int saved_state;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xbd_io_lock);
	saved_state = sc->xbd_connected;
	sc->xbd_connected = XBD_STATE_SUSPENDED;

	/* Wait for outstanding I/O to drain. */
	retval = 0;
	while (TAILQ_EMPTY(&sc->xbd_cm_busy) == 0) {
		if (msleep(&sc->xbd_cm_busy, &sc->xbd_io_lock,
		    PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
			retval = EBUSY;
			break;
		}
	}
	mtx_unlock(&sc->xbd_io_lock);

	if (retval != 0)
		sc->xbd_connected = saved_state;

	return (retval);
}

static int
xbd_resume(device_t dev)
{
	struct xbd_softc *sc = device_get_softc(dev);

	DPRINTK("xbd_resume: %s\n", xenbus_get_node(dev));

	xbd_free(sc);
	xbd_initialize(sc);
	return (0);
}

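/*
 * Negotiate transport parameters (ring pages, request count, segments per
 * request, and request size) with the backend, allocate the DMA tag and
 * per-request state, set up the shared ring, and publish our settings in
 * the XenStore.  No-op unless the device is still Initialising.
 */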
static void
xbd_initialize(struct xbd_softc *sc)
{
	const char *otherend_path;
	const char *node_path;
	uint32_t max_ring_page_order;
	int error;
	int i;

	if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
		/* Initialization has already been performed. */
		return;
	}

	/*
	 * Protocol defaults valid even if negotiation for a
	 * setting fails.
	 */
	max_ring_page_order = 0;
	sc->xbd_ring_pages = 1;
	sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
	sc->xbd_max_request_size =
	    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
	sc->xbd_max_request_blocks =
	    BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);

	/*
	 * Protocol negotiation.
	 *
	 * \note xs_gather() returns on the first encountered error, so
	 *       we must use independent calls in order to guarantee
	 *       we don't miss information in a sparsely populated back-end
	 *       tree.
	 *
	 * \note xs_scanf() does not update variables for unmatched
	 *       fields.
	 */
	otherend_path = xenbus_get_otherend_path(sc->xbd_dev);
	node_path = xenbus_get_node(sc->xbd_dev);

	/* Support both backend schemes for relaying ring page limits. */
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-ring-page-order", NULL, "%" PRIu32,
	    &max_ring_page_order);
	sc->xbd_ring_pages = 1 << max_ring_page_order;
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-ring-pages", NULL, "%" PRIu32,
	    &sc->xbd_ring_pages);
	if (sc->xbd_ring_pages < 1)
		sc->xbd_ring_pages = 1;

	sc->xbd_max_requests =
	    BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-requests", NULL, "%" PRIu32,
	    &sc->xbd_max_requests);

	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-request-segments", NULL, "%" PRIu32,
	    &sc->xbd_max_request_segments);

	(void)xs_scanf(XST_NIL, otherend_path,
	    "max-request-size", NULL, "%" PRIu32,
	    &sc->xbd_max_request_size);

	if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
		device_printf(sc->xbd_dev,
		    "Back-end specified ring-pages of %u "
		    "limited to front-end limit of %zu.\n",
		    sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
		sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
	}

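	/*
	 * The shared ring must span a power-of-two number of pages; if
	 * the backend advertised anything else, round down.
	 */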
	if (powerof2(sc->xbd_ring_pages) == 0) {
		uint32_t new_page_limit;

		new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1);
		device_printf(sc->xbd_dev,
		    "Back-end specified ring-pages of %u "
		    "is not a power of 2. Limited to %u.\n",
		    sc->xbd_ring_pages, new_page_limit);
		sc->xbd_ring_pages = new_page_limit;
	}

	if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
		device_printf(sc->xbd_dev,
		    "Back-end specified max_requests of %u "
		    "limited to front-end limit of %u.\n",
		    sc->xbd_max_requests, XBD_MAX_REQUESTS);
		sc->xbd_max_requests = XBD_MAX_REQUESTS;
	}

	if (sc->xbd_max_request_segments > XBD_MAX_SEGMENTS_PER_REQUEST) {
		device_printf(sc->xbd_dev,
		    "Back-end specified max_request_segments of %u "
		    "limited to front-end limit of %u.\n",
		    sc->xbd_max_request_segments,
		    XBD_MAX_SEGMENTS_PER_REQUEST);
		sc->xbd_max_request_segments = XBD_MAX_SEGMENTS_PER_REQUEST;
	}

	if (sc->xbd_max_request_size > XBD_MAX_REQUEST_SIZE) {
		device_printf(sc->xbd_dev,
		    "Back-end specified max_request_size of %u "
		    "limited to front-end limit of %u.\n",
		    sc->xbd_max_request_size,
		    XBD_MAX_REQUEST_SIZE);
		sc->xbd_max_request_size = XBD_MAX_REQUEST_SIZE;
	}

	if (sc->xbd_max_request_size >
	    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments)) {
		device_printf(sc->xbd_dev,
		    "Back-end specified max_request_size of %u "
		    "limited to front-end limit of %u.  (Too few segments.)\n",
		    sc->xbd_max_request_size,
		    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments));
		sc->xbd_max_request_size =
		    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
	}

	sc->xbd_max_request_blocks =
	    BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);

	/* Allocate datastructures based on negotiated values. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
	    512, PAGE_SIZE,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    sc->xbd_max_request_size,
	    sc->xbd_max_request_segments,
	    PAGE_SIZE,				/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    busdma_lock_mutex,			/* lockfunc */
	    &sc->xbd_io_lock,			/* lockarg */
	    &sc->xbd_io_dmat);
	if (error != 0) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "Cannot allocate parent DMA tag\n");
		return;
	}

	/* Per-transaction data allocation. */
	sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
	    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
	if (sc->xbd_shadow == NULL) {
		bus_dma_tag_destroy(sc->xbd_io_dmat);
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "Cannot allocate request structures\n");
		return;
	}

	for (i = 0; i < sc->xbd_max_requests; i++) {
		struct xbd_command *cm;

		cm = &sc->xbd_shadow[i];
		cm->cm_sg_refs = malloc(
		    sizeof(grant_ref_t) * sc->xbd_max_request_segments,
		    M_XENBLOCKFRONT, M_NOWAIT);
		if (cm->cm_sg_refs == NULL)
			break;
		cm->cm_id = i;
		cm->cm_sc = sc;
		if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
			break;
		xbd_free_command(cm);
	}

	if (xbd_setup_ring(sc) != 0)
		return;

	/* Support both backend schemes for relaying ring page limits. */
	if (sc->xbd_ring_pages > 1) {
		error = xs_printf(XST_NIL, node_path,
		    "num-ring-pages", "%u",
		    sc->xbd_ring_pages);
		if (error) {
			xenbus_dev_fatal(sc->xbd_dev, error,
			    "writing %s/num-ring-pages",
			    node_path);
			return;
		}

		error = xs_printf(XST_NIL, node_path,
		    "ring-page-order", "%u",
		    fls(sc->xbd_ring_pages) - 1);
		if (error) {
			xenbus_dev_fatal(sc->xbd_dev, error,
			    "writing %s/ring-page-order",
			    node_path);
			return;
		}
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-requests", "%u",
	    sc->xbd_max_requests);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/max-requests",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-request-segments", "%u",
	    sc->xbd_max_request_segments);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/max-request-segments",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path,
	    "max-request-size", "%u",
	    sc->xbd_max_request_size);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/max-request-size",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path, "event-channel",
	    "%u", irq_to_evtchn_port(sc->xbd_irq));
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/event-channel",
		    node_path);
		return;
	}

	error = xs_printf(XST_NIL, node_path, "protocol",
	    "%s", XEN_IO_PROTO_ABI_NATIVE);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "writing %s/protocol",
		    node_path);
		return;
	}

	xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);
}

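/*
 * Allocate the shared request ring, grant the backend access to each of its
 * pages, publish the grant references in the XenStore, and bind an event
 * channel interrupt for ring notifications.
 */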
static int
xbd_setup_ring(struct xbd_softc *sc)
{
	blkif_sring_t *sring;
	uintptr_t sring_page_addr;
	int error;
	int i;

	sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
	    M_NOWAIT|M_ZERO);
	if (sring == NULL) {
		xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring");
		return (ENOMEM);
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE);

	for (i = 0, sring_page_addr = (uintptr_t)sring;
	     i < sc->xbd_ring_pages;
	     i++, sring_page_addr += PAGE_SIZE) {

		error = xenbus_grant_ring(sc->xbd_dev,
		    (vtomach(sring_page_addr) >> PAGE_SHIFT),
		    &sc->xbd_ring_ref[i]);
		if (error) {
			xenbus_dev_fatal(sc->xbd_dev, error,
			    "granting ring_ref(%d)", i);
			return (error);
		}
	}
	if (sc->xbd_ring_pages == 1) {
		error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
		    "ring-ref", "%u", sc->xbd_ring_ref[0]);
		if (error) {
			xenbus_dev_fatal(sc->xbd_dev, error,
			    "writing %s/ring-ref",
			    xenbus_get_node(sc->xbd_dev));
			return (error);
		}
	} else {
		for (i = 0; i < sc->xbd_ring_pages; i++) {
			char ring_ref_name[] = "ring_refXX";

			snprintf(ring_ref_name, sizeof(ring_ref_name),
			    "ring-ref%u", i);
			error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
			    ring_ref_name, "%u", sc->xbd_ring_ref[i]);
			if (error) {
				xenbus_dev_fatal(sc->xbd_dev, error,
				    "writing %s/%s",
				    xenbus_get_node(sc->xbd_dev),
				    ring_ref_name);
				return (error);
			}
		}
	}

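	/*
	 * Bind an event channel to the backend domain and hook up
	 * xbd_int() as its interrupt handler.
	 */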
	error = bind_listening_port_to_irqhandler(
	    xenbus_get_otherend_id(sc->xbd_dev),
	    "xbd", (driver_intr_t *)xbd_int, sc,
	    INTR_TYPE_BIO | INTR_MPSAFE, &sc->xbd_irq);
	if (error) {
		xenbus_dev_fatal(sc->xbd_dev, error,
		    "bind_evtchn_to_irqhandler failed");
		return (error);
	}

	return (0);
}

/**
 * Callback received when the backend's state changes.
 */
static void
xbd_backend_changed(device_t dev, XenbusState backend_state)
{
	struct xbd_softc *sc = device_get_softc(dev);

	DPRINTK("backend_state=%d\n", backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	case XenbusStateInitialised:
		xbd_initialize(sc);
		break;

	case XenbusStateConnected:
		xbd_initialize(sc);
		xbd_connect(sc);
		break;

	case XenbusStateClosing:
		if (sc->xbd_users > 0)
			xenbus_dev_error(dev, -EBUSY,
			    "Device in use; refusing to close");
		else
			xbd_closing(dev);
		break;
	}
}

/*
 * Invoked when the backend is finally 'ready' (and has published
 * the details about the physical device - #sectors, size, etc).
 */
static void
xbd_connect(struct xbd_softc *sc)
{
	device_t dev = sc->xbd_dev;
	unsigned long sectors, sector_size;
	unsigned long feature_barrier;
	unsigned int binfo;
	int err;

	if ((sc->xbd_connected == XBD_STATE_CONNECTED) ||
	    (sc->xbd_connected == XBD_STATE_SUSPENDED))
		return;

	DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));

	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
	    "sectors", "%lu", &sectors,
	    "info", "%u", &binfo,
	    "sector-size", "%lu", &sector_size,
	    NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
		    "reading backend fields at %s",
		    xenbus_get_otherend_path(dev));
		return;
	}
	err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
	    "feature-barrier", "%lu", &feature_barrier,
	    NULL);
	if (err == 0 && feature_barrier != 0)
		sc->xbd_flags |= XBD_BARRIER;

	if (sc->xbd_disk == NULL) {
		device_printf(dev, "%juMB <%s> at %s",
		    (uintmax_t) sectors / (1048576 / sector_size),
		    device_get_desc(dev),
		    xenbus_get_node(dev));
		bus_print_child_footer(device_get_parent(dev), dev);

		xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo,
		    sector_size);
	}

	(void)xenbus_set_state(dev, XenbusStateConnected);

	/* Kick pending requests. */
	mtx_lock(&sc->xbd_io_lock);
	sc->xbd_connected = XBD_STATE_CONNECTED;
	xbd_startio(sc);
	sc->xbd_flags |= XBD_READY;
	mtx_unlock(&sc->xbd_io_lock);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void
xbd_closing(device_t dev)
{
	struct xbd_softc *sc = device_get_softc(dev);

	xenbus_set_state(dev, XenbusStateClosing);

	DPRINTK("xbd_closing: %s removed\n", xenbus_get_node(dev));

	if (sc->xbd_disk != NULL) {
		disk_destroy(sc->xbd_disk);
		sc->xbd_disk = NULL;
	}

	xenbus_set_state(dev, XenbusStateClosed);
}


static int
xbd_detach(device_t dev)
{
	struct xbd_softc *sc = device_get_softc(dev);

	DPRINTK("xbd_remove: %s removed\n", xenbus_get_node(dev));

	xbd_free(sc);
	mtx_destroy(&sc->xbd_io_lock);

	return (0);
}


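/*
 * Publish newly produced requests on the shared ring and, if the backend
 * asked for it, notify the remote end over the event channel.
 */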
static inline void
flush_requests(struct xbd_softc *sc)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify);

	if (notify)
		notify_remote_via_irq(sc->xbd_irq);
}

static void
xbd_restart_queue_callback(void *arg)
{
	struct xbd_softc *sc = arg;

	mtx_lock(&sc->xbd_io_lock);

	xbd_startio(sc);

	mtx_unlock(&sc->xbd_io_lock);
}

static int
xbd_open(struct disk *dp)
{
	struct xbd_softc *sc = dp->d_drv1;

	if (sc == NULL) {
		printf("xbd%d: not found\n", dp->d_unit);
		return (ENXIO);
	}

	sc->xbd_flags |= XBD_OPEN;
	sc->xbd_users++;
	return (0);
}

static int
xbd_close(struct disk *dp)
{
	struct xbd_softc *sc = dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);
	sc->xbd_flags &= ~XBD_OPEN;
	if (--(sc->xbd_users) == 0) {
		/*
		 * Check whether we have been instructed to close.  We will
		 * have ignored this request initially, as the device was
		 * still mounted.
		 */
		if (xenbus_get_otherend_state(sc->xbd_dev) ==
		    XenbusStateClosing)
			xbd_closing(sc->xbd_dev);
	}
	return (0);
}

static int
xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
{
	struct xbd_softc *sc = dp->d_drv1;

	if (sc == NULL)
		return (ENXIO);

	return (ENOTTY);
}

static void
xbd_free_command(struct xbd_command *cm)
{

	KASSERT((cm->cm_flags & XBD_ON_XBDQ_MASK) == 0,
	    ("Freeing command that is still on a queue\n"));

	cm->cm_flags = 0;
	cm->cm_bp = NULL;
	cm->cm_complete = NULL;
	xbd_enqueue_free(cm);
}

/*
 * xbd_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *	virtual address in the guest os.
 */
static struct xbd_command *
xbd_bio_command(struct xbd_softc *sc)
{
	struct xbd_command *cm;
	struct bio *bp;

	if (unlikely(sc->xbd_connected != XBD_STATE_CONNECTED))
		return (NULL);

	bp = xbd_dequeue_bio(sc);
	if (bp == NULL)
		return (NULL);

	if ((cm = xbd_dequeue_free(sc)) == NULL) {
		xbd_requeue_bio(sc, bp);
		return (NULL);
	}

	if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
	    &cm->cm_gref_head) != 0) {
		gnttab_request_free_callback(&sc->xbd_callback,
		    xbd_restart_queue_callback, sc,
		    sc->xbd_max_request_segments);
		xbd_requeue_bio(sc, bp);
		xbd_enqueue_free(cm);
		sc->xbd_flags |= XBD_FROZEN;
		return (NULL);
	}

	cm->cm_bp = bp;
	cm->cm_data = bp->bio_data;
	cm->cm_datalen = bp->bio_bcount;
	cm->cm_operation = (bp->bio_cmd == BIO_READ) ?
	    BLKIF_OP_READ : BLKIF_OP_WRITE;
	cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;

	return (cm);
}

static int
xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
{
	int error;

	error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map, cm->cm_data,
	    cm->cm_datalen, xbd_queue_cb, cm, 0);
	if (error == EINPROGRESS) {
		printf("EINPROGRESS\n");
		sc->xbd_flags |= XBD_FROZEN;
		cm->cm_flags |= XBD_CMD_FROZEN;
		return (0);
	}

	return (error);
}

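/*
 * bus_dma callback: translate the mapped segment list into one or more
 * blkif ring request blocks, granting the backend access to each data page,
 * then place the command on the busy queue.
 */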
static void
xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct xbd_softc *sc;
	struct xbd_command *cm;
	blkif_request_t *ring_req;
	struct blkif_request_segment *sg;
	struct blkif_request_segment *last_block_sg;
	grant_ref_t *sg_ref;
	vm_paddr_t buffer_ma;
	uint64_t fsect, lsect;
	int ref;
	int op;
	int block_segs;

	cm = arg;
	sc = cm->cm_sc;

	if (error) {
		printf("error %d in xbd_queue_cb\n", error);
		cm->cm_bp->bio_error = EIO;
		biodone(cm->cm_bp);
		xbd_free_command(cm);
		return;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
	sc->xbd_ring.req_prod_pvt++;
	ring_req->id = cm->cm_id;
	ring_req->operation = cm->cm_operation;
	ring_req->sector_number = cm->cm_sector_number;
	ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
	ring_req->nr_segments = nsegs;
	cm->cm_nseg = nsegs;

	block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
	sg = ring_req->seg;
	last_block_sg = sg + block_segs;
	sg_ref = cm->cm_sg_refs;

	while (1) {

		while (sg < last_block_sg) {
			buffer_ma = segs->ds_addr;
			fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
			lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;

			KASSERT(lsect <= 7, ("XEN disk driver data cannot "
			    "cross a page boundary"));

			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&cm->cm_gref_head);

			/*
			 * GNTTAB_LIST_END == 0xffffffff, but it is private
			 * to gnttab.c.
			 */
			KASSERT(ref != ~0, ("grant_reference failed"));

			gnttab_grant_foreign_access_ref(
			    ref,
			    xenbus_get_otherend_id(sc->xbd_dev),
			    buffer_ma >> PAGE_SHIFT,
			    ring_req->operation == BLKIF_OP_WRITE);

			*sg_ref = ref;
			*sg = (struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect
			};
			sg++;
			sg_ref++;
			segs++;
			nsegs--;
		}
		block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
		if (block_segs == 0)
			break;

		sg = BLKRING_GET_SEG_BLOCK(&sc->xbd_ring,
		    sc->xbd_ring.req_prod_pvt);
		sc->xbd_ring.req_prod_pvt++;
		last_block_sg = sg + block_segs;
	}

	if (cm->cm_operation == BLKIF_OP_READ)
		op = BUS_DMASYNC_PREREAD;
	else if (cm->cm_operation == BLKIF_OP_WRITE)
		op = BUS_DMASYNC_PREWRITE;
	else
		op = 0;
	bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);

	gnttab_free_grant_references(cm->cm_gref_head);

	xbd_enqueue_busy(cm);

	/*
	 * This flag means that we're probably executing in the busdma swi
	 * instead of in the startio context, so an explicit flush is needed.
	 */
	if (cm->cm_flags & XBD_CMD_FROZEN)
		flush_requests(sc);

	return;
}

/*
 * Dequeue buffers and place them in the shared communication ring.
 * Return when no more requests can be accepted or all buffers have
 * been queued.
 *
 * Signal XEN once the ring has been filled out.
 */
static void
xbd_startio(struct xbd_softc *sc)
{
	struct xbd_command *cm;
	int error, queued = 0;

	mtx_assert(&sc->xbd_io_lock, MA_OWNED);

	if (sc->xbd_connected != XBD_STATE_CONNECTED)
		return;

	while (RING_FREE_REQUESTS(&sc->xbd_ring) >=
	    sc->xbd_max_request_blocks) {
		if (sc->xbd_flags & XBD_FROZEN)
			break;

		cm = xbd_dequeue_ready(sc);

		if (cm == NULL)
			cm = xbd_bio_command(sc);

		if (cm == NULL)
			break;

		if ((error = xbd_queue_request(sc, cm)) != 0) {
			printf("xbd_queue_request returned %d\n", error);
			break;
		}
		queued++;
	}

	if (queued != 0)
		flush_requests(sc);
}

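/*
 * Interrupt handler: consume completed responses from the shared ring,
 * sync and unload the associated DMA maps, complete each command, and
 * restart request submission.
 */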
static void
xbd_int(void *xsc)
{
	struct xbd_softc *sc = xsc;
	struct xbd_command *cm;
	blkif_response_t *bret;
	RING_IDX i, rp;
	int op;

	mtx_lock(&sc->xbd_io_lock);

	if (unlikely(sc->xbd_connected == XBD_STATE_DISCONNECTED)) {
		mtx_unlock(&sc->xbd_io_lock);
		return;
	}

 again:
	rp = sc->xbd_ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = sc->xbd_ring.rsp_cons; i != rp;) {
		bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
		cm = &sc->xbd_shadow[bret->id];

		xbd_remove_busy(cm);
		i += xbd_completion(cm);

		if (cm->cm_operation == BLKIF_OP_READ)
			op = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_operation == BLKIF_OP_WRITE)
			op = BUS_DMASYNC_POSTWRITE;
		else
			op = 0;
		bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
		bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);

		/*
		 * If commands are completing then resources are probably
		 * being freed as well.  It's a cheap assumption even when
		 * wrong.
		 */
		sc->xbd_flags &= ~XBD_FROZEN;

		/*
		 * Directly call the i/o complete routine to save an
		 * indirection in the common case.
		 */
		cm->cm_status = bret->status;
		if (cm->cm_bp)
			xbd_bio_complete(sc, cm);
		else if (cm->cm_complete != NULL)
			cm->cm_complete(cm);
		else
			xbd_free_command(cm);
	}

	sc->xbd_ring.rsp_cons = i;

	if (i != sc->xbd_ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do);
		if (more_to_do)
			goto again;
	} else {
		sc->xbd_ring.sring->rsp_event = i + 1;
	}

	xbd_startio(sc);

	if (unlikely(sc->xbd_connected == XBD_STATE_SUSPENDED))
		wakeup(&sc->xbd_cm_busy);

	mtx_unlock(&sc->xbd_io_lock);
}

static void
xbd_free(struct xbd_softc *sc)
{
	uint8_t *sring_page_ptr;
	int i;

	/* Prevent new requests being issued until we fix things up. */
	mtx_lock(&sc->xbd_io_lock);
	sc->xbd_connected = XBD_STATE_DISCONNECTED;
	mtx_unlock(&sc->xbd_io_lock);

	/* Free resources associated with old device channel. */
	if (sc->xbd_ring.sring != NULL) {
		sring_page_ptr = (uint8_t *)sc->xbd_ring.sring;
		for (i = 0; i < sc->xbd_ring_pages; i++) {
			grant_ref_t *ref;

			ref = &sc->xbd_ring_ref[i];
			if (*ref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access_ref(*ref);
				*ref = GRANT_INVALID_REF;
			}
			sring_page_ptr += PAGE_SIZE;
		}
		free(sc->xbd_ring.sring, M_XENBLOCKFRONT);
		sc->xbd_ring.sring = NULL;
	}

	if (sc->xbd_shadow) {

		for (i = 0; i < sc->xbd_max_requests; i++) {
			struct xbd_command *cm;

			cm = &sc->xbd_shadow[i];
			if (cm->cm_sg_refs != NULL) {
				free(cm->cm_sg_refs, M_XENBLOCKFRONT);
				cm->cm_sg_refs = NULL;
			}

			bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
		}
		free(sc->xbd_shadow, M_XENBLOCKFRONT);
		sc->xbd_shadow = NULL;

		bus_dma_tag_destroy(sc->xbd_io_dmat);

		xbd_initq_free(sc);
		xbd_initq_ready(sc);
		xbd_initq_complete(sc);
	}

	if (sc->xbd_irq) {
		unbind_from_irqhandler(sc->xbd_irq);
		sc->xbd_irq = 0;
	}
}

static int
xbd_completion(struct xbd_command *cm)
{
	gnttab_end_foreign_access_references(cm->cm_nseg, cm->cm_sg_refs);
	return (BLKIF_SEGS_TO_BLOCKS(cm->cm_nseg));
}

/* ** Driver registration ** */
static device_method_t xbd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xbd_probe),
	DEVMETHOD(device_attach,	xbd_attach),
	DEVMETHOD(device_detach,	xbd_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	xbd_suspend),
	DEVMETHOD(device_resume,	xbd_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, xbd_backend_changed),

	{ 0, 0 }
};

static driver_t xbd_driver = {
	"xbd",
	xbd_methods,
	sizeof(struct xbd_softc),
};
devclass_t xbd_devclass;

DRIVER_MODULE(xbd, xenbusb_front, xbd_driver, xbd_devclass, 0, 0);