/*-
 * Copyright (c) 2009-2012 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Ken Merry           (Spectra Logic Corporation)
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * \file blkback.c
 *
 * \brief Device driver supporting the vending of block storage from
 *        a FreeBSD domain to other domains.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/kdb.h>
#include <sys/module.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/bitstring.h>
#include <sys/sdt.h>

#include <geom/geom.h>

#include <machine/_inttypes.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <xen/xen-os.h>
#include <xen/blkif.h>
#include <xen/gnttab.h>
#include <xen/xen_intr.h>

#include <xen/interface/event_channel.h>
#include <xen/interface/grant_table.h>

#include <xen/xenbus/xenbusvar.h>

/*--------------------------- Compile-time Tunables --------------------------*/
/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd.
 */
#define	XBB_MAX_RING_PAGES	32

/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBB_MAX_REQUESTS					\
	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES)

/**
 * \brief Define to force all I/O to be performed on memory owned by the
 *        backend device, with a copy-in/out to the remote domain's memory.
 *
 * \note This option is currently required when this driver's domain is
 *       operating in HVM mode on a system using an IOMMU.
 *
 * This driver uses Xen's grant table API to gain access to the memory of
 * the remote domains it serves.  When our domain is operating in PV mode,
 * the grant table mechanism directly updates our domain's page table entries
 * to point to the physical pages of the remote domain.  This scheme guarantees
 * that blkback and the backing devices it uses can safely perform DMA
 * operations to satisfy requests.  In HVM mode, Xen may use a HW IOMMU to
 * ensure that our domain cannot DMA to pages owned by another domain.  As
 * of Xen 4.0, IOMMU mappings for HVM guests are not updated via the grant
 * table API.  For this reason, in HVM mode, we must bounce all requests into
 * memory that is mapped into our domain at domain startup and thus has
 * valid IOMMU mappings.
 */
#define XBB_USE_BOUNCE_BUFFERS

/**
 * \brief Define to enable rudimentary request logging to the console.
 */
#undef XBB_DEBUG

/*---------------------------------- Macros ----------------------------------*/
/**
 * Custom malloc type for all driver allocations.
 */
static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");

#ifdef XBB_DEBUG
#define DPRINTF(fmt, args...)					\
    printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif

/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 */
#define	XBB_MAX_REQUEST_SIZE					\
	MIN(MAXPHYS, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE)

/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBB_MAX_SEGMENTS_PER_REQUEST				\
	(MIN(UIO_MAXIOV,					\
	     MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,		\
		 (XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1)))

/**
 * The maximum number of ring pages that we can allow per request list.
 * We limit this to the maximum number of segments per request, because
 * that is already a reasonable number of segments to aggregate.  This
 * number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST,
 * because that would leave situations where we can't dispatch even one
 * large request.
 */
#define	XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST
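
/*
 * Illustrative compile-time sanity checks (not part of the original driver):
 * CTASSERT() from <sys/systm.h> fails the build if the relationships the
 * comments above rely on are ever broken by a tunable change.
 */
CTASSERT(XBB_MAX_SEGMENTS_PER_REQLIST >= XBB_MAX_SEGMENTS_PER_REQUEST);
CTASSERT((XBB_MAX_RING_PAGES & (XBB_MAX_RING_PAGES - 1)) == 0);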

/*--------------------------- Forward Declarations ---------------------------*/
struct xbb_softc;
struct xbb_xen_req;

static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
			      ...) __attribute__((format(printf, 3, 4)));
static int  xbb_shutdown(struct xbb_softc *xbb);
static int  xbb_detach(device_t dev);

/*------------------------------ Data Structures -----------------------------*/

STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req);

typedef enum {
	XBB_REQLIST_NONE	= 0x00,
	XBB_REQLIST_MAPPED	= 0x01
} xbb_reqlist_flags;

struct xbb_xen_reqlist {
	/**
	 * Back reference to the parent block back instance for this
	 * request.  Used during bio_done handling.
	 */
	struct xbb_softc *xbb;

	/**
	 * BLKIF_OP code for this request.
	 */
	int operation;

	/**
	 * Set to BLKIF_RSP_* to indicate request status.
	 *
	 * This field allows an error status to be recorded even if the
	 * delivery of this status must be deferred.  Deferred reporting
	 * is necessary, for example, when an error is detected during
	 * completion processing of one bio when other bios for this
	 * request are still outstanding.
	 */
	int status;

	/**
	 * Number of 512 byte sectors not transferred.
	 */
	int residual_512b_sectors;

	/**
	 * Starting sector number of the first request in the list.
	 */
	off_t starting_sector_number;

	/**
	 * If we're going to coalesce, the next contiguous sector would be
	 * this one.
	 */
	off_t next_contig_sector;

	/**
	 * Number of child requests in the list.
	 */
	int num_children;

	/**
	 * Number of I/O requests still pending on the backend.
	 */
	int pendcnt;

	/**
	 * Total number of segments for requests in the list.
	 */
	int nr_segments;

	/**
	 * Flags for this particular request list.
	 */
	xbb_reqlist_flags flags;

	/**
	 * Kernel virtual address space reserved for this request
	 * list structure and used to map the remote domain's pages for
	 * this I/O, into our domain's address space.
	 */
	uint8_t *kva;

	/**
	 * Base, pseudo-physical address, corresponding to the start
	 * of this request's kva region.
	 */
	uint64_t gnt_base;

#ifdef XBB_USE_BOUNCE_BUFFERS
	/**
	 * Pre-allocated domain local memory used to proxy remote
	 * domain memory during I/O operations.
	 */
	uint8_t *bounce;
#endif

	/**
	 * Array of grant handles (one per page) used to map this request.
	 */
	grant_handle_t *gnt_handles;

	/**
	 * Device statistics request ordering type (ordered or simple).
	 */
	devstat_tag_type ds_tag_type;

	/**
	 * Device statistics request type (read, write, no_data).
	 */
	devstat_trans_flags ds_trans_type;

	/**
	 * The start time for this request.
	 */
	struct bintime ds_t0;

	/**
	 * Linked list of contiguous requests with the same operation type.
	 */
	struct xbb_xen_req_list contig_req_list;

	/**
	 * Linked list links used to aggregate idle requests in the
	 * request list free pool (xbb->reqlist_free_stailq) and pending
	 * requests waiting for execution (xbb->reqlist_pending_stailq).
	 */
	STAILQ_ENTRY(xbb_xen_reqlist) links;
};

STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist);

/**
 * \brief Object tracking an in-flight I/O from a Xen VBD consumer.
 */
struct xbb_xen_req {
	/**
	 * Linked list links used to aggregate requests into a reqlist
	 * and to store them in the request free pool.
	 */
	STAILQ_ENTRY(xbb_xen_req) links;

	/**
	 * The remote domain's identifier for this I/O request.
	 */
	uint64_t id;

	/**
	 * The number of pages currently mapped for this request.
	 */
	int nr_pages;

	/**
	 * The number of 512 byte sectors comprising this request.
	 */
	int nr_512b_sectors;

	/**
	 * BLKIF_OP code for this request.
	 */
	int operation;

	/**
	 * Storage used for non-native ring requests.
	 */
	blkif_request_t ring_req_storage;

	/**
	 * Pointer to the Xen request in the ring.
	 */
	blkif_request_t *ring_req;

	/**
	 * Consumer index for this request.
	 */
	RING_IDX req_ring_idx;

	/**
	 * The start time for this request.
	 */
	struct bintime ds_t0;

	/**
	 * Pointer back to our parent request list.
	 */
	struct xbb_xen_reqlist *reqlist;
};
SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req);

/**
 * \brief Configuration data for the shared memory request ring
 *        used to communicate with the front-end client of this
 *        driver.
 */
struct xbb_ring_config {
	/** KVA address where ring memory is mapped. */
	vm_offset_t va;

	/** The pseudo-physical address where ring memory is mapped. */
	uint64_t gnt_addr;

	/**
	 * Grant table handles, one per-ring page, returned by the
	 * hypervisor upon mapping of the ring and required to
	 * unmap it when a connection is torn down.
	 */
	grant_handle_t handle[XBB_MAX_RING_PAGES];

	/**
	 * The device bus address returned by the hypervisor when
	 * mapping the ring and required to unmap it when a connection
	 * is torn down.
	 */
	uint64_t bus_addr[XBB_MAX_RING_PAGES];

	/** The number of ring pages mapped for the current connection. */
	u_int ring_pages;

	/**
	 * The grant references, one per-ring page, supplied by the
	 * front-end, allowing us to reference the ring pages in the
	 * front-end's domain and to map these pages into our own domain.
	 */
	grant_ref_t ring_ref[XBB_MAX_RING_PAGES];

	/** The interrupt driven event channel used to signal ring events. */
	evtchn_port_t evtchn;
};

/**
 * Per-instance connection state flags.
 */
typedef enum
{
	/**
	 * The front-end requested a read-only mount of the
	 * back-end device/file.
	 */
	XBBF_READ_ONLY         = 0x01,

	/** Communication with the front-end has been established. */
	XBBF_RING_CONNECTED    = 0x02,

	/**
	 * Front-end requests exist in the ring and are waiting for
	 * xbb_xen_req objects to free up.
	 */
	XBBF_RESOURCE_SHORTAGE = 0x04,

	/** Connection teardown in progress. */
	XBBF_SHUTDOWN          = 0x08,

	/** A thread is already performing shutdown processing. */
	XBBF_IN_SHUTDOWN       = 0x10
} xbb_flag_t;

/** Backend device type. */
typedef enum {
	/** Backend type unknown. */
	XBB_TYPE_NONE		= 0x00,

	/**
	 * Backend type disk (access via cdev switch
	 * strategy routine).
	 */
	XBB_TYPE_DISK		= 0x01,

	/** Backend type file (access via vnode operations). */
	XBB_TYPE_FILE		= 0x02
} xbb_type;

/**
 * \brief Structure used to memoize information about a per-request
 *        scatter-gather list.
 *
 * The chief benefit of using this data structure is it avoids having
 * to reparse the possibly discontiguous S/G list in the original
 * request.  Due to the way that the mapping of the memory backing an
 * I/O transaction is handled by Xen, a second pass is unavoidable.
 * At least this way the second walk is a simple array traversal.
 *
 * \note A single Scatter/Gather element in the block interface covers
 *       at most 1 machine page.  In this context a sector (blkif
 *       nomenclature, not what I'd choose) is a 512b aligned unit
 *       of mapping within the machine page referenced by an S/G
 *       element.
 */
struct xbb_sg {
	/** The number of 512b data chunks mapped in this S/G element. */
	int16_t nsect;

	/**
	 * The index (0 based) of the first 512b data chunk mapped
	 * in this S/G element.
	 */
	uint8_t first_sect;

	/**
	 * The index (0 based) of the last 512b data chunk mapped
	 * in this S/G element.
	 */
	uint8_t last_sect;
};

/**
 * Character device backend specific configuration data.
 */
struct xbb_dev_data {
	/** Cdev used for device backend access.  */
	struct cdev *cdev;

	/** Cdev switch used for device backend access.  */
	struct cdevsw *csw;

	/** Used to hold a reference on opened cdev backend devices. */
	int dev_ref;
};

/**
 * File backend specific configuration data.
 */
struct xbb_file_data {
	/** Credentials to use for vnode backed (file based) I/O. */
	struct ucred *cred;

	/**
	 * \brief Array of io vectors used to process file based I/O.
	 *
	 * Only a single file based request is outstanding per-xbb instance,
	 * so we only need one of these.
	 */
	struct iovec xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
#ifdef XBB_USE_BOUNCE_BUFFERS

	/**
	 * \brief Array of io vectors used to handle bouncing of file reads.
	 *
	 * Vnode operations are free to modify uio data during their
	 * execution.  In the case of a read with bounce buffering active,
	 * we need some of the data from the original uio in order to
	 * bounce-out the read data.  This array serves as the temporary
	 * storage for this saved data.
	 */
	struct iovec saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];

	/**
	 * \brief Array of memoized bounce buffer kva offsets used
	 *        in the file based backend.
	 *
	 * Due to the way that the mapping of the memory backing an
	 * I/O transaction is handled by Xen, a second pass through
	 * the request sg elements is unavoidable.  We memoize the computed
	 * bounce address here to reduce the cost of the second walk.
	 */
	void *xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST];
#endif /* XBB_USE_BOUNCE_BUFFERS */
};

/**
 * Collection of backend type specific data.
 */
union xbb_backend_data {
	struct xbb_dev_data  dev;
	struct xbb_file_data file;
};

/**
 * Function signature of backend specific I/O handlers.
 */
typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
			      struct xbb_xen_reqlist *reqlist, int operation,
			      int flags);

/**
 * Per-instance configuration data.
 */
struct xbb_softc {

	/**
	 * Task-queue used to process I/O requests.
	 */
	struct taskqueue *io_taskqueue;

	/**
	 * Single "run the request queue" task enqueued
	 * on io_taskqueue.
	 */
	struct task io_task;

	/** Device type for this instance. */
	xbb_type device_type;

	/** NewBus device corresponding to this instance. */
	device_t dev;

	/** Backend specific dispatch routine for this instance. */
	xbb_dispatch_t dispatch_io;

	/** The number of requests outstanding on the backend device/file. */
	int active_request_count;

	/** Free pool of request tracking structures. */
	struct xbb_xen_req_list request_free_stailq;

	/** Array, sized at connection time, of request tracking structures. */
	struct xbb_xen_req *requests;

	/** Free pool of request list structures. */
	struct xbb_xen_reqlist_list reqlist_free_stailq;

	/** List of pending request lists awaiting execution. */
	struct xbb_xen_reqlist_list reqlist_pending_stailq;

	/** Array, sized at connection time, of request list structures. */
	struct xbb_xen_reqlist *request_lists;

	/**
	 * Global pool of kva used for mapping remote domain ring
	 * and I/O transaction data.
	 */
	vm_offset_t kva;

	/** Pseudo-physical address corresponding to kva. */
	uint64_t gnt_base_addr;

	/** The size of the global kva pool. */
	int kva_size;

	/** The size of the KVA area used for request lists. */
	int reqlist_kva_size;

	/** The number of pages of KVA used for request lists. */
	int reqlist_kva_pages;

	/** Bitmap of free KVA pages. */
	bitstr_t *kva_free;

	/**
	 * \brief Cached value of the front-end's domain id.
	 *
	 * This value is used once for each mapped page in
	 * a transaction.  We cache it to avoid incurring the
	 * cost of an ivar access every time this is needed.
	 */
	domid_t otherend_id;

	/**
	 * \brief The blkif protocol abi in effect.
	 *
	 * There are situations where the back and front ends can
	 * have a different, native abi (e.g. Intel x86_64 and
	 * 32bit x86 domains on the same machine).  The back-end
	 * always accommodates the front-end's native abi.  That
	 * value is pulled from the XenStore and recorded here.
	 */
	int abi;

	/**
	 * \brief The maximum number of requests and request lists allowed
	 *        to be in flight at a time.
	 *
	 * This value is negotiated via the XenStore.
	 */
	u_int max_requests;

	/**
	 * \brief The maximum number of segments (1 page per segment)
	 *	  that can be mapped by a request.
	 *
	 * This value is negotiated via the XenStore.
	 */
	u_int max_request_segments;

	/**
	 * \brief Maximum number of segments per request list.
	 *
	 * This value is derived from and will generally be larger than
	 * max_request_segments.
	 */
	u_int max_reqlist_segments;

	/**
	 * The maximum size of any request to this back-end
	 * device.
	 *
	 * This value is negotiated via the XenStore.
	 */
	u_int max_request_size;

	/**
	 * The maximum size of any request list.  This is derived directly
	 * from max_reqlist_segments.
	 */
	u_int max_reqlist_size;

	/** Various configuration and state bit flags. */
	xbb_flag_t flags;

	/** Ring mapping and interrupt configuration data. */
	struct xbb_ring_config ring_config;

	/** Runtime, cross-abi safe, structures for ring access. */
	blkif_back_rings_t rings;

	/** IRQ mapping for the communication ring event channel. */
	xen_intr_handle_t xen_intr_handle;

	/**
	 * \brief Backend access mode flags (e.g. write, or read-only).
	 *
	 * This value is passed to us by the front-end via the XenStore.
	 */
	char *dev_mode;

	/**
	 * \brief Backend device type (e.g. "disk", "cdrom", "floppy").
	 *
	 * This value is passed to us by the front-end via the XenStore.
	 * Currently unused.
	 */
	char *dev_type;

	/**
	 * \brief Backend device/file identifier.
	 *
	 * This value is passed to us by the front-end via the XenStore.
	 * We expect this to be a POSIX path indicating the file or
	 * device to open.
	 */
	char *dev_name;

	/**
	 * Vnode corresponding to the backend device node or file
	 * we are accessing.
	 */
	struct vnode *vn;

	union xbb_backend_data backend;

	/** The native sector size of the backend. */
	u_int sector_size;

	/** log2 of sector_size. */
	u_int sector_size_shift;

	/** Size in bytes of the backend device or file. */
	off_t media_size;

	/**
	 * \brief media_size expressed in terms of the backend native
	 *	  sector size.
	 *
	 * (e.g. xbb->media_size >> xbb->sector_size_shift).
	 */
	uint64_t media_num_sectors;

	/**
	 * \brief Array of memoized scatter gather data computed during the
	 *	  conversion of blkif ring requests to internal xbb_xen_req
	 *	  structures.
	 *
	 * Ring processing is serialized so we only need one of these.
	 */
	struct xbb_sg xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST];

	/**
	 * Temporary grant table map used in xbb_dispatch_io().  When
	 * XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the
	 * stack could cause a stack overflow.
	 */
	struct gnttab_map_grant_ref maps[XBB_MAX_SEGMENTS_PER_REQLIST];

	/** Mutex protecting per-instance data. */
	struct mtx lock;

	/**
	 * Resource representing allocated physical address space
	 * associated with our per-instance kva region.
	 */
	struct resource *pseudo_phys_res;

	/** Resource id for allocated physical address space. */
	int pseudo_phys_res_id;

	/**
	 * I/O statistics from BlockBack dispatch down.  These are
	 * coalesced requests, and we start them right before execution.
	 */
	struct devstat *xbb_stats;

	/**
	 * I/O statistics coming into BlockBack.  These are the requests as
	 * we get them from BlockFront.  They are started as soon as we
	 * receive a request, and completed when the I/O is complete.
	 */
	struct devstat *xbb_stats_in;

	/** Disable sending flush to the backend. */
	int disable_flush;

	/** Send a real flush for every N flush requests. */
	int flush_interval;

	/** Count of flush requests in the interval. */
	int flush_count;

	/** Don't coalesce requests if this is set. */
	int no_coalesce_reqs;

	/** Number of requests we have received. */
	uint64_t reqs_received;

	/** Number of requests we have completed. */
	uint64_t reqs_completed;

	/** Number of requests we queued but not pushed. */
	uint64_t reqs_queued_for_completion;

	/** Number of requests we completed with an error status. */
	uint64_t reqs_completed_with_error;

	/** How many forced dispatches (i.e. without coalescing) have happened. */
	uint64_t forced_dispatch;

	/** How many normal dispatches have happened. */
	uint64_t normal_dispatch;

	/** How many total dispatches have happened. */
	uint64_t total_dispatch;

	/** How many times we have run out of KVA. */
	uint64_t kva_shortages;

	/** How many times we have run out of request structures. */
	uint64_t request_shortages;
};

/*---------------------------- Request Processing ----------------------------*/
/**
 * Allocate an internal transaction tracking structure from the free pool.
 *
 * \param xbb  Per-instance xbb configuration structure.
 *
 * \return  On success, a pointer to the allocated xbb_xen_req structure.
 *          Otherwise NULL.
 */
static inline struct xbb_xen_req *
xbb_get_req(struct xbb_softc *xbb)
{
	struct xbb_xen_req *req;

	req = NULL;

	mtx_assert(&xbb->lock, MA_OWNED);

	if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
		STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
		xbb->active_request_count++;
	}

	return (req);
}

/**
 * Return an allocated transaction tracking structure to the free pool.
 *
 * \param xbb  Per-instance xbb configuration structure.
 * \param req  The request structure to free.
 */
static inline void
xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
{
	mtx_assert(&xbb->lock, MA_OWNED);

	STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
	xbb->active_request_count--;

	KASSERT(xbb->active_request_count >= 0,
		("xbb_release_req: negative active count"));
}

/**
 * Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool.
 *
 * \param xbb	    Per-instance xbb configuration structure.
 * \param req_list  The list of requests to free.
 * \param nreqs	    The number of items in the list.
 */
static inline void
xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
		 int nreqs)
{
	mtx_assert(&xbb->lock, MA_OWNED);

	STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
	xbb->active_request_count -= nreqs;

	KASSERT(xbb->active_request_count >= 0,
		("xbb_release_reqs: negative active count"));
}

/**
 * Given a page index and 512b sector offset within that page,
 * calculate an offset into a request's kva region.
 *
 * \param reqlist The request structure whose kva region will be accessed.
 * \param pagenr  The page index used to compute the kva offset.
 * \param sector  The 512b sector index used to compute the page relative
 *                kva offset.
 *
 * \return  The computed global KVA offset.
 */
static inline uint8_t *
xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
	return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
}

#ifdef XBB_USE_BOUNCE_BUFFERS
/**
 * Given a page index and 512b sector offset within that page,
 * calculate an offset into a request's local bounce memory region.
 *
 * \param reqlist The request structure whose bounce region will be accessed.
 * \param pagenr  The page index used to compute the bounce offset.
 * \param sector  The 512b sector index used to compute the page relative
 *                bounce offset.
 *
 * \return  The computed global bounce buffer address.
 */
static inline uint8_t *
xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
	return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
}
#endif

/**
 * Given a page number and 512b sector offset within that page,
 * calculate an offset into the request's memory region that the
 * underlying backend device/file should use for I/O.
 *
 * \param reqlist The request structure whose I/O region will be accessed.
 * \param pagenr  The page index used to compute the I/O offset.
 * \param sector  The 512b sector index used to compute the page relative
 *                I/O offset.
 *
 * \return  The computed global I/O address.
 *
 * Depending on configuration, this will either be a local bounce buffer
 * or a pointer to the memory mapped in from the front-end domain for
 * this request.
 */
static inline uint8_t *
xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
#ifdef XBB_USE_BOUNCE_BUFFERS
	return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
#else
	return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
#endif
}

/**
 * Given a page index and 512b sector offset within that page, calculate
 * an offset into the local pseudo-physical address space used to map a
 * front-end's request data into a request.
 *
 * \param reqlist The request list structure whose pseudo-physical region
 *                will be accessed.
 * \param pagenr  The page index used to compute the pseudo-physical offset.
 * \param sector  The 512b sector index used to compute the page relative
 *                pseudo-physical offset.
 *
 * \return  The computed global pseudo-physical address.
 *
 * Depending on configuration, this will either be a local bounce buffer
 * or a pointer to the memory mapped in from the front-end domain for
 * this request.
 */
static inline uintptr_t
xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
	struct xbb_softc *xbb;

	xbb = reqlist->xbb;

	return ((uintptr_t)(xbb->gnt_base_addr +
	    (uintptr_t)(reqlist->kva - xbb->kva) +
	    (PAGE_SIZE * pagenr) + (sector << 9)));
}

/**
 * Get Kernel Virtual Address space for mapping requests.
 *
 * \param xbb	    Per-instance xbb configuration structure.
 * \param nr_pages  Number of pages needed.
 *
 * \return  On success, a pointer to the allocated KVA region.  Otherwise NULL.
 *
 * Note:  This should be unnecessary once we have either chaining or
 * scatter/gather support for struct bio.  At that point we'll be able to
 * put multiple addresses and lengths in one bio/bio chain and won't need
 * to map everything into one virtual segment.
 */
static uint8_t *
xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
{
	intptr_t first_clear;
	intptr_t num_clear;
	uint8_t *free_kva;
	int      i;

	KASSERT(nr_pages != 0, ("xbb_get_kva of zero length"));

	first_clear = 0;
	free_kva = NULL;

	mtx_lock(&xbb->lock);

	/*
	 * Look for the first available page.  If there are none, we're done.
	 */
	bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);

	if (first_clear == -1)
		goto bailout;

	/*
	 * Starting at the first available page, look for consecutive free
	 * pages that will satisfy the user's request.
	 */
	for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
		/*
		 * If this is true, the page is used, so we have to reset
		 * the number of clear pages and the first clear page
		 * (since it pointed to a region with an insufficient number
		 * of clear pages).
		 */
		if (bit_test(xbb->kva_free, i)) {
			num_clear = 0;
			first_clear = -1;
			continue;
		}

		if (first_clear == -1)
			first_clear = i;

		/*
		 * If this is true, we've found a large enough free region
		 * to satisfy the request.
		 */
		if (++num_clear == nr_pages) {

			bit_nset(xbb->kva_free, first_clear,
				 first_clear + nr_pages - 1);

			free_kva = xbb->kva +
				(uint8_t *)(first_clear * PAGE_SIZE);

			KASSERT(free_kva >= (uint8_t *)xbb->kva &&
				free_kva + (nr_pages * PAGE_SIZE) <=
				(uint8_t *)xbb->ring_config.va,
				("Free KVA %p len %d out of range, "
				 "kva = %#jx, ring VA = %#jx\n", free_kva,
				 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
				 (uintmax_t)xbb->ring_config.va));
			break;
		}
	}

bailout:

	if (free_kva == NULL) {
		xbb->flags |= XBBF_RESOURCE_SHORTAGE;
		xbb->kva_shortages++;
	}

	mtx_unlock(&xbb->lock);

	return (free_kva);
}

/**
 * Free allocated KVA.
 *
 * \param xbb	    Per-instance xbb configuration structure.
 * \param kva_ptr   Pointer to allocated KVA region.
 * \param nr_pages  Number of pages in the KVA region.
 */
static void
xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
{
	intptr_t start_page;

	mtx_assert(&xbb->lock, MA_OWNED);

	start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
	bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);

}
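
/*
 * Illustrative usage sketch (an assumption, not original driver code):
 * xbb_get_kva() reserves a contiguous run of per-instance KVA pages before
 * the front-end's grant references are mapped, and xbb_free_kva() returns
 * that run (with the instance lock held) once the I/O has completed.
 */
#if 0
	reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
	if (reqlist->kva == NULL)
		return (ENOMEM);	/* Retried after a resource wakeup. */
	/* ... map grants and perform the I/O ... */
	mtx_lock(&xbb->lock);
	xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
	mtx_unlock(&xbb->lock);
#endif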

/**
 * Unmap the front-end pages associated with this I/O request.
 *
 * \param reqlist  The request list structure to unmap.
 */
static void
xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
{
	struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST];
	u_int			      i;
	u_int			      invcount;
	int			      error;

	invcount = 0;
	for (i = 0; i < reqlist->nr_segments; i++) {

		if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
			continue;

		unmap[invcount].host_addr    = xbb_get_gntaddr(reqlist, i, 0);
		unmap[invcount].dev_bus_addr = 0;
		unmap[invcount].handle       = reqlist->gnt_handles[i];
		reqlist->gnt_handles[i]	     = GRANT_REF_INVALID;
		invcount++;
	}

	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					  unmap, invcount);
	KASSERT(error == 0, ("Grant table operation failed"));
}

/**
 * Allocate an internal transaction tracking structure from the free pool.
 *
 * \param xbb  Per-instance xbb configuration structure.
 *
 * \return  On success, a pointer to the allocated xbb_xen_reqlist structure.
 *          Otherwise NULL.
 */
static inline struct xbb_xen_reqlist *
xbb_get_reqlist(struct xbb_softc *xbb)
{
	struct xbb_xen_reqlist *reqlist;

	reqlist = NULL;

	mtx_assert(&xbb->lock, MA_OWNED);

	if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {

		STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
		reqlist->flags = XBB_REQLIST_NONE;
		reqlist->kva = NULL;
		reqlist->status = BLKIF_RSP_OKAY;
		reqlist->residual_512b_sectors = 0;
		reqlist->num_children = 0;
		reqlist->nr_segments = 0;
		STAILQ_INIT(&reqlist->contig_req_list);
	}

	return (reqlist);
}

/**
 * Return an allocated transaction tracking structure to the free pool.
 *
 * \param xbb     Per-instance xbb configuration structure.
 * \param req     The request list structure to free.
 * \param wakeup  If set, wakeup the work thread if freeing this reqlist
 *                during a resource shortage condition.
 */
static inline void
xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
		    int wakeup)
{

	mtx_assert(&xbb->lock, MA_OWNED);

	if (wakeup) {
		wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
		xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
	}

	if (reqlist->kva != NULL)
		xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);

	xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);

	STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);

	if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
		/*
		 * Shutdown is in progress.  See if we can
		 * progress further now that one more request
		 * has completed and been returned to the
		 * free pool.
		 */
		xbb_shutdown(xbb);
	}

	if (wakeup != 0)
		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
}

/**
 * Request resources and do basic request setup.
 *
 * \param xbb	      Per-instance xbb configuration structure.
 * \param reqlist     Pointer to reqlist pointer.
 * \param ring_req    Pointer to a block ring request.
 * \param ring_index  The ring index of this request.
 *
 * \return  0 for success, non-zero for failure.
 */
static int
xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
		  blkif_request_t *ring_req, RING_IDX ring_idx)
{
	struct xbb_xen_reqlist *nreqlist;
	struct xbb_xen_req     *nreq;

	nreqlist = NULL;
	nreq     = NULL;

	mtx_lock(&xbb->lock);

	/*
	 * We don't allow new resources to be allocated if we're in the
	 * process of shutting down.
	 */
	if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
		mtx_unlock(&xbb->lock);
		return (1);
	}

	/*
	 * Allocate a reqlist if the caller doesn't have one already.
	 */
	if (*reqlist == NULL) {
		nreqlist = xbb_get_reqlist(xbb);
		if (nreqlist == NULL)
			goto bailout_error;
	}

	/* We always allocate a request. */
	nreq = xbb_get_req(xbb);
	if (nreq == NULL)
		goto bailout_error;

	mtx_unlock(&xbb->lock);

	if (*reqlist == NULL) {
		*reqlist = nreqlist;
		nreqlist->operation = ring_req->operation;
		nreqlist->starting_sector_number = ring_req->sector_number;
		STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
				   links);
	}

	nreq->reqlist = *reqlist;
	nreq->req_ring_idx = ring_idx;
	nreq->id = ring_req->id;
	nreq->operation = ring_req->operation;

	if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
		bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req));
		nreq->ring_req = &nreq->ring_req_storage;
	} else {
		nreq->ring_req = ring_req;
	}

	binuptime(&nreq->ds_t0);
	devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
	STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
	(*reqlist)->num_children++;
	(*reqlist)->nr_segments += ring_req->nr_segments;

	return (0);

bailout_error:

	/*
	 * We're out of resources, so set the shortage flag.  The next time
	 * a request is released, we'll try waking up the work thread to
	 * see if we can allocate more resources.
	 */
	xbb->flags |= XBBF_RESOURCE_SHORTAGE;
	xbb->request_shortages++;

	if (nreq != NULL)
		xbb_release_req(xbb, nreq);

	if (nreqlist != NULL)
		xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);

	mtx_unlock(&xbb->lock);

	return (1);
}

/**
 * Create and queue a response to a blkif request.
 *
 * \param xbb     Per-instance xbb configuration structure.
 * \param req     The request structure to which to respond.
 * \param status  The status code to report.  See BLKIF_RSP_*
 *                in sys/xen/interface/io/blkif.h.
 */
static void
xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
{
	blkif_response_t *resp;

	/*
	 * The mutex is required here, and should be held across this call
	 * until after the subsequent call to xbb_push_responses().  This
	 * is to guarantee that another context won't queue responses and
	 * push them while we're active.
	 *
	 * That could lead to the other end being notified of responses
	 * before the resources have been freed on this end.  The other end
	 * would then be able to queue additional I/O, and we may run out
	 * of resources because we haven't freed them all yet.
	 */
	mtx_assert(&xbb->lock, MA_OWNED);

	/*
	 * Place on the response ring for the relevant domain.
	 * For now, only the spacing between entries is different
	 * in the different ABIs, not the response entry layout.
	 */
	switch (xbb->abi) {
	case BLKIF_PROTOCOL_NATIVE:
		resp = RING_GET_RESPONSE(&xbb->rings.native,
					 xbb->rings.native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
		resp = (blkif_response_t *)
		    RING_GET_RESPONSE(&xbb->rings.x86_32,
				      xbb->rings.x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
		resp = (blkif_response_t *)
		    RING_GET_RESPONSE(&xbb->rings.x86_64,
				      xbb->rings.x86_64.rsp_prod_pvt);
		break;
	default:
		panic("Unexpected blkif protocol ABI.");
	}

	resp->id        = req->id;
	resp->operation = req->operation;
	resp->status    = status;

	if (status != BLKIF_RSP_OKAY)
		xbb->reqs_completed_with_error++;

	xbb->rings.common.rsp_prod_pvt++;

	xbb->reqs_queued_for_completion++;

}

/**
 * Send queued responses to blkif requests.
 *
 * \param xbb	          Per-instance xbb configuration structure.
 * \param run_taskqueue   Flag that is set to 1 if the taskqueue
 *                        should be run, 0 if it does not need to be run.
 * \param notify	  Flag that is set to 1 if the other end should be
 *                        notified via irq, 0 if the other end should not be
 *                        notified.
 */
static void
xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
{
	int more_to_do;

	/*
	 * The mutex is required here.
	 */
	mtx_assert(&xbb->lock, MA_OWNED);

	more_to_do = 0;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);

	if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {

		/*
		 * Tail check for pending requests.  Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {

		more_to_do = 1;
	}

	xbb->reqs_completed += xbb->reqs_queued_for_completion;
	xbb->reqs_queued_for_completion = 0;

	*run_taskqueue = more_to_do;
}
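
/*
 * Illustrative usage sketch (an assumption, not original driver code):
 * responses are queued and pushed as a pair under the instance lock so that
 * no other context can push responses between the two steps, and the
 * front-end is only signalled after the lock has been dropped.
 */
#if 0
	mtx_lock(&xbb->lock);
	xbb_queue_response(xbb, req, BLKIF_RSP_OKAY);
	xbb_push_responses(xbb, &run_taskqueue, &notify);
	mtx_unlock(&xbb->lock);
	if (notify)
		xen_intr_signal(xbb->xen_intr_handle);
#endif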

/**
 * Complete a request list.
 *
 * \param xbb        Per-instance xbb configuration structure.
 * \param reqlist    Allocated internal request list structure.
 */
static void
xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
{
	struct xbb_xen_req *nreq;
	off_t		    sectors_sent;
	int		    notify, run_taskqueue;

	sectors_sent = 0;

	if (reqlist->flags & XBB_REQLIST_MAPPED)
		xbb_unmap_reqlist(reqlist);

	mtx_lock(&xbb->lock);

	/*
	 * All I/O is done, send the response. A lock is not necessary
	 * to protect the request list, because all requests have
	 * completed.  Therefore this is the only context accessing this
	 * reqlist right now.  However, in order to make sure that no one
	 * else queues responses onto the queue or pushes them to the other
	 * side while we're active, we need to hold the lock across the
	 * calls to xbb_queue_response() and xbb_push_responses().
	 */
	STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
		off_t cur_sectors_sent;

		/* Put this response on the ring, but don't push yet */
		xbb_queue_response(xbb, nreq, reqlist->status);

		/* We don't report bytes sent if there is an error. */
		if (reqlist->status == BLKIF_RSP_OKAY)
			cur_sectors_sent = nreq->nr_512b_sectors;
		else
			cur_sectors_sent = 0;

		sectors_sent += cur_sectors_sent;

		devstat_end_transaction(xbb->xbb_stats_in,
					/*bytes*/cur_sectors_sent << 9,
					reqlist->ds_tag_type,
					reqlist->ds_trans_type,
					/*now*/NULL,
					/*then*/&nreq->ds_t0);
	}

	/*
	 * Take out any sectors not sent.  If we wind up negative (which
	 * might happen if an error is reported as well as a residual), just
	 * report 0 sectors sent.
	 */
	sectors_sent -= reqlist->residual_512b_sectors;
	if (sectors_sent < 0)
		sectors_sent = 0;

	devstat_end_transaction(xbb->xbb_stats,
				/*bytes*/ sectors_sent << 9,
				reqlist->ds_tag_type,
				reqlist->ds_trans_type,
				/*now*/NULL,
				/*then*/&reqlist->ds_t0);

	xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);

	xbb_push_responses(xbb, &run_taskqueue, &notify);

	mtx_unlock(&xbb->lock);

	if (run_taskqueue)
		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);

	if (notify)
		xen_intr_signal(xbb->xen_intr_handle);
}

/**
 * Completion handler for buffer I/O requests issued by the device
 * backend driver.
 *
 * \param bio  The buffer I/O request on which to perform completion
 *             processing.
 */
static void
xbb_bio_done(struct bio *bio)
{
	struct xbb_softc       *xbb;
	struct xbb_xen_reqlist *reqlist;

	reqlist = bio->bio_caller1;
	xbb     = reqlist->xbb;

	reqlist->residual_512b_sectors += bio->bio_resid >> 9;

	/*
	 * This is a bit imprecise.  With aggregated I/O a single
	 * request list can contain multiple front-end requests and
	 * multiple bios may point to a single request.  By carefully
	 * walking the request list, we could map residuals and errors
	 * back to the original front-end request, but the interface
	 * isn't sufficiently rich for us to properly report the error.
	 * So, we just treat the entire request list as having failed if an
	 * error occurs on any part.  And, if an error occurs, we treat
	 * the amount of data transferred as 0.
	 *
	 * For residuals, we report it on the overall aggregated device,
	 * but not on the individual requests, since we don't currently
	 * do the work to determine which front-end request to which the
	 * residual applies.
	 */
	if (bio->bio_error) {
		DPRINTF("BIO returned error %d for operation on device %s\n",
			bio->bio_error, xbb->dev_name);
		reqlist->status = BLKIF_RSP_ERROR;

		if (bio->bio_error == ENXIO
		 && xenbus_get_state(xbb->dev) == XenbusStateConnected) {

			/*
			 * Backend device has disappeared.  Signal the
			 * front-end that we (the device proxy) want to
			 * go away.
			 */
			xenbus_set_state(xbb->dev, XenbusStateClosing);
		}
	}

#ifdef XBB_USE_BOUNCE_BUFFERS
	if (bio->bio_cmd == BIO_READ) {
		vm_offset_t kva_offset;

		kva_offset = (vm_offset_t)bio->bio_data
			   - (vm_offset_t)reqlist->bounce;
		memcpy((uint8_t *)reqlist->kva + kva_offset,
		       bio->bio_data, bio->bio_bcount);
	}
#endif /* XBB_USE_BOUNCE_BUFFERS */

	/*
	 * Decrement the pending count for the request list.  When we're
	 * done with the requests, send status back for all of them.
	 */
	if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
		xbb_complete_reqlist(xbb, reqlist);

	g_destroy_bio(bio);
}

/**
 * Parse a blkif request into an internal request structure and send
 * it to the backend for processing.
 *
 * \param xbb       Per-instance xbb configuration structure.
 * \param reqlist   Allocated internal request list structure.
 *
 * \return          On success, 0.  For resource shortages, non-zero.
 *
 * This routine performs the backend common aspects of request parsing
 * including compiling an internal request structure, parsing the S/G
 * list and any secondary ring requests in which they may reside, and
 * the mapping of front-end I/O pages into our domain.
 */
static int
xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
{
	struct xbb_sg                *xbb_sg;
	struct gnttab_map_grant_ref  *map;
	struct blkif_request_segment *sg;
	struct blkif_request_segment *last_block_sg;
	struct xbb_xen_req	     *nreq;
	u_int			      nseg;
	u_int			      seg_idx;
	u_int			      block_segs;
	int			      nr_sects;
	int			      total_sects;
	int			      operation;
	uint8_t			      bio_flags;
	int			      error;

	reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
	bio_flags            = 0;
	total_sects	     = 0;
	nr_sects	     = 0;

	/*
	 * First determine whether we have enough free KVA to satisfy this
	 * request list.  If not, tell xbb_run_queue() so it can go to
	 * sleep until we have more KVA.
	 */
	reqlist->kva = NULL;
	if (reqlist->nr_segments != 0) {
		reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
		if (reqlist->kva == NULL) {
			/*
			 * If we're out of KVA, return ENOMEM.
			 */
			return (ENOMEM);
		}
	}

	binuptime(&reqlist->ds_t0);
	devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);

	switch (reqlist->operation) {
	case BLKIF_OP_WRITE_BARRIER:
		bio_flags       |= BIO_ORDERED;
		reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
		/* FALLTHROUGH */
	case BLKIF_OP_WRITE:
		operation = BIO_WRITE;
		reqlist->ds_trans_type = DEVSTAT_WRITE;
		if ((xbb->flags & XBBF_READ_ONLY) != 0) {
			DPRINTF("Attempt to write to read only device %s\n",
				xbb->dev_name);
			reqlist->status = BLKIF_RSP_ERROR;
			goto send_response;
		}
		break;
	case BLKIF_OP_READ:
		operation = BIO_READ;
		reqlist->ds_trans_type = DEVSTAT_READ;
		break;
	case BLKIF_OP_FLUSH_DISKCACHE:
		/*
		 * If this is true, the user has requested that we disable
		 * flush support.  So we just complete the requests
		 * successfully.
		 */
		if (xbb->disable_flush != 0) {
			goto send_response;
		}

		/*
		 * The user has requested that we only send a real flush
		 * for every N flush requests.  So keep count, and either
		 * complete the request immediately or queue it for the
		 * backend.
		 */
		if (xbb->flush_interval != 0) {
			if (++(xbb->flush_count) < xbb->flush_interval) {
				goto send_response;
			} else
				xbb->flush_count = 0;
		}

		operation = BIO_FLUSH;
		reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
		reqlist->ds_trans_type = DEVSTAT_NO_DATA;
		goto do_dispatch;
		/*NOTREACHED*/
	default:
		DPRINTF("error: unknown block io operation [%d]\n",
			reqlist->operation);
		reqlist->status = BLKIF_RSP_ERROR;
		goto send_response;
	}

	reqlist->xbb  = xbb;
	xbb_sg        = xbb->xbb_sgs;
	map           = xbb->maps;
	seg_idx	      = 0;

	STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
		blkif_request_t		*ring_req;
		RING_IDX		 req_ring_idx;
		u_int			 req_seg_idx;

		ring_req	      = nreq->ring_req;
		req_ring_idx	      = nreq->req_ring_idx;
		nr_sects              = 0;
		nseg                  = ring_req->nr_segments;
		nreq->nr_pages        = nseg;
		nreq->nr_512b_sectors = 0;
		req_seg_idx	      = 0;
		sg	              = NULL;

		/* Check that number of segments is sane. */
		if (__predict_false(nseg == 0)
		 || __predict_false(nseg > xbb->max_request_segments)) {
			DPRINTF("Bad number of segments in request (%d)\n",
				nseg);
			reqlist->status = BLKIF_RSP_ERROR;
			goto send_response;
		}

		block_segs    = nseg;
		sg            = ring_req->seg;
		last_block_sg = sg + block_segs;

		while (sg < last_block_sg) {
			KASSERT(seg_idx <
				XBB_MAX_SEGMENTS_PER_REQLIST,
				("seg_idx %d is too large, max "
				 "segs %d\n", seg_idx,
				 XBB_MAX_SEGMENTS_PER_REQLIST));

			xbb_sg->first_sect = sg->first_sect;
			xbb_sg->last_sect  = sg->last_sect;
			xbb_sg->nsect =
			    (int8_t)(sg->last_sect -
			    sg->first_sect + 1);

			if ((sg->last_sect >= (PAGE_SIZE >> 9))
			 || (xbb_sg->nsect <= 0)) {
				reqlist->status = BLKIF_RSP_ERROR;
				goto send_response;
			}

			nr_sects += xbb_sg->nsect;
			map->host_addr = xbb_get_gntaddr(reqlist,
						seg_idx, /*sector*/0);
			KASSERT(map->host_addr + PAGE_SIZE <=
				xbb->ring_config.gnt_addr,
				("Host address %#jx len %d overlaps "
				 "ring address %#jx\n",
				(uintmax_t)map->host_addr, PAGE_SIZE,
				(uintmax_t)xbb->ring_config.gnt_addr));

			map->flags     = GNTMAP_host_map;
			map->ref       = sg->gref;
			map->dom       = xbb->otherend_id;
			if (operation == BIO_WRITE)
				map->flags |= GNTMAP_readonly;
			sg++;
			map++;
			xbb_sg++;
			seg_idx++;
			req_seg_idx++;
		}

		/* Convert to the disk's sector size */
		nreq->nr_512b_sectors = nr_sects;
		nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
		total_sects += nr_sects;

		if ((nreq->nr_512b_sectors &
		    ((xbb->sector_size >> 9) - 1)) != 0) {
			device_printf(xbb->dev, "%s: I/O size (%d) is not "
				      "a multiple of the backing store sector "
				      "size (%d)\n", __func__,
				      nreq->nr_512b_sectors << 9,
				      xbb->sector_size);
			reqlist->status = BLKIF_RSP_ERROR;
			goto send_response;
		}
	}
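
	/*
	 * Worked example (illustrative, not in the original source): with a
	 * 4096-byte backend sector (sector_size_shift == 12), a front-end
	 * request spanning 8 512b sectors converts above as
	 * (8 << 9) >> 12 == 1 backend sector, and the alignment check
	 * requires nr_512b_sectors to be a multiple of (4096 >> 9) == 8.
	 */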

	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
					  xbb->maps, reqlist->nr_segments);
	if (error != 0)
		panic("Grant table operation failed (%d)", error);

	reqlist->flags |= XBB_REQLIST_MAPPED;

	for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
	     seg_idx++, map++){

		if (__predict_false(map->status != 0)) {
			DPRINTF("invalid buffer -- could not remap "
			        "it (%d)\n", map->status);
			DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags "
			        "0x%x ref 0x%x, dom %d\n", seg_idx,
				map->host_addr, map->flags, map->ref,
				map->dom);
			reqlist->status = BLKIF_RSP_ERROR;
			goto send_response;
		}

		reqlist->gnt_handles[seg_idx] = map->handle;
	}
	if (reqlist->starting_sector_number + total_sects >
	    xbb->media_num_sectors) {

		DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] "
			"extends past end of device %s\n",
			operation == BIO_READ ? "read" : "write",
			reqlist->starting_sector_number,
			reqlist->starting_sector_number + total_sects,
			xbb->dev_name);
		reqlist->status = BLKIF_RSP_ERROR;
		goto send_response;
	}

do_dispatch:

	error = xbb->dispatch_io(xbb,
				 reqlist,
				 operation,
				 bio_flags);

	if (error != 0) {
		reqlist->status = BLKIF_RSP_ERROR;
		goto send_response;
	}

	return (0);

send_response:

	xbb_complete_reqlist(xbb, reqlist);

	return (0);
}

static __inline int
xbb_count_sects(blkif_request_t *ring_req)
{
	int i;
	int cur_size = 0;

	for (i = 0; i < ring_req->nr_segments; i++) {
		int nsect;

		nsect = (int8_t)(ring_req->seg[i].last_sect -
			ring_req->seg[i].first_sect + 1);
		if (nsect <= 0)
			break;

		cur_size += nsect;
	}

	return (cur_size);
}

/**
 * Process incoming requests from the shared communication ring in response
 * to a signal on the ring's event channel.
 *
 * \param context  Callback argument registered during task initialization -
 *                 the xbb_softc for this instance.
 * \param pending  The number of taskqueue_enqueue events that have
 *                 occurred since this handler was last run.
 */
static void
xbb_run_queue(void *context, int pending)
{
	struct xbb_softc       *xbb;
	blkif_back_rings_t     *rings;
	RING_IDX		rp;
	uint64_t		cur_sector;
	int			cur_operation;
	struct xbb_xen_reqlist *reqlist;


	xbb   = (struct xbb_softc *)context;
	rings = &xbb->rings;

	/*
	 * Work gather and dispatch loop.  Note that we have a bias here
	 * towards gathering I/O sent by blockfront.  We first gather up
	 * everything in the ring, as long as we have resources.  Then we
	 * dispatch one request, and then attempt to gather up any
	 * additional requests that have come in while we were dispatching
	 * the request.
	 *
	 * This allows us to get a clearer picture (via devstat) of how
	 * many requests blockfront is queueing to us at any given time.
	 */
	for (;;) {
		int retval;

		/*
		 * Initialize reqlist to the last element in the pending
		 * queue, if there is one.  This allows us to add more
		 * requests to that request list, if we have room.
		 */
		reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
				      xbb_xen_reqlist, links);
		if (reqlist != NULL) {
			cur_sector = reqlist->next_contig_sector;
			cur_operation = reqlist->operation;
		} else {
			cur_operation = 0;
			cur_sector    = 0;
		}

		/*
		 * Cache req_prod to avoid accessing a cache line shared
		 * with the frontend.
		 */
		rp = rings->common.sring->req_prod;

		/* Ensure we see queued requests up to 'rp'. */
		rmb();

		/**
		 * Run so long as there is work to consume and the generation
		 * of a response will not overflow the ring.
		 *
		 * @note There's a 1 to 1 relationship between requests and
		 *       responses, so an overflow should never occur.  This
		 *       test is to protect our domain from digesting bogus
		 *       data.  Shouldn't we log this?
		 */
		while (rings->common.req_cons != rp
		    && RING_REQUEST_CONS_OVERFLOW(&rings->common,
						  rings->common.req_cons) == 0) {
			blkif_request_t	        ring_req_storage;
			blkif_request_t	       *ring_req;
			int			cur_size;

			switch (xbb->abi) {
			case BLKIF_PROTOCOL_NATIVE:
				ring_req = RING_GET_REQUEST(&xbb->rings.native,
				    rings->common.req_cons);
				break;
			case BLKIF_PROTOCOL_X86_32:
			{
				struct blkif_x86_32_request *ring_req32;

				ring_req32 = RING_GET_REQUEST(
				    &xbb->rings.x86_32, rings->common.req_cons);
				blkif_get_x86_32_req(&ring_req_storage,
				    ring_req32);
				ring_req = &ring_req_storage;
				break;
			}
			case BLKIF_PROTOCOL_X86_64:
			{
				struct blkif_x86_64_request *ring_req64;

				ring_req64 = RING_GET_REQUEST(&xbb->rings.x86_64,
				    rings->common.req_cons);
				blkif_get_x86_64_req(&ring_req_storage,
				    ring_req64);
				ring_req = &ring_req_storage;
				break;
			}
			default:
				panic("Unexpected blkif protocol ABI.");
				/* NOTREACHED */
			}

			/*
			 * Check for situations that would require closing
			 * off this I/O for further coalescing:
			 *  - Coalescing is turned off.
			 *  - Current I/O is out of sequence with the previous
			 *    I/O.
			 *  - Coalesced I/O would be too large.
			 */
			if ((reqlist != NULL)
			 && ((xbb->no_coalesce_reqs != 0)
			  || ((xbb->no_coalesce_reqs == 0)
			   && ((ring_req->sector_number != cur_sector)
			    || (ring_req->operation != cur_operation)
			    || ((ring_req->nr_segments + reqlist->nr_segments) >
			         xbb->max_reqlist_segments))))) {
				reqlist = NULL;
			}

			/*
			 * Grab and check for all resources in one shot.
			 * If we can't get all of the resources we need,
			 * the shortage is noted and the thread will get
			 * woken up when more resources are available.
			 */
			retval = xbb_get_resources(xbb, &reqlist, ring_req,
						   xbb->rings.common.req_cons);

			if (retval != 0) {
				/*
				 * Resource shortage has been recorded.
				 * We'll be scheduled to run once a request
				 * object frees up due to a completion.
				 */
				break;
			}

			/*
			 * Signify that	we can overwrite this request with
			 * a response by incrementing our consumer index.
			 * The response won't be generated until after
			 * we've already consumed all necessary data out
			 * of the version of the request in the ring buffer
			 * (for native mode).  We must update the consumer
			 * index before issuing back-end I/O so there is
			 * no possibility that it will complete and a
			 * response be generated before we make room in
			 * the queue for that response.
			 */
			xbb->rings.common.req_cons++;
			xbb->reqs_received++;

			cur_size = xbb_count_sects(ring_req);
			cur_sector = ring_req->sector_number + cur_size;
			reqlist->next_contig_sector = cur_sector;
			cur_operation = ring_req->operation;
		}

		/* Check for I/O to dispatch */
		reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
		if (reqlist == NULL) {
			/*
			 * We're out of work to do, put the task queue to
			 * sleep.
			 */
			break;
		}

		/*
		 * Grab the first request off the queue and attempt
		 * to dispatch it.
*/
1988 STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
1989
1990 retval = xbb_dispatch_io(xbb, reqlist);
1991 if (retval != 0) {
1992 /*
1993 * xbb_dispatch_io() returns non-zero only when
1994 * there is a resource shortage. If that's the
1995 * case, re-queue this request on the head of the
1996 * queue, and go to sleep until we have more
1997 * resources.
1998 */
1999 STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
2000 reqlist, links);
2001 break;
2002 } else {
2003 /*
2004 * If we still have anything on the queue after
2005 * removing the head entry, that is because we
2006 * met one of the criteria to create a new
2007 * request list (outlined above), and we'll call
2008 * that a forced dispatch for statistical purposes.
2009 *
2010 * Otherwise, if there is only one element on the
2011 * queue, we coalesced everything available on
2012 * the ring and we'll call that a normal dispatch.
2013 */
2014 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
2015
2016 if (reqlist != NULL)
2017 xbb->forced_dispatch++;
2018 else
2019 xbb->normal_dispatch++;
2020
2021 xbb->total_dispatch++;
2022 }
2023 }
2024 }
2025
2026 /**
2027 * Interrupt handler bound to the shared ring's event channel.
2028 *
2029 * \param arg Callback argument registered during event channel
2030 * binding - the xbb_softc for this instance.
2031 */
2032 static int
2033 xbb_filter(void *arg)
2034 {
2035 struct xbb_softc *xbb;
2036
2037 /* Defer to taskqueue thread. */
2038 xbb = (struct xbb_softc *)arg;
2039 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
2040
2041 return (FILTER_HANDLED);
2042 }
2043
2044 SDT_PROVIDER_DEFINE(xbb);
2045 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
2046 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
2047 "uint64_t");
2048 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
2049 "uint64_t", "uint64_t");
2050
2051 /*----------------------------- Backend Handlers -----------------------------*/
2052 /**
2053 * Backend handler for character device access.
2054 *
2055 * \param xbb Per-instance xbb configuration structure.
2056 * \param reqlist Allocated internal request list structure.
2057 * \param operation BIO_* I/O operation code.
2058 * \param bio_flags Additional bio_flag data to pass to any generated
2059 * bios (e.g. BIO_ORDERED).
2060 *
2061 * \return 0 for success, errno codes for failure.
2062 */ 2063 static int 2064 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, 2065 int operation, int bio_flags) 2066 { 2067 struct xbb_dev_data *dev_data; 2068 struct bio *bios[XBB_MAX_SEGMENTS_PER_REQLIST]; 2069 off_t bio_offset; 2070 struct bio *bio; 2071 struct xbb_sg *xbb_sg; 2072 u_int nbio; 2073 u_int bio_idx; 2074 u_int nseg; 2075 u_int seg_idx; 2076 int error; 2077 2078 dev_data = &xbb->backend.dev; 2079 bio_offset = (off_t)reqlist->starting_sector_number 2080 << xbb->sector_size_shift; 2081 error = 0; 2082 nbio = 0; 2083 bio_idx = 0; 2084 2085 if (operation == BIO_FLUSH) { 2086 bio = g_new_bio(); 2087 if (__predict_false(bio == NULL)) { 2088 DPRINTF("Unable to allocate bio for BIO_FLUSH\n"); 2089 error = ENOMEM; 2090 return (error); 2091 } 2092 2093 bio->bio_cmd = BIO_FLUSH; 2094 bio->bio_flags |= BIO_ORDERED; 2095 bio->bio_dev = dev_data->cdev; 2096 bio->bio_offset = 0; 2097 bio->bio_data = 0; 2098 bio->bio_done = xbb_bio_done; 2099 bio->bio_caller1 = reqlist; 2100 bio->bio_pblkno = 0; 2101 2102 reqlist->pendcnt = 1; 2103 2104 SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush, 2105 device_get_unit(xbb->dev)); 2106 2107 (*dev_data->csw->d_strategy)(bio); 2108 2109 return (0); 2110 } 2111 2112 xbb_sg = xbb->xbb_sgs; 2113 bio = NULL; 2114 nseg = reqlist->nr_segments; 2115 2116 for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) { 2117 2118 /* 2119 * KVA will not be contiguous, so any additional 2120 * I/O will need to be represented in a new bio. 2121 */ 2122 if ((bio != NULL) 2123 && (xbb_sg->first_sect != 0)) { 2124 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { 2125 printf("%s: Discontiguous I/O request " 2126 "from domain %d ends on " 2127 "non-sector boundary\n", 2128 __func__, xbb->otherend_id); 2129 error = EINVAL; 2130 goto fail_free_bios; 2131 } 2132 bio = NULL; 2133 } 2134 2135 if (bio == NULL) { 2136 /* 2137 * Make sure that the start of this bio is 2138 * aligned to a device sector. 2139 */ 2140 if ((bio_offset & (xbb->sector_size - 1)) != 0){ 2141 printf("%s: Misaligned I/O request " 2142 "from domain %d\n", __func__, 2143 xbb->otherend_id); 2144 error = EINVAL; 2145 goto fail_free_bios; 2146 } 2147 2148 bio = bios[nbio++] = g_new_bio(); 2149 if (__predict_false(bio == NULL)) { 2150 error = ENOMEM; 2151 goto fail_free_bios; 2152 } 2153 bio->bio_cmd = operation; 2154 bio->bio_flags |= bio_flags; 2155 bio->bio_dev = dev_data->cdev; 2156 bio->bio_offset = bio_offset; 2157 bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx, 2158 xbb_sg->first_sect); 2159 bio->bio_done = xbb_bio_done; 2160 bio->bio_caller1 = reqlist; 2161 bio->bio_pblkno = bio_offset >> xbb->sector_size_shift; 2162 } 2163 2164 bio->bio_length += xbb_sg->nsect << 9; 2165 bio->bio_bcount = bio->bio_length; 2166 bio_offset += xbb_sg->nsect << 9; 2167 2168 if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) { 2169 2170 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { 2171 printf("%s: Discontiguous I/O request " 2172 "from domain %d ends on " 2173 "non-sector boundary\n", 2174 __func__, xbb->otherend_id); 2175 error = EINVAL; 2176 goto fail_free_bios; 2177 } 2178 /* 2179 * KVA will not be contiguous, so any additional 2180 * I/O will need to be represented in a new bio. 
2181 */ 2182 bio = NULL; 2183 } 2184 } 2185 2186 reqlist->pendcnt = nbio; 2187 2188 for (bio_idx = 0; bio_idx < nbio; bio_idx++) 2189 { 2190 #ifdef XBB_USE_BOUNCE_BUFFERS 2191 vm_offset_t kva_offset; 2192 2193 kva_offset = (vm_offset_t)bios[bio_idx]->bio_data 2194 - (vm_offset_t)reqlist->bounce; 2195 if (operation == BIO_WRITE) { 2196 memcpy(bios[bio_idx]->bio_data, 2197 (uint8_t *)reqlist->kva + kva_offset, 2198 bios[bio_idx]->bio_bcount); 2199 } 2200 #endif 2201 if (operation == BIO_READ) { 2202 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read, 2203 device_get_unit(xbb->dev), 2204 bios[bio_idx]->bio_offset, 2205 bios[bio_idx]->bio_length); 2206 } else if (operation == BIO_WRITE) { 2207 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write, 2208 device_get_unit(xbb->dev), 2209 bios[bio_idx]->bio_offset, 2210 bios[bio_idx]->bio_length); 2211 } 2212 (*dev_data->csw->d_strategy)(bios[bio_idx]); 2213 } 2214 2215 return (error); 2216 2217 fail_free_bios: 2218 for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++) 2219 g_destroy_bio(bios[bio_idx]); 2220 2221 return (error); 2222 } 2223 2224 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int"); 2225 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t", 2226 "uint64_t"); 2227 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int", 2228 "uint64_t", "uint64_t"); 2229 2230 /** 2231 * Backend handler for file access. 2232 * 2233 * \param xbb Per-instance xbb configuration structure. 2234 * \param reqlist Allocated internal request list. 2235 * \param operation BIO_* I/O operation code. 2236 * \param flags Additional bio_flag data to pass to any generated bios 2237 * (e.g. BIO_ORDERED).. 2238 * 2239 * \return 0 for success, errno codes for failure. 2240 */ 2241 static int 2242 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, 2243 int operation, int flags) 2244 { 2245 struct xbb_file_data *file_data; 2246 u_int seg_idx; 2247 u_int nseg; 2248 struct uio xuio; 2249 struct xbb_sg *xbb_sg; 2250 struct iovec *xiovec; 2251 #ifdef XBB_USE_BOUNCE_BUFFERS 2252 void **p_vaddr; 2253 int saved_uio_iovcnt; 2254 #endif /* XBB_USE_BOUNCE_BUFFERS */ 2255 int error; 2256 2257 file_data = &xbb->backend.file; 2258 error = 0; 2259 bzero(&xuio, sizeof(xuio)); 2260 2261 switch (operation) { 2262 case BIO_READ: 2263 xuio.uio_rw = UIO_READ; 2264 break; 2265 case BIO_WRITE: 2266 xuio.uio_rw = UIO_WRITE; 2267 break; 2268 case BIO_FLUSH: { 2269 struct mount *mountpoint; 2270 2271 SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush, 2272 device_get_unit(xbb->dev)); 2273 2274 (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT); 2275 2276 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); 2277 error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread); 2278 VOP_UNLOCK(xbb->vn, 0); 2279 2280 vn_finished_write(mountpoint); 2281 2282 goto bailout_send_response; 2283 /* NOTREACHED */ 2284 } 2285 default: 2286 panic("invalid operation %d", operation); 2287 /* NOTREACHED */ 2288 } 2289 xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number 2290 << xbb->sector_size_shift; 2291 xuio.uio_segflg = UIO_SYSSPACE; 2292 xuio.uio_iov = file_data->xiovecs; 2293 xuio.uio_iovcnt = 0; 2294 xbb_sg = xbb->xbb_sgs; 2295 nseg = reqlist->nr_segments; 2296 2297 for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) { 2298 2299 /* 2300 * If the first sector is not 0, the KVA will 2301 * not be contiguous and we'll need to go on 2302 * to another segment. 
2303 */ 2304 if (xbb_sg->first_sect != 0) 2305 xiovec = NULL; 2306 2307 if (xiovec == NULL) { 2308 xiovec = &file_data->xiovecs[xuio.uio_iovcnt]; 2309 xiovec->iov_base = xbb_reqlist_ioaddr(reqlist, 2310 seg_idx, xbb_sg->first_sect); 2311 #ifdef XBB_USE_BOUNCE_BUFFERS 2312 /* 2313 * Store the address of the incoming 2314 * buffer at this particular offset 2315 * as well, so we can do the copy 2316 * later without having to do more 2317 * work to recalculate this address. 2318 */ 2319 p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt]; 2320 *p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx, 2321 xbb_sg->first_sect); 2322 #endif /* XBB_USE_BOUNCE_BUFFERS */ 2323 xiovec->iov_len = 0; 2324 xuio.uio_iovcnt++; 2325 } 2326 2327 xiovec->iov_len += xbb_sg->nsect << 9; 2328 2329 xuio.uio_resid += xbb_sg->nsect << 9; 2330 2331 /* 2332 * If the last sector is not the full page 2333 * size count, the next segment will not be 2334 * contiguous in KVA and we need a new iovec. 2335 */ 2336 if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) 2337 xiovec = NULL; 2338 } 2339 2340 xuio.uio_td = curthread; 2341 2342 #ifdef XBB_USE_BOUNCE_BUFFERS 2343 saved_uio_iovcnt = xuio.uio_iovcnt; 2344 2345 if (operation == BIO_WRITE) { 2346 /* Copy the write data to the local buffer. */ 2347 for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr, 2348 xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt; 2349 seg_idx++, xiovec++, p_vaddr++) { 2350 2351 memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len); 2352 } 2353 } else { 2354 /* 2355 * We only need to save off the iovecs in the case of a 2356 * read, because the copy for the read happens after the 2357 * VOP_READ(). (The uio will get modified in that call 2358 * sequence.) 2359 */ 2360 memcpy(file_data->saved_xiovecs, xuio.uio_iov, 2361 xuio.uio_iovcnt * sizeof(xuio.uio_iov[0])); 2362 } 2363 #endif /* XBB_USE_BOUNCE_BUFFERS */ 2364 2365 switch (operation) { 2366 case BIO_READ: 2367 2368 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read, 2369 device_get_unit(xbb->dev), xuio.uio_offset, 2370 xuio.uio_resid); 2371 2372 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); 2373 2374 /* 2375 * UFS pays attention to IO_DIRECT for reads. If the 2376 * DIRECTIO option is configured into the kernel, it calls 2377 * ffs_rawread(). But that only works for single-segment 2378 * uios with user space addresses. In our case, with a 2379 * kernel uio, it still reads into the buffer cache, but it 2380 * will just try to release the buffer from the cache later 2381 * on in ffs_read(). 2382 * 2383 * ZFS does not pay attention to IO_DIRECT for reads. 2384 * 2385 * UFS does not pay attention to IO_SYNC for reads. 2386 * 2387 * ZFS pays attention to IO_SYNC (which translates into the 2388 * Solaris define FRSYNC for zfs_read()) for reads. It 2389 * attempts to sync the file before reading. 2390 * 2391 * So, to attempt to provide some barrier semantics in the 2392 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 2393 */ 2394 error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ? 2395 (IO_DIRECT|IO_SYNC) : 0, file_data->cred); 2396 2397 VOP_UNLOCK(xbb->vn, 0); 2398 break; 2399 case BIO_WRITE: { 2400 struct mount *mountpoint; 2401 2402 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write, 2403 device_get_unit(xbb->dev), xuio.uio_offset, 2404 xuio.uio_resid); 2405 2406 (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT); 2407 2408 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); 2409 2410 /* 2411 * UFS pays attention to IO_DIRECT for writes. The write 2412 * is done asynchronously. 
(Normally the write would just 2413 * get put into cache. 2414 * 2415 * UFS pays attention to IO_SYNC for writes. It will 2416 * attempt to write the buffer out synchronously if that 2417 * flag is set. 2418 * 2419 * ZFS does not pay attention to IO_DIRECT for writes. 2420 * 2421 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 2422 * for writes. It will flush the transaction from the 2423 * cache before returning. 2424 * 2425 * So if we've got the BIO_ORDERED flag set, we want 2426 * IO_SYNC in either the UFS or ZFS case. 2427 */ 2428 error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ? 2429 IO_SYNC : 0, file_data->cred); 2430 VOP_UNLOCK(xbb->vn, 0); 2431 2432 vn_finished_write(mountpoint); 2433 2434 break; 2435 } 2436 default: 2437 panic("invalid operation %d", operation); 2438 /* NOTREACHED */ 2439 } 2440 2441 #ifdef XBB_USE_BOUNCE_BUFFERS 2442 /* We only need to copy here for read operations */ 2443 if (operation == BIO_READ) { 2444 2445 for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr, 2446 xiovec = file_data->saved_xiovecs; 2447 seg_idx < saved_uio_iovcnt; seg_idx++, 2448 xiovec++, p_vaddr++) { 2449 2450 /* 2451 * Note that we have to use the copy of the 2452 * io vector we made above. uiomove() modifies 2453 * the uio and its referenced vector as uiomove 2454 * performs the copy, so we can't rely on any 2455 * state from the original uio. 2456 */ 2457 memcpy(*p_vaddr, xiovec->iov_base, xiovec->iov_len); 2458 } 2459 } 2460 #endif /* XBB_USE_BOUNCE_BUFFERS */ 2461 2462 bailout_send_response: 2463 2464 if (error != 0) 2465 reqlist->status = BLKIF_RSP_ERROR; 2466 2467 xbb_complete_reqlist(xbb, reqlist); 2468 2469 return (0); 2470 } 2471 2472 /*--------------------------- Backend Configuration --------------------------*/ 2473 /** 2474 * Close and cleanup any backend device/file specific state for this 2475 * block back instance. 2476 * 2477 * \param xbb Per-instance xbb configuration structure. 2478 */ 2479 static void 2480 xbb_close_backend(struct xbb_softc *xbb) 2481 { 2482 DROP_GIANT(); 2483 DPRINTF("closing dev=%s\n", xbb->dev_name); 2484 if (xbb->vn) { 2485 int flags = FREAD; 2486 2487 if ((xbb->flags & XBBF_READ_ONLY) == 0) 2488 flags |= FWRITE; 2489 2490 switch (xbb->device_type) { 2491 case XBB_TYPE_DISK: 2492 if (xbb->backend.dev.csw) { 2493 dev_relthread(xbb->backend.dev.cdev, 2494 xbb->backend.dev.dev_ref); 2495 xbb->backend.dev.csw = NULL; 2496 xbb->backend.dev.cdev = NULL; 2497 } 2498 break; 2499 case XBB_TYPE_FILE: 2500 break; 2501 case XBB_TYPE_NONE: 2502 default: 2503 panic("Unexpected backend type."); 2504 break; 2505 } 2506 2507 (void)vn_close(xbb->vn, flags, NOCRED, curthread); 2508 xbb->vn = NULL; 2509 2510 switch (xbb->device_type) { 2511 case XBB_TYPE_DISK: 2512 break; 2513 case XBB_TYPE_FILE: 2514 if (xbb->backend.file.cred != NULL) { 2515 crfree(xbb->backend.file.cred); 2516 xbb->backend.file.cred = NULL; 2517 } 2518 break; 2519 case XBB_TYPE_NONE: 2520 default: 2521 panic("Unexpected backend type."); 2522 break; 2523 } 2524 } 2525 PICKUP_GIANT(); 2526 } 2527 2528 /** 2529 * Open a character device to be used for backend I/O. 2530 * 2531 * \param xbb Per-instance xbb configuration structure. 2532 * 2533 * \return 0 for success, errno codes for failure. 
2534 */ 2535 static int 2536 xbb_open_dev(struct xbb_softc *xbb) 2537 { 2538 struct vattr vattr; 2539 struct cdev *dev; 2540 struct cdevsw *devsw; 2541 int error; 2542 2543 xbb->device_type = XBB_TYPE_DISK; 2544 xbb->dispatch_io = xbb_dispatch_dev; 2545 xbb->backend.dev.cdev = xbb->vn->v_rdev; 2546 xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev, 2547 &xbb->backend.dev.dev_ref); 2548 if (xbb->backend.dev.csw == NULL) 2549 panic("Unable to retrieve device switch"); 2550 2551 error = VOP_GETATTR(xbb->vn, &vattr, NOCRED); 2552 if (error) { 2553 xenbus_dev_fatal(xbb->dev, error, "error getting " 2554 "vnode attributes for device %s", 2555 xbb->dev_name); 2556 return (error); 2557 } 2558 2559 2560 dev = xbb->vn->v_rdev; 2561 devsw = dev->si_devsw; 2562 if (!devsw->d_ioctl) { 2563 xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for " 2564 "device %s!", xbb->dev_name); 2565 return (ENODEV); 2566 } 2567 2568 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 2569 (caddr_t)&xbb->sector_size, FREAD, 2570 curthread); 2571 if (error) { 2572 xenbus_dev_fatal(xbb->dev, error, 2573 "error calling ioctl DIOCGSECTORSIZE " 2574 "for device %s", xbb->dev_name); 2575 return (error); 2576 } 2577 2578 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2579 (caddr_t)&xbb->media_size, FREAD, 2580 curthread); 2581 if (error) { 2582 xenbus_dev_fatal(xbb->dev, error, 2583 "error calling ioctl DIOCGMEDIASIZE " 2584 "for device %s", xbb->dev_name); 2585 return (error); 2586 } 2587 2588 return (0); 2589 } 2590 2591 /** 2592 * Open a file to be used for backend I/O. 2593 * 2594 * \param xbb Per-instance xbb configuration structure. 2595 * 2596 * \return 0 for success, errno codes for failure. 2597 */ 2598 static int 2599 xbb_open_file(struct xbb_softc *xbb) 2600 { 2601 struct xbb_file_data *file_data; 2602 struct vattr vattr; 2603 int error; 2604 2605 file_data = &xbb->backend.file; 2606 xbb->device_type = XBB_TYPE_FILE; 2607 xbb->dispatch_io = xbb_dispatch_file; 2608 error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred); 2609 if (error != 0) { 2610 xenbus_dev_fatal(xbb->dev, error, 2611 "error calling VOP_GETATTR()" 2612 "for file %s", xbb->dev_name); 2613 return (error); 2614 } 2615 2616 /* 2617 * Verify that we have the ability to upgrade to exclusive 2618 * access on this file so we can trap errors at open instead 2619 * of reporting them during first access. 2620 */ 2621 if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) { 2622 vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY); 2623 if (xbb->vn->v_iflag & VI_DOOMED) { 2624 error = EBADF; 2625 xenbus_dev_fatal(xbb->dev, error, 2626 "error locking file %s", 2627 xbb->dev_name); 2628 2629 return (error); 2630 } 2631 } 2632 2633 file_data->cred = crhold(curthread->td_ucred); 2634 xbb->media_size = vattr.va_size; 2635 2636 /* 2637 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 2638 * With ZFS, it is 131072 bytes. Block sizes that large don't work 2639 * with disklabel and UFS on FreeBSD at least. Large block sizes 2640 * may not work with other OSes as well. So just export a sector 2641 * size of 512 bytes, which should work with any OS or 2642 * application. Since our backing is a file, any block size will 2643 * work fine for the backing store. 2644 */ 2645 #if 0 2646 xbb->sector_size = vattr.va_blocksize; 2647 #endif 2648 xbb->sector_size = 512; 2649 2650 /* 2651 * Sanity check. The media size has to be at least one 2652 * sector long. 
2653 */ 2654 if (xbb->media_size < xbb->sector_size) { 2655 error = EINVAL; 2656 xenbus_dev_fatal(xbb->dev, error, 2657 "file %s size %ju < block size %u", 2658 xbb->dev_name, 2659 (uintmax_t)xbb->media_size, 2660 xbb->sector_size); 2661 } 2662 return (error); 2663 } 2664 2665 /** 2666 * Open the backend provider for this connection. 2667 * 2668 * \param xbb Per-instance xbb configuration structure. 2669 * 2670 * \return 0 for success, errno codes for failure. 2671 */ 2672 static int 2673 xbb_open_backend(struct xbb_softc *xbb) 2674 { 2675 struct nameidata nd; 2676 int flags; 2677 int error; 2678 2679 flags = FREAD; 2680 error = 0; 2681 2682 DPRINTF("opening dev=%s\n", xbb->dev_name); 2683 2684 if (rootvnode == NULL) { 2685 xenbus_dev_fatal(xbb->dev, ENOENT, 2686 "Root file system not mounted"); 2687 return (ENOENT); 2688 } 2689 2690 if ((xbb->flags & XBBF_READ_ONLY) == 0) 2691 flags |= FWRITE; 2692 2693 pwd_ensure_dirs(); 2694 2695 again: 2696 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread); 2697 error = vn_open(&nd, &flags, 0, NULL); 2698 if (error) { 2699 /* 2700 * This is the only reasonable guess we can make as far as 2701 * path if the user doesn't give us a fully qualified path. 2702 * If they want to specify a file, they need to specify the 2703 * full path. 2704 */ 2705 if (xbb->dev_name[0] != '/') { 2706 char *dev_path = "/dev/"; 2707 char *dev_name; 2708 2709 /* Try adding device path at beginning of name */ 2710 dev_name = malloc(strlen(xbb->dev_name) 2711 + strlen(dev_path) + 1, 2712 M_XENBLOCKBACK, M_NOWAIT); 2713 if (dev_name) { 2714 sprintf(dev_name, "%s%s", dev_path, 2715 xbb->dev_name); 2716 free(xbb->dev_name, M_XENBLOCKBACK); 2717 xbb->dev_name = dev_name; 2718 goto again; 2719 } 2720 } 2721 xenbus_dev_fatal(xbb->dev, error, "error opening device %s", 2722 xbb->dev_name); 2723 return (error); 2724 } 2725 2726 NDFREE(&nd, NDF_ONLY_PNBUF); 2727 2728 xbb->vn = nd.ni_vp; 2729 2730 /* We only support disks and files. */ 2731 if (vn_isdisk(xbb->vn, &error)) { 2732 error = xbb_open_dev(xbb); 2733 } else if (xbb->vn->v_type == VREG) { 2734 error = xbb_open_file(xbb); 2735 } else { 2736 error = EINVAL; 2737 xenbus_dev_fatal(xbb->dev, error, "%s is not a disk " 2738 "or file", xbb->dev_name); 2739 } 2740 VOP_UNLOCK(xbb->vn, 0); 2741 2742 if (error != 0) { 2743 xbb_close_backend(xbb); 2744 return (error); 2745 } 2746 2747 xbb->sector_size_shift = fls(xbb->sector_size) - 1; 2748 xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift; 2749 2750 DPRINTF("opened %s=%s sector_size=%u media_size=%" PRId64 "\n", 2751 (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file", 2752 xbb->dev_name, xbb->sector_size, xbb->media_size); 2753 2754 return (0); 2755 } 2756 2757 /*------------------------ Inter-Domain Communication ------------------------*/ 2758 /** 2759 * Free dynamically allocated KVA or pseudo-physical address allocations. 2760 * 2761 * \param xbb Per-instance xbb configuration structure. 2762 */ 2763 static void 2764 xbb_free_communication_mem(struct xbb_softc *xbb) 2765 { 2766 if (xbb->kva != 0) { 2767 if (xbb->pseudo_phys_res != NULL) { 2768 xenmem_free(xbb->dev, xbb->pseudo_phys_res_id, 2769 xbb->pseudo_phys_res); 2770 xbb->pseudo_phys_res = NULL; 2771 } 2772 } 2773 xbb->kva = 0; 2774 xbb->gnt_base_addr = 0; 2775 if (xbb->kva_free != NULL) { 2776 free(xbb->kva_free, M_XENBLOCKBACK); 2777 xbb->kva_free = NULL; 2778 } 2779 } 2780 2781 /** 2782 * Cleanup all inter-domain communication mechanisms. 
2783 * 2784 * \param xbb Per-instance xbb configuration structure. 2785 */ 2786 static int 2787 xbb_disconnect(struct xbb_softc *xbb) 2788 { 2789 struct gnttab_unmap_grant_ref ops[XBB_MAX_RING_PAGES]; 2790 struct gnttab_unmap_grant_ref *op; 2791 u_int ring_idx; 2792 int error; 2793 2794 DPRINTF("\n"); 2795 2796 if ((xbb->flags & XBBF_RING_CONNECTED) == 0) 2797 return (0); 2798 2799 xen_intr_unbind(&xbb->xen_intr_handle); 2800 2801 mtx_unlock(&xbb->lock); 2802 taskqueue_drain(xbb->io_taskqueue, &xbb->io_task); 2803 mtx_lock(&xbb->lock); 2804 2805 /* 2806 * No new interrupts can generate work, but we must wait 2807 * for all currently active requests to drain. 2808 */ 2809 if (xbb->active_request_count != 0) 2810 return (EAGAIN); 2811 2812 for (ring_idx = 0, op = ops; 2813 ring_idx < xbb->ring_config.ring_pages; 2814 ring_idx++, op++) { 2815 2816 op->host_addr = xbb->ring_config.gnt_addr 2817 + (ring_idx * PAGE_SIZE); 2818 op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx]; 2819 op->handle = xbb->ring_config.handle[ring_idx]; 2820 } 2821 2822 error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ops, 2823 xbb->ring_config.ring_pages); 2824 if (error != 0) 2825 panic("Grant table op failed (%d)", error); 2826 2827 xbb_free_communication_mem(xbb); 2828 2829 if (xbb->requests != NULL) { 2830 free(xbb->requests, M_XENBLOCKBACK); 2831 xbb->requests = NULL; 2832 } 2833 2834 if (xbb->request_lists != NULL) { 2835 struct xbb_xen_reqlist *reqlist; 2836 int i; 2837 2838 /* There is one request list for ever allocated request. */ 2839 for (i = 0, reqlist = xbb->request_lists; 2840 i < xbb->max_requests; i++, reqlist++){ 2841 #ifdef XBB_USE_BOUNCE_BUFFERS 2842 if (reqlist->bounce != NULL) { 2843 free(reqlist->bounce, M_XENBLOCKBACK); 2844 reqlist->bounce = NULL; 2845 } 2846 #endif 2847 if (reqlist->gnt_handles != NULL) { 2848 free(reqlist->gnt_handles, M_XENBLOCKBACK); 2849 reqlist->gnt_handles = NULL; 2850 } 2851 } 2852 free(xbb->request_lists, M_XENBLOCKBACK); 2853 xbb->request_lists = NULL; 2854 } 2855 2856 xbb->flags &= ~XBBF_RING_CONNECTED; 2857 return (0); 2858 } 2859 2860 /** 2861 * Map shared memory ring into domain local address space, initialize 2862 * ring control structures, and bind an interrupt to the event channel 2863 * used to notify us of ring changes. 2864 * 2865 * \param xbb Per-instance xbb configuration structure. 2866 */ 2867 static int 2868 xbb_connect_ring(struct xbb_softc *xbb) 2869 { 2870 struct gnttab_map_grant_ref gnts[XBB_MAX_RING_PAGES]; 2871 struct gnttab_map_grant_ref *gnt; 2872 u_int ring_idx; 2873 int error; 2874 2875 if ((xbb->flags & XBBF_RING_CONNECTED) != 0) 2876 return (0); 2877 2878 /* 2879 * Kva for our ring is at the tail of the region of kva allocated 2880 * by xbb_alloc_communication_mem(). 
2881 */ 2882 xbb->ring_config.va = xbb->kva 2883 + (xbb->kva_size 2884 - (xbb->ring_config.ring_pages * PAGE_SIZE)); 2885 xbb->ring_config.gnt_addr = xbb->gnt_base_addr 2886 + (xbb->kva_size 2887 - (xbb->ring_config.ring_pages * PAGE_SIZE)); 2888 2889 for (ring_idx = 0, gnt = gnts; 2890 ring_idx < xbb->ring_config.ring_pages; 2891 ring_idx++, gnt++) { 2892 2893 gnt->host_addr = xbb->ring_config.gnt_addr 2894 + (ring_idx * PAGE_SIZE); 2895 gnt->flags = GNTMAP_host_map; 2896 gnt->ref = xbb->ring_config.ring_ref[ring_idx]; 2897 gnt->dom = xbb->otherend_id; 2898 } 2899 2900 error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, gnts, 2901 xbb->ring_config.ring_pages); 2902 if (error) 2903 panic("blkback: Ring page grant table op failed (%d)", error); 2904 2905 for (ring_idx = 0, gnt = gnts; 2906 ring_idx < xbb->ring_config.ring_pages; 2907 ring_idx++, gnt++) { 2908 if (gnt->status != 0) { 2909 xbb->ring_config.va = 0; 2910 xenbus_dev_fatal(xbb->dev, EACCES, 2911 "Ring shared page mapping failed. " 2912 "Status %d.", gnt->status); 2913 return (EACCES); 2914 } 2915 xbb->ring_config.handle[ring_idx] = gnt->handle; 2916 xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr; 2917 } 2918 2919 /* Initialize the ring based on ABI. */ 2920 switch (xbb->abi) { 2921 case BLKIF_PROTOCOL_NATIVE: 2922 { 2923 blkif_sring_t *sring; 2924 sring = (blkif_sring_t *)xbb->ring_config.va; 2925 BACK_RING_INIT(&xbb->rings.native, sring, 2926 xbb->ring_config.ring_pages * PAGE_SIZE); 2927 break; 2928 } 2929 case BLKIF_PROTOCOL_X86_32: 2930 { 2931 blkif_x86_32_sring_t *sring_x86_32; 2932 sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va; 2933 BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32, 2934 xbb->ring_config.ring_pages * PAGE_SIZE); 2935 break; 2936 } 2937 case BLKIF_PROTOCOL_X86_64: 2938 { 2939 blkif_x86_64_sring_t *sring_x86_64; 2940 sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va; 2941 BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64, 2942 xbb->ring_config.ring_pages * PAGE_SIZE); 2943 break; 2944 } 2945 default: 2946 panic("Unexpected blkif protocol ABI."); 2947 } 2948 2949 xbb->flags |= XBBF_RING_CONNECTED; 2950 2951 error = xen_intr_bind_remote_port(xbb->dev, 2952 xbb->otherend_id, 2953 xbb->ring_config.evtchn, 2954 xbb_filter, 2955 /*ithread_handler*/NULL, 2956 /*arg*/xbb, 2957 INTR_TYPE_BIO | INTR_MPSAFE, 2958 &xbb->xen_intr_handle); 2959 if (error) { 2960 (void)xbb_disconnect(xbb); 2961 xenbus_dev_fatal(xbb->dev, error, "binding event channel"); 2962 return (error); 2963 } 2964 2965 DPRINTF("rings connected!\n"); 2966 2967 return 0; 2968 } 2969 2970 /* Needed to make bit_alloc() macro work */ 2971 #define calloc(count, size) malloc((count)*(size), M_XENBLOCKBACK, \ 2972 M_NOWAIT|M_ZERO); 2973 2974 /** 2975 * Size KVA and pseudo-physical address allocations based on negotiated 2976 * values for the size and number of I/O requests, and the size of our 2977 * communication ring. 2978 * 2979 * \param xbb Per-instance xbb configuration structure. 2980 * 2981 * These address spaces are used to dynamically map pages in the 2982 * front-end's domain into our own. 
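 * The allocation is sized below as the request-mapping KVA
 * (max_requests * max_request_segments pages) plus the pages needed
 * for the shared communication ring.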
*/
2984 static int
2985 xbb_alloc_communication_mem(struct xbb_softc *xbb)
2986 {
2987 xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
2988 xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
2989 xbb->kva_size = xbb->reqlist_kva_size +
2990 (xbb->ring_config.ring_pages * PAGE_SIZE);
2991
2992 xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages);
2993 if (xbb->kva_free == NULL)
2994 return (ENOMEM);
2995
2996 DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n",
2997 device_get_nameunit(xbb->dev), xbb->kva_size,
2998 xbb->reqlist_kva_size);
2999 /*
3000 * Reserve a range of pseudo physical memory that we can map
3001 * into kva. These pages will only be backed by machine
3002 * pages ("real memory") during the lifetime of front-end requests
3003 * via grant table operations.
3004 */
3005 xbb->pseudo_phys_res_id = 0;
3006 xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id,
3007 xbb->kva_size);
3008 if (xbb->pseudo_phys_res == NULL) {
3009 xbb->kva = 0;
3010 return (ENOMEM);
3011 }
3012 xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
3013 xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
3014
3015 DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n",
3016 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
3017 (uintmax_t)xbb->gnt_base_addr);
3018 return (0);
3019 }
3020
3021 /**
3022 * Collect front-end information from the XenStore.
3023 *
3024 * \param xbb Per-instance xbb configuration structure.
3025 */
3026 static int
3027 xbb_collect_frontend_info(struct xbb_softc *xbb)
3028 {
3029 char protocol_abi[64];
3030 const char *otherend_path;
3031 int error;
3032 u_int ring_idx;
3033 u_int ring_page_order;
3034 size_t ring_size;
3035
3036 otherend_path = xenbus_get_otherend_path(xbb->dev);
3037
3038 /*
3039 * Protocol defaults valid even if all negotiation fails.
3040 */
3041 xbb->ring_config.ring_pages = 1;
3042 xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
3043 xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE;
3044
3045 /*
3046 * Mandatory data (used in all versions of the protocol) first.
3047 */
3048 error = xs_scanf(XST_NIL, otherend_path,
3049 "event-channel", NULL, "%" PRIu32,
3050 &xbb->ring_config.evtchn);
3051 if (error != 0) {
3052 xenbus_dev_fatal(xbb->dev, error,
3053 "Unable to retrieve event-channel information "
3054 "from frontend %s. Unable to connect.",
3055 xenbus_get_otherend_path(xbb->dev));
3056 return (error);
3057 }
3058
3059 /*
3060 * These fields are initialized to legacy protocol defaults
3061 * so we only need to fail if reading the updated value succeeds
3062 * and the new value is outside of its allowed range.
3063 *
3064 * \note xs_gather() returns on the first encountered error, so
3065 * we must use independent calls in order to guarantee
3066 * we don't miss information in a sparsely populated front-end
3067 * tree.
3068 *
3069 * \note xs_scanf() does not update variables for unmatched
3070 * fields.
3071 */
3072 ring_page_order = 0;
3073 xbb->max_requests = 32;
3074
3075 (void)xs_scanf(XST_NIL, otherend_path,
3076 "ring-page-order", NULL, "%u",
3077 &ring_page_order);
3078 xbb->ring_config.ring_pages = 1 << ring_page_order;
3079 ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
3080 xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
3081
3082 if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) {
3083 xenbus_dev_fatal(xbb->dev, EINVAL,
3084 "Front-end specified ring-pages of %u "
3085 "exceeds backend limit of %u. 
" 3086 "Unable to connect.", 3087 xbb->ring_config.ring_pages, 3088 XBB_MAX_RING_PAGES); 3089 return (EINVAL); 3090 } 3091 3092 if (xbb->ring_config.ring_pages == 1) { 3093 error = xs_gather(XST_NIL, otherend_path, 3094 "ring-ref", "%" PRIu32, 3095 &xbb->ring_config.ring_ref[0], 3096 NULL); 3097 if (error != 0) { 3098 xenbus_dev_fatal(xbb->dev, error, 3099 "Unable to retrieve ring information " 3100 "from frontend %s. Unable to " 3101 "connect.", 3102 xenbus_get_otherend_path(xbb->dev)); 3103 return (error); 3104 } 3105 } else { 3106 /* Multi-page ring format. */ 3107 for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages; 3108 ring_idx++) { 3109 char ring_ref_name[]= "ring_refXX"; 3110 3111 snprintf(ring_ref_name, sizeof(ring_ref_name), 3112 "ring-ref%u", ring_idx); 3113 error = xs_scanf(XST_NIL, otherend_path, 3114 ring_ref_name, NULL, "%" PRIu32, 3115 &xbb->ring_config.ring_ref[ring_idx]); 3116 if (error != 0) { 3117 xenbus_dev_fatal(xbb->dev, error, 3118 "Failed to retriev grant " 3119 "reference for page %u of " 3120 "shared ring. Unable " 3121 "to connect.", ring_idx); 3122 return (error); 3123 } 3124 } 3125 } 3126 3127 error = xs_gather(XST_NIL, otherend_path, 3128 "protocol", "%63s", protocol_abi, 3129 NULL); 3130 if (error != 0 3131 || !strcmp(protocol_abi, XEN_IO_PROTO_ABI_NATIVE)) { 3132 /* 3133 * Assume native if the frontend has not 3134 * published ABI data or it has published and 3135 * matches our own ABI. 3136 */ 3137 xbb->abi = BLKIF_PROTOCOL_NATIVE; 3138 } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) { 3139 3140 xbb->abi = BLKIF_PROTOCOL_X86_32; 3141 } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) { 3142 3143 xbb->abi = BLKIF_PROTOCOL_X86_64; 3144 } else { 3145 3146 xenbus_dev_fatal(xbb->dev, EINVAL, 3147 "Unknown protocol ABI (%s) published by " 3148 "frontend. Unable to connect.", protocol_abi); 3149 return (EINVAL); 3150 } 3151 return (0); 3152 } 3153 3154 /** 3155 * Allocate per-request data structures given request size and number 3156 * information negotiated with the front-end. 3157 * 3158 * \param xbb Per-instance xbb configuration structure. 3159 */ 3160 static int 3161 xbb_alloc_requests(struct xbb_softc *xbb) 3162 { 3163 struct xbb_xen_req *req; 3164 struct xbb_xen_req *last_req; 3165 3166 /* 3167 * Allocate request book keeping datastructures. 3168 */ 3169 xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests), 3170 M_XENBLOCKBACK, M_NOWAIT|M_ZERO); 3171 if (xbb->requests == NULL) { 3172 xenbus_dev_fatal(xbb->dev, ENOMEM, 3173 "Unable to allocate request structures"); 3174 return (ENOMEM); 3175 } 3176 3177 req = xbb->requests; 3178 last_req = &xbb->requests[xbb->max_requests - 1]; 3179 STAILQ_INIT(&xbb->request_free_stailq); 3180 while (req <= last_req) { 3181 STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links); 3182 req++; 3183 } 3184 return (0); 3185 } 3186 3187 static int 3188 xbb_alloc_request_lists(struct xbb_softc *xbb) 3189 { 3190 struct xbb_xen_reqlist *reqlist; 3191 int i; 3192 3193 /* 3194 * If no requests can be merged, we need 1 request list per 3195 * in flight request. 
3196 */ 3197 xbb->request_lists = malloc(xbb->max_requests * 3198 sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); 3199 if (xbb->request_lists == NULL) { 3200 xenbus_dev_fatal(xbb->dev, ENOMEM, 3201 "Unable to allocate request list structures"); 3202 return (ENOMEM); 3203 } 3204 3205 STAILQ_INIT(&xbb->reqlist_free_stailq); 3206 STAILQ_INIT(&xbb->reqlist_pending_stailq); 3207 for (i = 0; i < xbb->max_requests; i++) { 3208 int seg; 3209 3210 reqlist = &xbb->request_lists[i]; 3211 3212 reqlist->xbb = xbb; 3213 3214 #ifdef XBB_USE_BOUNCE_BUFFERS 3215 reqlist->bounce = malloc(xbb->max_reqlist_size, 3216 M_XENBLOCKBACK, M_NOWAIT); 3217 if (reqlist->bounce == NULL) { 3218 xenbus_dev_fatal(xbb->dev, ENOMEM, 3219 "Unable to allocate request " 3220 "bounce buffers"); 3221 return (ENOMEM); 3222 } 3223 #endif /* XBB_USE_BOUNCE_BUFFERS */ 3224 3225 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments * 3226 sizeof(*reqlist->gnt_handles), 3227 M_XENBLOCKBACK, M_NOWAIT|M_ZERO); 3228 if (reqlist->gnt_handles == NULL) { 3229 xenbus_dev_fatal(xbb->dev, ENOMEM, 3230 "Unable to allocate request " 3231 "grant references"); 3232 return (ENOMEM); 3233 } 3234 3235 for (seg = 0; seg < xbb->max_reqlist_segments; seg++) 3236 reqlist->gnt_handles[seg] = GRANT_REF_INVALID; 3237 3238 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); 3239 } 3240 return (0); 3241 } 3242 3243 /** 3244 * Supply information about the physical device to the frontend 3245 * via XenBus. 3246 * 3247 * \param xbb Per-instance xbb configuration structure. 3248 */ 3249 static int 3250 xbb_publish_backend_info(struct xbb_softc *xbb) 3251 { 3252 struct xs_transaction xst; 3253 const char *our_path; 3254 const char *leaf; 3255 int error; 3256 3257 our_path = xenbus_get_node(xbb->dev); 3258 while (1) { 3259 error = xs_transaction_start(&xst); 3260 if (error != 0) { 3261 xenbus_dev_fatal(xbb->dev, error, 3262 "Error publishing backend info " 3263 "(start transaction)"); 3264 return (error); 3265 } 3266 3267 leaf = "sectors"; 3268 error = xs_printf(xst, our_path, leaf, 3269 "%"PRIu64, xbb->media_num_sectors); 3270 if (error != 0) 3271 break; 3272 3273 /* XXX Support all VBD attributes here. */ 3274 leaf = "info"; 3275 error = xs_printf(xst, our_path, leaf, "%u", 3276 xbb->flags & XBBF_READ_ONLY 3277 ? VDISK_READONLY : 0); 3278 if (error != 0) 3279 break; 3280 3281 leaf = "sector-size"; 3282 error = xs_printf(xst, our_path, leaf, "%u", 3283 xbb->sector_size); 3284 if (error != 0) 3285 break; 3286 3287 error = xs_transaction_end(xst, 0); 3288 if (error == 0) { 3289 return (0); 3290 } else if (error != EAGAIN) { 3291 xenbus_dev_fatal(xbb->dev, error, "ending transaction"); 3292 return (error); 3293 } 3294 } 3295 3296 xenbus_dev_fatal(xbb->dev, error, "writing %s/%s", 3297 our_path, leaf); 3298 xs_transaction_end(xst, 1); 3299 return (error); 3300 } 3301 3302 /** 3303 * Connect to our blkfront peer now that it has completed publishing 3304 * its configuration into the XenStore. 3305 * 3306 * \param xbb Per-instance xbb configuration structure. 3307 */ 3308 static void 3309 xbb_connect(struct xbb_softc *xbb) 3310 { 3311 int error; 3312 3313 if (xenbus_get_state(xbb->dev) == XenbusStateConnected) 3314 return; 3315 3316 if (xbb_collect_frontend_info(xbb) != 0) 3317 return; 3318 3319 xbb->flags &= ~XBBF_SHUTDOWN; 3320 3321 /* 3322 * We limit the maximum number of reqlist segments to the maximum 3323 * number of segments in the ring, or our absolute maximum, 3324 * whichever is smaller. 
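 * (max_request_segments * max_requests is the total number of segments
 * that all requests in the ring could carry; XBB_MAX_SEGMENTS_PER_REQLIST
 * is the per-request-list ceiling.)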
3325 */ 3326 xbb->max_reqlist_segments = MIN(xbb->max_request_segments * 3327 xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST); 3328 3329 /* 3330 * The maximum size is simply a function of the number of segments 3331 * we can handle. 3332 */ 3333 xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE; 3334 3335 /* Allocate resources whose size depends on front-end configuration. */ 3336 error = xbb_alloc_communication_mem(xbb); 3337 if (error != 0) { 3338 xenbus_dev_fatal(xbb->dev, error, 3339 "Unable to allocate communication memory"); 3340 return; 3341 } 3342 3343 error = xbb_alloc_requests(xbb); 3344 if (error != 0) { 3345 /* Specific errors are reported by xbb_alloc_requests(). */ 3346 return; 3347 } 3348 3349 error = xbb_alloc_request_lists(xbb); 3350 if (error != 0) { 3351 /* Specific errors are reported by xbb_alloc_request_lists(). */ 3352 return; 3353 } 3354 3355 /* 3356 * Connect communication channel. 3357 */ 3358 error = xbb_connect_ring(xbb); 3359 if (error != 0) { 3360 /* Specific errors are reported by xbb_connect_ring(). */ 3361 return; 3362 } 3363 3364 if (xbb_publish_backend_info(xbb) != 0) { 3365 /* 3366 * If we can't publish our data, we cannot participate 3367 * in this connection, and waiting for a front-end state 3368 * change will not help the situation. 3369 */ 3370 (void)xbb_disconnect(xbb); 3371 return; 3372 } 3373 3374 /* Ready for I/O. */ 3375 xenbus_set_state(xbb->dev, XenbusStateConnected); 3376 } 3377 3378 /*-------------------------- Device Teardown Support -------------------------*/ 3379 /** 3380 * Perform device shutdown functions. 3381 * 3382 * \param xbb Per-instance xbb configuration structure. 3383 * 3384 * Mark this instance as shutting down, wait for any active I/O on the 3385 * backend device/file to drain, disconnect from the front-end, and notify 3386 * any waiters (e.g. a thread invoking our detach method) that detach can 3387 * now proceed. 3388 */ 3389 static int 3390 xbb_shutdown(struct xbb_softc *xbb) 3391 { 3392 XenbusState frontState; 3393 int error; 3394 3395 DPRINTF("\n"); 3396 3397 /* 3398 * Due to the need to drop our mutex during some 3399 * xenbus operations, it is possible for two threads 3400 * to attempt to close out shutdown processing at 3401 * the same time. Tell the caller that hits this 3402 * race to try back later. 3403 */ 3404 if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0) 3405 return (EAGAIN); 3406 3407 xbb->flags |= XBBF_IN_SHUTDOWN; 3408 mtx_unlock(&xbb->lock); 3409 3410 if (xenbus_get_state(xbb->dev) < XenbusStateClosing) 3411 xenbus_set_state(xbb->dev, XenbusStateClosing); 3412 3413 frontState = xenbus_get_otherend_state(xbb->dev); 3414 mtx_lock(&xbb->lock); 3415 xbb->flags &= ~XBBF_IN_SHUTDOWN; 3416 3417 /* The front can submit I/O until entering the closed state. */ 3418 if (frontState < XenbusStateClosed) 3419 return (EAGAIN); 3420 3421 DPRINTF("\n"); 3422 3423 /* Indicate shutdown is in progress. */ 3424 xbb->flags |= XBBF_SHUTDOWN; 3425 3426 /* Disconnect from the front-end. */ 3427 error = xbb_disconnect(xbb); 3428 if (error != 0) { 3429 /* 3430 * Requests still outstanding. We'll be called again 3431 * once they complete. 3432 */ 3433 KASSERT(error == EAGAIN, 3434 ("%s: Unexpected xbb_disconnect() failure %d", 3435 __func__, error)); 3436 3437 return (error); 3438 } 3439 3440 DPRINTF("\n"); 3441 3442 /* Indicate to xbb_detach() that is it safe to proceed. 
*/ 3443 wakeup(xbb); 3444 3445 return (0); 3446 } 3447 3448 /** 3449 * Report an attach time error to the console and Xen, and cleanup 3450 * this instance by forcing immediate detach processing. 3451 * 3452 * \param xbb Per-instance xbb configuration structure. 3453 * \param err Errno describing the error. 3454 * \param fmt Printf style format and arguments 3455 */ 3456 static void 3457 xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...) 3458 { 3459 va_list ap; 3460 va_list ap_hotplug; 3461 3462 va_start(ap, fmt); 3463 va_copy(ap_hotplug, ap); 3464 xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev), 3465 "hotplug-error", fmt, ap_hotplug); 3466 va_end(ap_hotplug); 3467 xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3468 "hotplug-status", "error"); 3469 3470 xenbus_dev_vfatal(xbb->dev, err, fmt, ap); 3471 va_end(ap); 3472 3473 xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3474 "online", "0"); 3475 xbb_detach(xbb->dev); 3476 } 3477 3478 /*---------------------------- NewBus Entrypoints ----------------------------*/ 3479 /** 3480 * Inspect a XenBus device and claim it if is of the appropriate type. 3481 * 3482 * \param dev NewBus device object representing a candidate XenBus device. 3483 * 3484 * \return 0 for success, errno codes for failure. 3485 */ 3486 static int 3487 xbb_probe(device_t dev) 3488 { 3489 3490 if (!strcmp(xenbus_get_type(dev), "vbd")) { 3491 device_set_desc(dev, "Backend Virtual Block Device"); 3492 device_quiet(dev); 3493 return (0); 3494 } 3495 3496 return (ENXIO); 3497 } 3498 3499 /** 3500 * Setup sysctl variables to control various Block Back parameters. 3501 * 3502 * \param xbb Xen Block Back softc. 3503 * 3504 */ 3505 static void 3506 xbb_setup_sysctl(struct xbb_softc *xbb) 3507 { 3508 struct sysctl_ctx_list *sysctl_ctx = NULL; 3509 struct sysctl_oid *sysctl_tree = NULL; 3510 3511 sysctl_ctx = device_get_sysctl_ctx(xbb->dev); 3512 if (sysctl_ctx == NULL) 3513 return; 3514 3515 sysctl_tree = device_get_sysctl_tree(xbb->dev); 3516 if (sysctl_tree == NULL) 3517 return; 3518 3519 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3520 "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0, 3521 "fake the flush command"); 3522 3523 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3524 "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0, 3525 "send a real flush for N flush requests"); 3526 3527 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3528 "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0, 3529 "Don't coalesce contiguous requests"); 3530 3531 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3532 "reqs_received", CTLFLAG_RW, &xbb->reqs_received, 3533 "how many I/O requests we have received"); 3534 3535 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3536 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed, 3537 "how many I/O requests have been completed"); 3538 3539 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3540 "reqs_queued_for_completion", CTLFLAG_RW, 3541 &xbb->reqs_queued_for_completion, 3542 "how many I/O requests queued but not yet pushed"); 3543 3544 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3545 "reqs_completed_with_error", CTLFLAG_RW, 3546 &xbb->reqs_completed_with_error, 3547 "how many I/O requests completed with error status"); 3548 3549 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3550 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch, 3551 "how many I/O dispatches 
were forced"); 3552 3553 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3554 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch, 3555 "how many I/O dispatches were normal"); 3556 3557 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3558 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch, 3559 "total number of I/O dispatches"); 3560 3561 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3562 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages, 3563 "how many times we have run out of KVA"); 3564 3565 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3566 "request_shortages", CTLFLAG_RW, 3567 &xbb->request_shortages, 3568 "how many times we have run out of requests"); 3569 3570 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3571 "max_requests", CTLFLAG_RD, &xbb->max_requests, 0, 3572 "maximum outstanding requests (negotiated)"); 3573 3574 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3575 "max_request_segments", CTLFLAG_RD, 3576 &xbb->max_request_segments, 0, 3577 "maximum number of pages per requests (negotiated)"); 3578 3579 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3580 "max_request_size", CTLFLAG_RD, 3581 &xbb->max_request_size, 0, 3582 "maximum size in bytes of a request (negotiated)"); 3583 3584 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3585 "ring_pages", CTLFLAG_RD, 3586 &xbb->ring_config.ring_pages, 0, 3587 "communication channel pages (negotiated)"); 3588 } 3589 3590 /** 3591 * Attach to a XenBus device that has been claimed by our probe routine. 3592 * 3593 * \param dev NewBus device object representing this Xen Block Back instance. 3594 * 3595 * \return 0 for success, errno codes for failure. 3596 */ 3597 static int 3598 xbb_attach(device_t dev) 3599 { 3600 struct xbb_softc *xbb; 3601 int error; 3602 u_int max_ring_page_order; 3603 3604 DPRINTF("Attaching to %s\n", xenbus_get_node(dev)); 3605 3606 /* 3607 * Basic initialization. 3608 * After this block it is safe to call xbb_detach() 3609 * to clean up any allocated data for this instance. 3610 */ 3611 xbb = device_get_softc(dev); 3612 xbb->dev = dev; 3613 xbb->otherend_id = xenbus_get_otherend_id(dev); 3614 TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb); 3615 mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF); 3616 3617 /* 3618 * Publish protocol capabilities for consumption by the 3619 * front-end. 3620 */ 3621 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3622 "feature-barrier", "1"); 3623 if (error) { 3624 xbb_attach_failed(xbb, error, "writing %s/feature-barrier", 3625 xenbus_get_node(xbb->dev)); 3626 return (error); 3627 } 3628 3629 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3630 "feature-flush-cache", "1"); 3631 if (error) { 3632 xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache", 3633 xenbus_get_node(xbb->dev)); 3634 return (error); 3635 } 3636 3637 max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1; 3638 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3639 "max-ring-page-order", "%u", max_ring_page_order); 3640 if (error) { 3641 xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order", 3642 xenbus_get_node(xbb->dev)); 3643 return (error); 3644 } 3645 3646 /* Collect physical device information. 
*/
3647 error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev),
3648 "device-type", NULL, &xbb->dev_type,
3649 NULL);
3650 if (error != 0)
3651 xbb->dev_type = NULL;
3652
3653 error = xs_gather(XST_NIL, xenbus_get_node(dev),
3654 "mode", NULL, &xbb->dev_mode,
3655 "params", NULL, &xbb->dev_name,
3656 NULL);
3657 if (error != 0) {
3658 xbb_attach_failed(xbb, error, "reading backend fields at %s",
3659 xenbus_get_node(dev));
3660 return (ENXIO);
3661 }
3662
3663 /* Parse fopen style mode flags. */
3664 if (strchr(xbb->dev_mode, 'w') == NULL)
3665 xbb->flags |= XBBF_READ_ONLY;
3666
3667 /*
3668 * Verify the physical device is present and can support
3669 * the desired I/O mode.
3670 */
3671 DROP_GIANT();
3672 error = xbb_open_backend(xbb);
3673 PICKUP_GIANT();
3674 if (error != 0) {
3675 xbb_attach_failed(xbb, error, "Unable to open %s",
3676 xbb->dev_name);
3677 return (ENXIO);
3678 }
3679
3680 /* Use devstat(9) for recording statistics. */
3681 xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev),
3682 xbb->sector_size,
3683 DEVSTAT_ALL_SUPPORTED,
3684 DEVSTAT_TYPE_DIRECT
3685 | DEVSTAT_TYPE_IF_OTHER,
3686 DEVSTAT_PRIORITY_OTHER);
3687
3688 xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
3689 xbb->sector_size,
3690 DEVSTAT_ALL_SUPPORTED,
3691 DEVSTAT_TYPE_DIRECT
3692 | DEVSTAT_TYPE_IF_OTHER,
3693 DEVSTAT_PRIORITY_OTHER);
3694 /*
3695 * Set up sysctl variables.
3696 */
3697 xbb_setup_sysctl(xbb);
3698
3699 /*
3700 * Create a taskqueue for doing work that must occur from a
3701 * thread context.
3702 */
3703 xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev),
3704 M_NOWAIT,
3705 taskqueue_thread_enqueue,
3706 /*context*/&xbb->io_taskqueue);
3707 if (xbb->io_taskqueue == NULL) {
3708 xbb_attach_failed(xbb, ENOMEM, "Unable to create taskqueue");
3709 return (ENOMEM);
3710 }
3711
3712 taskqueue_start_threads(&xbb->io_taskqueue,
3713 /*num threads*/1,
3714 /*priority*/PWAIT,
3715 /*thread name*/
3716 "%s taskq", device_get_nameunit(dev));
3717
3718 /* Update hot-plug status to satisfy xend. */
3719 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3720 "hotplug-status", "connected");
3721 if (error) {
3722 xbb_attach_failed(xbb, error, "writing %s/hotplug-status",
3723 xenbus_get_node(xbb->dev));
3724 return (error);
3725 }
3726
3727 /* Tell the front end that we are ready to connect. */
3728 xenbus_set_state(dev, XenbusStateInitWait);
3729
3730 return (0);
3731 }
3732
3733 /**
3734 * Detach from a block back device instance.
3735 *
3736 * \param dev NewBus device object representing this Xen Block Back instance.
3737 *
3738 * \return 0 for success, errno codes for failure.
3739 *
3740 * \note A block back device may be detached at any time in its life-cycle,
3741 * including part way through the attach process. For this reason,
3742 * initialization order and the initialization state checks in this
3743 * routine must be carefully coupled so that attach time failures
3744 * are gracefully handled.
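 * Each resource allocated at attach time is therefore checked for a
 * valid (non-NULL) value before it is released below.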
3745 */ 3746 static int 3747 xbb_detach(device_t dev) 3748 { 3749 struct xbb_softc *xbb; 3750 3751 DPRINTF("\n"); 3752 3753 xbb = device_get_softc(dev); 3754 mtx_lock(&xbb->lock); 3755 while (xbb_shutdown(xbb) == EAGAIN) { 3756 msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0, 3757 "xbb_shutdown", 0); 3758 } 3759 mtx_unlock(&xbb->lock); 3760 3761 DPRINTF("\n"); 3762 3763 if (xbb->io_taskqueue != NULL) 3764 taskqueue_free(xbb->io_taskqueue); 3765 3766 if (xbb->xbb_stats != NULL) 3767 devstat_remove_entry(xbb->xbb_stats); 3768 3769 if (xbb->xbb_stats_in != NULL) 3770 devstat_remove_entry(xbb->xbb_stats_in); 3771 3772 xbb_close_backend(xbb); 3773 3774 if (xbb->dev_mode != NULL) { 3775 free(xbb->dev_mode, M_XENSTORE); 3776 xbb->dev_mode = NULL; 3777 } 3778 3779 if (xbb->dev_type != NULL) { 3780 free(xbb->dev_type, M_XENSTORE); 3781 xbb->dev_type = NULL; 3782 } 3783 3784 if (xbb->dev_name != NULL) { 3785 free(xbb->dev_name, M_XENSTORE); 3786 xbb->dev_name = NULL; 3787 } 3788 3789 mtx_destroy(&xbb->lock); 3790 return (0); 3791 } 3792 3793 /** 3794 * Prepare this block back device for suspension of this VM. 3795 * 3796 * \param dev NewBus device object representing this Xen Block Back instance. 3797 * 3798 * \return 0 for success, errno codes for failure. 3799 */ 3800 static int 3801 xbb_suspend(device_t dev) 3802 { 3803 #ifdef NOT_YET 3804 struct xbb_softc *sc = device_get_softc(dev); 3805 3806 /* Prevent new requests being issued until we fix things up. */ 3807 mtx_lock(&sc->xb_io_lock); 3808 sc->connected = BLKIF_STATE_SUSPENDED; 3809 mtx_unlock(&sc->xb_io_lock); 3810 #endif 3811 3812 return (0); 3813 } 3814 3815 /** 3816 * Perform any processing required to recover from a suspended state. 3817 * 3818 * \param dev NewBus device object representing this Xen Block Back instance. 3819 * 3820 * \return 0 for success, errno codes for failure. 3821 */ 3822 static int 3823 xbb_resume(device_t dev) 3824 { 3825 return (0); 3826 } 3827 3828 /** 3829 * Handle state changes expressed via the XenStore by our front-end peer. 3830 * 3831 * \param dev NewBus device object representing this Xen 3832 * Block Back instance. 3833 * \param frontend_state The new state of the front-end. 3834 * 3835 * \return 0 for success, errno codes for failure. 
3836 */ 3837 static void 3838 xbb_frontend_changed(device_t dev, XenbusState frontend_state) 3839 { 3840 struct xbb_softc *xbb = device_get_softc(dev); 3841 3842 DPRINTF("frontend_state=%s, xbb_state=%s\n", 3843 xenbus_strstate(frontend_state), 3844 xenbus_strstate(xenbus_get_state(xbb->dev))); 3845 3846 switch (frontend_state) { 3847 case XenbusStateInitialising: 3848 break; 3849 case XenbusStateInitialised: 3850 case XenbusStateConnected: 3851 xbb_connect(xbb); 3852 break; 3853 case XenbusStateClosing: 3854 case XenbusStateClosed: 3855 mtx_lock(&xbb->lock); 3856 xbb_shutdown(xbb); 3857 mtx_unlock(&xbb->lock); 3858 if (frontend_state == XenbusStateClosed) 3859 xenbus_set_state(xbb->dev, XenbusStateClosed); 3860 break; 3861 default: 3862 xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend", 3863 frontend_state); 3864 break; 3865 } 3866 } 3867 3868 /*---------------------------- NewBus Registration ---------------------------*/ 3869 static device_method_t xbb_methods[] = { 3870 /* Device interface */ 3871 DEVMETHOD(device_probe, xbb_probe), 3872 DEVMETHOD(device_attach, xbb_attach), 3873 DEVMETHOD(device_detach, xbb_detach), 3874 DEVMETHOD(device_shutdown, bus_generic_shutdown), 3875 DEVMETHOD(device_suspend, xbb_suspend), 3876 DEVMETHOD(device_resume, xbb_resume), 3877 3878 /* Xenbus interface */ 3879 DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed), 3880 3881 { 0, 0 } 3882 }; 3883 3884 static driver_t xbb_driver = { 3885 "xbbd", 3886 xbb_methods, 3887 sizeof(struct xbb_softc), 3888 }; 3889 devclass_t xbb_devclass; 3890 3891 DRIVER_MODULE(xbbd, xenbusb_back, xbb_driver, xbb_devclass, 0, 0); 3892