/*-
 * Copyright (c) 2009-2012 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Ken Merry           (Spectra Logic Corporation)
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * \file blkback.c
 *
 * \brief Device driver supporting the vending of block storage from
 *        a FreeBSD domain to other domains.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/kdb.h>
#include <sys/module.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/bitstring.h>
#include <sys/sdt.h>

#include <geom/geom.h>

#include <machine/_inttypes.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <xen/xen-os.h>
#include <xen/blkif.h>
#include <xen/gnttab.h>
#include <xen/xen_intr.h>

#include <xen/interface/event_channel.h>
#include <xen/interface/grant_table.h>

#include <xen/xenbus/xenbusvar.h>

/*--------------------------- Compile-time Tunables --------------------------*/
/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd.
 */
#define	XBB_MAX_RING_PAGES		32

/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBB_MAX_REQUESTS					\
	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES)

/**
 * \brief Define to force all I/O to be performed on memory owned by the
 *        backend device, with a copy-in/out to the remote domain's memory.
 *
 * \note This option is currently required when this driver's domain is
 *       operating in HVM mode on a system using an IOMMU.
 *
 * This driver uses Xen's grant table API to gain access to the memory of
 * the remote domains it serves.  When our domain is operating in PV mode,
 * the grant table mechanism directly updates our domain's page table entries
 * to point to the physical pages of the remote domain.  This scheme guarantees
 * that blkback and the backing devices it uses can safely perform DMA
 * operations to satisfy requests.  In HVM mode, Xen may use a HW IOMMU to
 * insure that our domain cannot DMA to pages owned by another domain.  As
 * of Xen 4.0, IOMMU mappings for HVM guests are not updated via the grant
 * table API.  For this reason, in HVM mode, we must bounce all requests into
 * memory that is mapped into our domain at domain startup and thus has
 * valid IOMMU mappings.
 */
#define XBB_USE_BOUNCE_BUFFERS

/**
 * \brief Define to enable rudimentary request logging to the console.
 */
#undef XBB_DEBUG

/*---------------------------------- Macros ----------------------------------*/
/**
 * Custom malloc type for all driver allocations.
 */
static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");

#ifdef XBB_DEBUG
#define DPRINTF(fmt, args...)					\
    printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif

/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 */
#define	XBB_MAX_REQUEST_SIZE					\
	MIN(MAXPHYS, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE)

/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBB_MAX_SEGMENTS_PER_REQUEST				\
	(MIN(UIO_MAXIOV,					\
	     MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,		\
		 (XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1)))

/**
 * The maximum number of ring pages that we can allow per request list.
 * We limit this to the maximum number of segments per request, because
 * that is already a reasonable number of segments to aggregate.  This
 * number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST,
 * because that would leave situations where we can't dispatch even one
 * large request.
 */
#define	XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST

/*--------------------------- Forward Declarations ---------------------------*/
struct xbb_softc;
struct xbb_xen_req;

static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
			      ...)
			      __attribute__((format(printf, 3, 4)));
static int  xbb_shutdown(struct xbb_softc *xbb);

/*------------------------------ Data Structures -----------------------------*/

STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req);

typedef enum {
	XBB_REQLIST_NONE	= 0x00,
	XBB_REQLIST_MAPPED	= 0x01
} xbb_reqlist_flags;

struct xbb_xen_reqlist {
	/**
	 * Back reference to the parent block back instance for this
	 * request.  Used during bio_done handling.
	 */
	struct xbb_softc	*xbb;

	/**
	 * BLKIF_OP code for this request.
	 */
	int			 operation;

	/**
	 * Set to BLKIF_RSP_* to indicate request status.
	 *
	 * This field allows an error status to be recorded even if the
	 * delivery of this status must be deferred.  Deferred reporting
	 * is necessary, for example, when an error is detected during
	 * completion processing of one bio when other bios for this
	 * request are still outstanding.
	 */
	int			 status;

	/**
	 * Number of 512 byte sectors not transferred.
	 */
	int			 residual_512b_sectors;

	/**
	 * Starting sector number of the first request in the list.
	 */
	off_t			 starting_sector_number;

	/**
	 * If we're going to coalesce, the next contiguous sector would be
	 * this one.
	 */
	off_t			 next_contig_sector;

	/**
	 * Number of child requests in the list.
	 */
	int			 num_children;

	/**
	 * Number of I/O requests still pending on the backend.
	 */
	int			 pendcnt;

	/**
	 * Total number of segments for requests in the list.
	 */
	int			 nr_segments;

	/**
	 * Flags for this particular request list.
	 */
	xbb_reqlist_flags	 flags;

	/**
	 * Kernel virtual address space reserved for this request
	 * list structure and used to map the remote domain's pages for
	 * this I/O, into our domain's address space.
	 */
	uint8_t			*kva;

	/**
	 * Base, pseudo-physical address corresponding to the start
	 * of this request's kva region.
	 */
	uint64_t		 gnt_base;


#ifdef XBB_USE_BOUNCE_BUFFERS
	/**
	 * Pre-allocated domain local memory used to proxy remote
	 * domain memory during I/O operations.
	 */
	uint8_t			*bounce;
#endif

	/**
	 * Array of grant handles (one per page) used to map this request.
	 */
	grant_handle_t		*gnt_handles;

	/**
	 * Device statistics request ordering type (ordered or simple).
	 */
	devstat_tag_type	 ds_tag_type;

	/**
	 * Device statistics request type (read, write, no_data).
	 */
	devstat_trans_flags	 ds_trans_type;

	/**
	 * The start time for this request.
	 */
	struct bintime		 ds_t0;

	/**
	 * Linked list of contiguous requests with the same operation type.
	 */
	struct xbb_xen_req_list	 contig_req_list;

	/**
	 * Linked list links used to aggregate idle requests in the
	 * request list free pool (xbb->reqlist_free_stailq) and pending
	 * requests waiting for execution (xbb->reqlist_pending_stailq).
	 */
	STAILQ_ENTRY(xbb_xen_reqlist) links;
};

STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist);

/**
 * \brief Object tracking an in-flight I/O from a Xen VBD consumer.
 */
struct xbb_xen_req {
	/**
	 * Linked list links used to aggregate requests into a reqlist
	 * and to store them in the request free pool.
	 */
	STAILQ_ENTRY(xbb_xen_req) links;

	/**
	 * The remote domain's identifier for this I/O request.
	 */
	uint64_t		 id;

	/**
	 * The number of pages currently mapped for this request.
	 */
	int			 nr_pages;

	/**
	 * The number of 512 byte sectors comprising this request.
	 */
	int			 nr_512b_sectors;

	/**
	 * BLKIF_OP code for this request.
	 */
	int			 operation;

	/**
	 * Storage used for non-native ring requests.
	 */
	blkif_request_t		 ring_req_storage;

	/**
	 * Pointer to the Xen request in the ring.
	 */
	blkif_request_t		*ring_req;

	/**
	 * Consumer index for this request.
	 */
	RING_IDX		 req_ring_idx;

	/**
	 * The start time for this request.
	 */
	struct bintime		 ds_t0;

	/**
	 * Pointer back to our parent request list.
	 */
	struct xbb_xen_reqlist	*reqlist;
};
SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req);

/**
 * \brief Configuration data for the shared memory request ring
 *        used to communicate with the front-end client of this
 *        driver.
 */
struct xbb_ring_config {
	/** KVA address where ring memory is mapped. */
	vm_offset_t	va;

	/** The pseudo-physical address where ring memory is mapped.*/
	uint64_t	gnt_addr;

	/**
	 * Grant table handles, one per-ring page, returned by the
	 * hypervisor upon mapping of the ring and required to
	 * unmap it when a connection is torn down.
	 */
	grant_handle_t	handle[XBB_MAX_RING_PAGES];

	/**
	 * The device bus address returned by the hypervisor when
	 * mapping the ring and required to unmap it when a connection
	 * is torn down.
	 */
	uint64_t	bus_addr[XBB_MAX_RING_PAGES];

	/** The number of ring pages mapped for the current connection. */
	u_int		ring_pages;

	/**
	 * The grant references, one per-ring page, supplied by the
	 * front-end, allowing us to reference the ring pages in the
	 * front-end's domain and to map these pages into our own domain.
	 */
	grant_ref_t	ring_ref[XBB_MAX_RING_PAGES];

	/** The interrupt driven event channel used to signal ring events. */
	evtchn_port_t	evtchn;
};

/**
 * Per-instance connection state flags.
 */
typedef enum
{
	/**
	 * The front-end requested a read-only mount of the
	 * back-end device/file.
	 */
	XBBF_READ_ONLY         = 0x01,

	/** Communication with the front-end has been established. */
	XBBF_RING_CONNECTED    = 0x02,

	/**
	 * Front-end requests exist in the ring and are waiting for
	 * xbb_xen_req objects to free up.
	 */
	XBBF_RESOURCE_SHORTAGE = 0x04,

	/** Connection teardown in progress. */
	XBBF_SHUTDOWN          = 0x08,

	/** A thread is already performing shutdown processing. */
	XBBF_IN_SHUTDOWN       = 0x10
} xbb_flag_t;

/** Backend device type. */
typedef enum {
	/** Backend type unknown. */
	XBB_TYPE_NONE		= 0x00,

	/**
	 * Backend type disk (access via cdev switch
	 * strategy routine).
	 */
	XBB_TYPE_DISK		= 0x01,

	/** Backend type file (access via vnode operations). */
	XBB_TYPE_FILE		= 0x02
} xbb_type;

/**
 * \brief Structure used to memoize information about a per-request
 *        scatter-gather list.
 *
 * The chief benefit of using this data structure is it avoids having
 * to reparse the possibly discontiguous S/G list in the original
 * request.  Due to the way that the mapping of the memory backing an
 * I/O transaction is handled by Xen, a second pass is unavoidable.
 * At least this way the second walk is a simple array traversal.
 *
 * \note A single Scatter/Gather element in the block interface covers
 *       at most 1 machine page.  In this context a sector (blkif
 *       nomenclature, not what I'd choose) is a 512b aligned unit
 *       of mapping within the machine page referenced by an S/G
 *       element.
 */
struct xbb_sg {
	/** The number of 512b data chunks mapped in this S/G element. */
	int16_t nsect;

	/**
	 * The index (0 based) of the first 512b data chunk mapped
	 * in this S/G element.
	 */
	uint8_t first_sect;

	/**
	 * The index (0 based) of the last 512b data chunk mapped
	 * in this S/G element.
	 */
	uint8_t last_sect;
};

/**
 * Character device backend specific configuration data.
 */
struct xbb_dev_data {
	/** Cdev used for device backend access. */
	struct cdev	*cdev;

	/** Cdev switch used for device backend access. */
	struct cdevsw	*csw;

	/** Used to hold a reference on opened cdev backend devices. */
	int		 dev_ref;
};

/**
 * File backend specific configuration data.
 */
struct xbb_file_data {
	/** Credentials to use for vnode backed (file based) I/O. */
	struct ucred	*cred;

	/**
	 * \brief Array of io vectors used to process file based I/O.
	 *
	 * Only a single file based request is outstanding per-xbb instance,
	 * so we only need one of these.
	 */
	struct iovec	xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
#ifdef XBB_USE_BOUNCE_BUFFERS

	/**
	 * \brief Array of io vectors used to handle bouncing of file reads.
	 *
	 * Vnode operations are free to modify uio data during their
	 * execution.  In the case of a read with bounce buffering active,
	 * we need some of the data from the original uio in order to
	 * bounce-out the read data.  This array serves as the temporary
	 * storage for this saved data.
	 */
	struct iovec	saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];

	/**
	 * \brief Array of memoized bounce buffer kva offsets used
	 *        in the file based backend.
	 *
	 * Due to the way that the mapping of the memory backing an
	 * I/O transaction is handled by Xen, a second pass through
	 * the request sg elements is unavoidable.  We memoize the computed
	 * bounce address here to reduce the cost of the second walk.
	 */
	void		*xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST];
#endif /* XBB_USE_BOUNCE_BUFFERS */
};

/**
 * Collection of backend type specific data.
 */
union xbb_backend_data {
	struct xbb_dev_data  dev;
	struct xbb_file_data file;
};

/**
 * Function signature of backend specific I/O handlers.
 */
typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
			      struct xbb_xen_reqlist *reqlist, int operation,
			      int flags);

/**
 * Per-instance configuration data.
 */
struct xbb_softc {

	/**
	 * Task-queue used to process I/O requests.
	 */
	struct taskqueue	*io_taskqueue;

	/**
	 * Single "run the request queue" task enqueued
	 * on io_taskqueue.
	 */
	struct task		 io_task;

	/** Device type for this instance. */
	xbb_type		 device_type;

	/** NewBus device corresponding to this instance. */
	device_t		 dev;

	/** Backend specific dispatch routine for this instance. */
	xbb_dispatch_t		 dispatch_io;

	/** The number of requests outstanding on the backend device/file. */
	int			 active_request_count;

	/** Free pool of request tracking structures. */
	struct xbb_xen_req_list	 request_free_stailq;

	/** Array, sized at connection time, of request tracking structures. */
	struct xbb_xen_req	*requests;

	/** Free pool of request list structures. */
	struct xbb_xen_reqlist_list reqlist_free_stailq;

	/** List of pending request lists awaiting execution. */
	struct xbb_xen_reqlist_list reqlist_pending_stailq;

	/** Array, sized at connection time, of request list structures. */
	struct xbb_xen_reqlist	*request_lists;

	/**
	 * Global pool of kva used for mapping remote domain ring
	 * and I/O transaction data.
	 */
	vm_offset_t		 kva;

	/** Pseudo-physical address corresponding to kva. */
	uint64_t		 gnt_base_addr;

	/** The size of the global kva pool. */
	int			 kva_size;

	/** The size of the KVA area used for request lists. */
	int			 reqlist_kva_size;

	/** The number of pages of KVA used for request lists */
	int			 reqlist_kva_pages;

	/** Bitmap of free KVA pages */
	bitstr_t		*kva_free;

	/**
	 * \brief Cached value of the front-end's domain id.
	 *
	 * This value is used at once for each mapped page in
	 * a transaction.  We cache it to avoid incurring the
	 * cost of an ivar access every time this is needed.
	 */
	domid_t			 otherend_id;

	/**
	 * \brief The blkif protocol abi in effect.
	 *
	 * There are situations where the back and front ends can
	 * have a different, native abi (e.g. intel x86_64 and
	 * 32bit x86 domains on the same machine).  The back-end
	 * always accommodates the front-end's native abi.  That
	 * value is pulled from the XenStore and recorded here.
	 */
	int			 abi;

	/**
	 * \brief The maximum number of requests and request lists allowed
	 *        to be in flight at a time.
	 *
	 * This value is negotiated via the XenStore.
	 */
	u_int			 max_requests;

	/**
	 * \brief The maximum number of segments (1 page per segment)
	 *        that can be mapped by a request.
	 *
	 * This value is negotiated via the XenStore.
	 */
	u_int			 max_request_segments;

	/**
	 * \brief Maximum number of segments per request list.
	 *
	 * This value is derived from and will generally be larger than
	 * max_request_segments.
	 */
	u_int			 max_reqlist_segments;

	/**
	 * The maximum size of any request to this back-end
	 * device.
	 *
	 * This value is negotiated via the XenStore.
	 */
	u_int			 max_request_size;

	/**
	 * The maximum size of any request list.  This is derived directly
	 * from max_reqlist_segments.
	 */
	u_int			 max_reqlist_size;

	/** Various configuration and state bit flags. */
	xbb_flag_t		 flags;

	/** Ring mapping and interrupt configuration data. */
	struct xbb_ring_config	 ring_config;

	/** Runtime, cross-abi safe, structures for ring access. */
	blkif_back_rings_t	 rings;

	/** IRQ mapping for the communication ring event channel. */
	xen_intr_handle_t	 xen_intr_handle;

	/**
	 * \brief Backend access mode flags (e.g. write, or read-only).
	 *
	 * This value is passed to us by the front-end via the XenStore.
	 */
	char			*dev_mode;

	/**
	 * \brief Backend device type (e.g. "disk", "cdrom", "floppy").
	 *
	 * This value is passed to us by the front-end via the XenStore.
	 * Currently unused.
	 */
	char			*dev_type;

	/**
	 * \brief Backend device/file identifier.
	 *
	 * This value is passed to us by the front-end via the XenStore.
	 * We expect this to be a POSIX path indicating the file or
	 * device to open.
	 */
	char			*dev_name;

	/**
	 * Vnode corresponding to the backend device node or file
	 * we are accessing.
	 */
	struct vnode		*vn;

	union xbb_backend_data	 backend;

	/** The native sector size of the backend. */
	u_int			 sector_size;

	/** log2 of sector_size. */
	u_int			 sector_size_shift;

	/** Size in bytes of the backend device or file. */
	off_t			 media_size;

	/**
	 * \brief media_size expressed in terms of the backend native
	 *        sector size.
	 *
	 * (e.g. xbb->media_size >> xbb->sector_size_shift).
	 */
	uint64_t		 media_num_sectors;

	/**
	 * \brief Array of memoized scatter gather data computed during the
	 *        conversion of blkif ring requests to internal xbb_xen_req
	 *        structures.
	 *
	 * Ring processing is serialized so we only need one of these.
	 */
	struct xbb_sg		 xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST];

	/**
	 * Temporary grant table map used in xbb_dispatch_io().  When
	 * XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the
	 * stack could cause a stack overflow.
	 */
	struct gnttab_map_grant_ref maps[XBB_MAX_SEGMENTS_PER_REQLIST];

	/** Mutex protecting per-instance data. */
	struct mtx		 lock;

	/**
	 * Resource representing allocated physical address space
	 * associated with our per-instance kva region.
	 */
	struct resource		*pseudo_phys_res;

	/** Resource id for allocated physical address space. */
	int			 pseudo_phys_res_id;

	/**
	 * I/O statistics from BlockBack dispatch down.  These are
	 * coalesced requests, and we start them right before execution.
	 */
	struct devstat		*xbb_stats;

	/**
	 * I/O statistics coming into BlockBack.  These are the requests as
	 * we get them from BlockFront.  They are started as soon as we
	 * receive a request, and completed when the I/O is complete.
	 */
	struct devstat		*xbb_stats_in;

	/** Disable sending flush to the backend */
	int			 disable_flush;

	/** Send a real flush for every N flush requests */
	int			 flush_interval;

	/** Count of flush requests in the interval */
	int			 flush_count;

	/** Don't coalesce requests if this is set */
	int			 no_coalesce_reqs;

	/** Number of requests we have received */
	uint64_t		 reqs_received;

	/** Number of requests we have completed */
	uint64_t		 reqs_completed;

	/** Number of requests we queued but not pushed */
	uint64_t		 reqs_queued_for_completion;

	/** Number of requests we completed with an error status */
	uint64_t		 reqs_completed_with_error;

	/** How many forced dispatches (i.e. without coalescing) have happened */
	uint64_t		 forced_dispatch;

	/** How many normal dispatches have happened */
	uint64_t		 normal_dispatch;

	/** How many total dispatches have happened */
	uint64_t		 total_dispatch;

	/** How many times we have run out of KVA */
	uint64_t		 kva_shortages;

	/** How many times we have run out of request structures */
	uint64_t		 request_shortages;

	/** Watch to wait for hotplug script execution */
	struct xs_watch		 hotplug_watch;
};

/*---------------------------- Request Processing ----------------------------*/
/**
 * Allocate an internal transaction tracking structure from the free pool.
 *
 * \param xbb  Per-instance xbb configuration structure.
 *
 * \return  On success, a pointer to the allocated xbb_xen_req structure.
 *          Otherwise NULL.
 */
static inline struct xbb_xen_req *
xbb_get_req(struct xbb_softc *xbb)
{
	struct xbb_xen_req *req;

	req = NULL;

	mtx_assert(&xbb->lock, MA_OWNED);

	if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
		STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
		xbb->active_request_count++;
	}

	return (req);
}

/**
 * Return an allocated transaction tracking structure to the free pool.
 *
 * \param xbb  Per-instance xbb configuration structure.
 * \param req  The request structure to free.
 */
static inline void
xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
{
	mtx_assert(&xbb->lock, MA_OWNED);

	STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
	xbb->active_request_count--;

	KASSERT(xbb->active_request_count >= 0,
		("xbb_release_req: negative active count"));
}

/**
 * Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool.
 *
 * \param xbb       Per-instance xbb configuration structure.
 * \param req_list  The list of requests to free.
 * \param nreqs     The number of items in the list.
 */
static inline void
xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
		 int nreqs)
{
	mtx_assert(&xbb->lock, MA_OWNED);

	STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
	xbb->active_request_count -= nreqs;

	KASSERT(xbb->active_request_count >= 0,
		("xbb_release_reqs: negative active count"));
}

/**
 * Given a page index and 512b sector offset within that page,
 * calculate an offset into a request's kva region.
 *
 * \param reqlist The request structure whose kva region will be accessed.
 * \param pagenr  The page index used to compute the kva offset.
 * \param sector  The 512b sector index used to compute the page relative
 *                kva offset.
 *
 * \return  The computed global KVA offset.
 */
static inline uint8_t *
xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
	return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
}

#ifdef XBB_USE_BOUNCE_BUFFERS
/**
 * Given a page index and 512b sector offset within that page,
 * calculate an offset into a request's local bounce memory region.
 *
 * \param reqlist The request structure whose bounce region will be accessed.
 * \param pagenr  The page index used to compute the bounce offset.
 * \param sector  The 512b sector index used to compute the page relative
 *                bounce offset.
 *
 * \return  The computed global bounce buffer address.
 */
static inline uint8_t *
xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
	return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
}
#endif

/**
 * Given a page number and 512b sector offset within that page,
 * calculate an offset into the request's memory region that the
 * underlying backend device/file should use for I/O.
 *
 * \param reqlist The request structure whose I/O region will be accessed.
 * \param pagenr  The page index used to compute the I/O offset.
 * \param sector  The 512b sector index used to compute the page relative
 *                I/O offset.
 *
 * \return  The computed global I/O address.
 *
 * Depending on configuration, this will either be a local bounce buffer
 * or a pointer to the memory mapped in from the front-end domain for
 * this request.
 */
static inline uint8_t *
xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
#ifdef XBB_USE_BOUNCE_BUFFERS
	return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
#else
	return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
#endif
}

/**
 * Given a page index and 512b sector offset within that page, calculate
 * an offset into the local pseudo-physical address space used to map a
 * front-end's request data into a request.
 *
 * \param reqlist The request list structure whose pseudo-physical region
 *                will be accessed.
 * \param pagenr  The page index used to compute the pseudo-physical offset.
 * \param sector  The 512b sector index used to compute the page relative
 *                pseudo-physical offset.
 *
 * \return  The computed global pseudo-physical address.
 *
 * Depending on configuration, this will either be a local bounce buffer
 * or a pointer to the memory mapped in from the front-end domain for
 * this request.
 */
static inline uintptr_t
xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
	struct xbb_softc *xbb;

	xbb = reqlist->xbb;

	return ((uintptr_t)(xbb->gnt_base_addr +
	    (uintptr_t)(reqlist->kva - xbb->kva) +
	    (PAGE_SIZE * pagenr) + (sector << 9)));
}

/**
 * Get Kernel Virtual Address space for mapping requests.
 *
 * \param xbb         Per-instance xbb configuration structure.
 * \param nr_pages    Number of pages needed.
 * \param check_only  If set, check for free KVA but don't allocate it.
 * \param have_lock   If set, xbb lock is already held.
 *
 * \return  On success, a pointer to the allocated KVA region.  Otherwise NULL.
 *
 * Note:  This should be unnecessary once we have either chaining or
 * scatter/gather support for struct bio.  At that point we'll be able to
 * put multiple addresses and lengths in one bio/bio chain and won't need
 * to map everything into one virtual segment.
 */
static uint8_t *
xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
{
	int	 first_clear;
	int	 num_clear;
	uint8_t	*free_kva;
	int	 i;

	KASSERT(nr_pages != 0, ("xbb_get_kva of zero length"));

	first_clear = 0;
	free_kva = NULL;

	mtx_lock(&xbb->lock);

	/*
	 * Look for the first available page.  If there are none, we're done.
	 */
	bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);

	if (first_clear == -1)
		goto bailout;

	/*
	 * Starting at the first available page, look for consecutive free
	 * pages that will satisfy the user's request.
	 */
	for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
		/*
		 * If this is true, the page is used, so we have to reset
		 * the number of clear pages and the first clear page
		 * (since it pointed to a region with an insufficient number
		 * of clear pages).
		 */
		if (bit_test(xbb->kva_free, i)) {
			num_clear = 0;
			first_clear = -1;
			continue;
		}

		if (first_clear == -1)
			first_clear = i;

		/*
		 * If this is true, we've found a large enough free region
		 * to satisfy the request.
		 */
		if (++num_clear == nr_pages) {

			bit_nset(xbb->kva_free, first_clear,
				 first_clear + nr_pages - 1);

			free_kva = xbb->kva +
				(uint8_t *)((intptr_t)first_clear * PAGE_SIZE);

			KASSERT(free_kva >= (uint8_t *)xbb->kva &&
				free_kva + (nr_pages * PAGE_SIZE) <=
				(uint8_t *)xbb->ring_config.va,
				("Free KVA %p len %d out of range, "
				 "kva = %#jx, ring VA = %#jx\n", free_kva,
				 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
				 (uintmax_t)xbb->ring_config.va));
			break;
		}
	}

bailout:

	if (free_kva == NULL) {
		xbb->flags |= XBBF_RESOURCE_SHORTAGE;
		xbb->kva_shortages++;
	}

	mtx_unlock(&xbb->lock);

	return (free_kva);
}

/**
 * Free allocated KVA.
 *
 * \param xbb       Per-instance xbb configuration structure.
 * \param kva_ptr   Pointer to allocated KVA region.
 * \param nr_pages  Number of pages in the KVA region.
 */
static void
xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
{
	intptr_t start_page;

	mtx_assert(&xbb->lock, MA_OWNED);

	start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
	bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);

}

/**
 * Unmap the front-end pages associated with this I/O request.
 *
 * \param req  The request structure to unmap.
 */
static void
xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
{
	struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST];
	u_int			      i;
	u_int			      invcount;
	int			      error;

	invcount = 0;
	for (i = 0; i < reqlist->nr_segments; i++) {

		if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
			continue;

		unmap[invcount].host_addr    = xbb_get_gntaddr(reqlist, i, 0);
		unmap[invcount].dev_bus_addr = 0;
		unmap[invcount].handle       = reqlist->gnt_handles[i];
		reqlist->gnt_handles[i]      = GRANT_REF_INVALID;
		invcount++;
	}

	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					  unmap, invcount);
	KASSERT(error == 0, ("Grant table operation failed"));
}

/**
 * Allocate an internal transaction tracking structure from the free pool.
 *
 * \param xbb  Per-instance xbb configuration structure.
 *
 * \return  On success, a pointer to the allocated xbb_xen_reqlist structure.
 *          Otherwise NULL.
 */
static inline struct xbb_xen_reqlist *
xbb_get_reqlist(struct xbb_softc *xbb)
{
	struct xbb_xen_reqlist *reqlist;

	reqlist = NULL;

	mtx_assert(&xbb->lock, MA_OWNED);

	if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {

		STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
		reqlist->flags = XBB_REQLIST_NONE;
		reqlist->kva = NULL;
		reqlist->status = BLKIF_RSP_OKAY;
		reqlist->residual_512b_sectors = 0;
		reqlist->num_children = 0;
		reqlist->nr_segments = 0;
		STAILQ_INIT(&reqlist->contig_req_list);
	}

	return (reqlist);
}

/**
 * Return an allocated transaction tracking structure to the free pool.
 *
 * \param xbb     Per-instance xbb configuration structure.
 * \param req     The request list structure to free.
 * \param wakeup  If set, wakeup the work thread if freeing this reqlist
 *                during a resource shortage condition.
 */
static inline void
xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
		    int wakeup)
{

	mtx_assert(&xbb->lock, MA_OWNED);

	if (wakeup) {
		wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
		xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
	}

	if (reqlist->kva != NULL)
		xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);

	xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);

	STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);

	if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
		/*
		 * Shutdown is in progress.  See if we can
		 * progress further now that one more request
		 * has completed and been returned to the
		 * free pool.
		 */
		xbb_shutdown(xbb);
	}

	if (wakeup != 0)
		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
}

/**
 * Request resources and do basic request setup.
 *
 * \param xbb         Per-instance xbb configuration structure.
 * \param reqlist     Pointer to reqlist pointer.
 * \param ring_req    Pointer to a block ring request.
 * \param ring_index  The ring index of this request.
 *
 * \return  0 for success, non-zero for failure.
 */
static int
xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
		  blkif_request_t *ring_req, RING_IDX ring_idx)
{
	struct xbb_xen_reqlist *nreqlist;
	struct xbb_xen_req     *nreq;

	nreqlist = NULL;
	nreq     = NULL;

	mtx_lock(&xbb->lock);

	/*
	 * We don't allow new resources to be allocated if we're in the
	 * process of shutting down.
	 */
	if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
		mtx_unlock(&xbb->lock);
		return (1);
	}

	/*
	 * Allocate a reqlist if the caller doesn't have one already.
	 */
	if (*reqlist == NULL) {
		nreqlist = xbb_get_reqlist(xbb);
		if (nreqlist == NULL)
			goto bailout_error;
	}

	/* We always allocate a request. */
	nreq = xbb_get_req(xbb);
	if (nreq == NULL)
		goto bailout_error;

	mtx_unlock(&xbb->lock);

	if (*reqlist == NULL) {
		*reqlist = nreqlist;
		nreqlist->operation = ring_req->operation;
		nreqlist->starting_sector_number = ring_req->sector_number;
		STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
				   links);
	}

	nreq->reqlist = *reqlist;
	nreq->req_ring_idx = ring_idx;
	nreq->id = ring_req->id;
	nreq->operation = ring_req->operation;

	if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
		bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req));
		nreq->ring_req = &nreq->ring_req_storage;
	} else {
		nreq->ring_req = ring_req;
	}

	binuptime(&nreq->ds_t0);
	devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
	STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
	(*reqlist)->num_children++;
	(*reqlist)->nr_segments += ring_req->nr_segments;

	return (0);

bailout_error:

	/*
	 * We're out of resources, so set the shortage flag.  The next time
	 * a request is released, we'll try waking up the work thread to
	 * see if we can allocate more resources.
	 */
	xbb->flags |= XBBF_RESOURCE_SHORTAGE;
	xbb->request_shortages++;

	if (nreq != NULL)
		xbb_release_req(xbb, nreq);

	if (nreqlist != NULL)
		xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);

	mtx_unlock(&xbb->lock);

	return (1);
}

/**
 * Create and queue a response to a blkif request.
 *
 * \param xbb     Per-instance xbb configuration structure.
 * \param req     The request structure to which to respond.
 * \param status  The status code to report.  See BLKIF_RSP_*
 *                in sys/xen/interface/io/blkif.h.
 */
static void
xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
{
	blkif_response_t *resp;

	/*
	 * The mutex is required here, and should be held across this call
	 * until after the subsequent call to xbb_push_responses().  This
	 * is to guarantee that another context won't queue responses and
	 * push them while we're active.
	 *
	 * That could lead to the other end being notified of responses
	 * before the resources have been freed on this end.  The other end
	 * would then be able to queue additional I/O, and we may run out
	 * of resources because we haven't freed them all yet.
	 */
	mtx_assert(&xbb->lock, MA_OWNED);

	/*
	 * Place on the response ring for the relevant domain.
	 * For now, only the spacing between entries is different
	 * in the different ABIs, not the response entry layout.
	 */
	switch (xbb->abi) {
	case BLKIF_PROTOCOL_NATIVE:
		resp = RING_GET_RESPONSE(&xbb->rings.native,
					 xbb->rings.native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
		resp = (blkif_response_t *)
		    RING_GET_RESPONSE(&xbb->rings.x86_32,
				      xbb->rings.x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
		resp = (blkif_response_t *)
		    RING_GET_RESPONSE(&xbb->rings.x86_64,
				      xbb->rings.x86_64.rsp_prod_pvt);
		break;
	default:
		panic("Unexpected blkif protocol ABI.");
	}

	resp->id        = req->id;
	resp->operation = req->operation;
	resp->status    = status;

	if (status != BLKIF_RSP_OKAY)
		xbb->reqs_completed_with_error++;

	xbb->rings.common.rsp_prod_pvt++;

	xbb->reqs_queued_for_completion++;

}

/**
 * Send queued responses to blkif requests.
 *
 * \param xbb            Per-instance xbb configuration structure.
 * \param run_taskqueue  Flag that is set to 1 if the taskqueue
 *                       should be run, 0 if it does not need to be run.
 * \param notify         Flag that is set to 1 if the other end should be
 *                       notified via irq, 0 if the other end should not be
 *                       notified.
 */
static void
xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
{
	int more_to_do;

	/*
	 * The mutex is required here.
	 */
	mtx_assert(&xbb->lock, MA_OWNED);

	more_to_do = 0;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);

	if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {

		/*
		 * Tail check for pending requests.  Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {

		more_to_do = 1;
	}

	xbb->reqs_completed += xbb->reqs_queued_for_completion;
	xbb->reqs_queued_for_completion = 0;

	*run_taskqueue = more_to_do;
}

/**
 * Complete a request list.
 *
 * \param xbb      Per-instance xbb configuration structure.
 * \param reqlist  Allocated internal request list structure.
 */
static void
xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
{
	struct xbb_xen_req *nreq;
	off_t		    sectors_sent;
	int		    notify, run_taskqueue;

	sectors_sent = 0;

	if (reqlist->flags & XBB_REQLIST_MAPPED)
		xbb_unmap_reqlist(reqlist);

	mtx_lock(&xbb->lock);

	/*
	 * All I/O is done, send the response.  A lock is not necessary
	 * to protect the request list, because all requests have
	 * completed.  Therefore this is the only context accessing this
	 * reqlist right now.  However, in order to make sure that no one
	 * else queues responses onto the queue or pushes them to the other
	 * side while we're active, we need to hold the lock across the
	 * calls to xbb_queue_response() and xbb_push_responses().
	 */
	STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
		off_t cur_sectors_sent;

		/* Put this response on the ring, but don't push yet */
		xbb_queue_response(xbb, nreq, reqlist->status);

		/* We don't report bytes sent if there is an error. */
		if (reqlist->status == BLKIF_RSP_OKAY)
			cur_sectors_sent = nreq->nr_512b_sectors;
		else
			cur_sectors_sent = 0;

		sectors_sent += cur_sectors_sent;

		devstat_end_transaction(xbb->xbb_stats_in,
					/*bytes*/cur_sectors_sent << 9,
					reqlist->ds_tag_type,
					reqlist->ds_trans_type,
					/*now*/NULL,
					/*then*/&nreq->ds_t0);
	}

	/*
	 * Take out any sectors not sent.  If we wind up negative (which
	 * might happen if an error is reported as well as a residual), just
	 * report 0 sectors sent.
	 */
	sectors_sent -= reqlist->residual_512b_sectors;
	if (sectors_sent < 0)
		sectors_sent = 0;

	devstat_end_transaction(xbb->xbb_stats,
				/*bytes*/ sectors_sent << 9,
				reqlist->ds_tag_type,
				reqlist->ds_trans_type,
				/*now*/NULL,
				/*then*/&reqlist->ds_t0);

	xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);

	xbb_push_responses(xbb, &run_taskqueue, &notify);

	mtx_unlock(&xbb->lock);

	if (run_taskqueue)
		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);

	if (notify)
		xen_intr_signal(xbb->xen_intr_handle);
}

/**
 * Completion handler for buffer I/O requests issued by the device
 * backend driver.
 *
 * \param bio  The buffer I/O request on which to perform completion
 *             processing.
 */
static void
xbb_bio_done(struct bio *bio)
{
	struct xbb_softc       *xbb;
	struct xbb_xen_reqlist *reqlist;

	reqlist = bio->bio_caller1;
	xbb     = reqlist->xbb;

	reqlist->residual_512b_sectors += bio->bio_resid >> 9;

	/*
	 * This is a bit imprecise.  With aggregated I/O a single
	 * request list can contain multiple front-end requests and
	 * multiple bios may point to a single request.  By carefully
	 * walking the request list, we could map residuals and errors
	 * back to the original front-end request, but the interface
	 * isn't sufficiently rich for us to properly report the error.
	 * So, we just treat the entire request list as having failed if an
	 * error occurs on any part.  And, if an error occurs, we treat
	 * the amount of data transferred as 0.
	 *
	 * For residuals, we report it on the overall aggregated device,
	 * but not on the individual requests, since we don't currently
	 * do the work to determine which front-end request to which the
	 * residual applies.
	 */
	if (bio->bio_error) {
		DPRINTF("BIO returned error %d for operation on device %s\n",
			bio->bio_error, xbb->dev_name);
		reqlist->status = BLKIF_RSP_ERROR;

		if (bio->bio_error == ENXIO
		 && xenbus_get_state(xbb->dev) == XenbusStateConnected) {

			/*
			 * Backend device has disappeared.  Signal the
			 * front-end that we (the device proxy) want to
			 * go away.
			 */
			xenbus_set_state(xbb->dev, XenbusStateClosing);
		}
	}

#ifdef XBB_USE_BOUNCE_BUFFERS
	if (bio->bio_cmd == BIO_READ) {
		vm_offset_t kva_offset;

		kva_offset = (vm_offset_t)bio->bio_data
			   - (vm_offset_t)reqlist->bounce;
		memcpy((uint8_t *)reqlist->kva + kva_offset,
		       bio->bio_data, bio->bio_bcount);
	}
#endif /* XBB_USE_BOUNCE_BUFFERS */

	/*
	 * Decrement the pending count for the request list.  When we're
	 * done with the requests, send status back for all of them.
	 */
	if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
		xbb_complete_reqlist(xbb, reqlist);

	g_destroy_bio(bio);
}

/**
 * Parse a blkif request into an internal request structure and send
 * it to the backend for processing.
 *
 * \param xbb      Per-instance xbb configuration structure.
 * \param reqlist  Allocated internal request list structure.
 *
 * \return  On success, 0.  For resource shortages, non-zero.
 *
 * This routine performs the backend common aspects of request parsing
 * including compiling an internal request structure, parsing the S/G
 * list and any secondary ring requests in which they may reside, and
 * the mapping of front-end I/O pages into our domain.
 */
static int
xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
{
	struct xbb_sg                *xbb_sg;
	struct gnttab_map_grant_ref  *map;
	struct blkif_request_segment *sg;
	struct blkif_request_segment *last_block_sg;
	struct xbb_xen_req	     *nreq;
	u_int			      nseg;
	u_int			      seg_idx;
	u_int			      block_segs;
	int			      nr_sects;
	int			      total_sects;
	int			      operation;
	uint8_t			      bio_flags;
	int			      error;

	reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
	bio_flags            = 0;
	total_sects	     = 0;
	nr_sects	     = 0;

	/*
	 * First determine whether we have enough free KVA to satisfy this
	 * request list.  If not, tell xbb_run_queue() so it can go to
	 * sleep until we have more KVA.
	 */
	reqlist->kva = NULL;
	if (reqlist->nr_segments != 0) {
		reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
		if (reqlist->kva == NULL) {
			/*
			 * If we're out of KVA, return ENOMEM.
			 */
			return (ENOMEM);
		}
	}

	binuptime(&reqlist->ds_t0);
	devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);

	switch (reqlist->operation) {
	case BLKIF_OP_WRITE_BARRIER:
		bio_flags       |= BIO_ORDERED;
		reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
		/* FALLTHROUGH */
	case BLKIF_OP_WRITE:
		operation = BIO_WRITE;
		reqlist->ds_trans_type = DEVSTAT_WRITE;
		if ((xbb->flags & XBBF_READ_ONLY) != 0) {
			DPRINTF("Attempt to write to read only device %s\n",
				xbb->dev_name);
			reqlist->status = BLKIF_RSP_ERROR;
			goto send_response;
		}
		break;
	case BLKIF_OP_READ:
		operation = BIO_READ;
		reqlist->ds_trans_type = DEVSTAT_READ;
		break;
	case BLKIF_OP_FLUSH_DISKCACHE:
		/*
		 * If this is true, the user has requested that we disable
		 * flush support.  So we just complete the requests
		 * successfully.
		 */
		if (xbb->disable_flush != 0) {
			goto send_response;
		}

		/*
		 * The user has requested that we only send a real flush
		 * for every N flush requests.  So keep count, and either
		 * complete the request immediately or queue it for the
		 * backend.
		 */
		if (xbb->flush_interval != 0) {
			if (++(xbb->flush_count) < xbb->flush_interval) {
				goto send_response;
			} else
				xbb->flush_count = 0;
		}

		operation = BIO_FLUSH;
		reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
		reqlist->ds_trans_type = DEVSTAT_NO_DATA;
		goto do_dispatch;
		/*NOTREACHED*/
	default:
		DPRINTF("error: unknown block io operation [%d]\n",
			reqlist->operation);
		reqlist->status = BLKIF_RSP_ERROR;
		goto send_response;
	}

	reqlist->xbb  = xbb;
	xbb_sg        = xbb->xbb_sgs;
	map	      = xbb->maps;
	seg_idx	      = 0;

	STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
		blkif_request_t		*ring_req;
		RING_IDX		 req_ring_idx;
		u_int			 req_seg_idx;

		ring_req	      = nreq->ring_req;
		req_ring_idx	      = nreq->req_ring_idx;
		nr_sects              = 0;
		nseg                  = ring_req->nr_segments;
		nreq->nr_pages        = nseg;
		nreq->nr_512b_sectors = 0;
		req_seg_idx	      = 0;
		sg	              = NULL;

		/* Check that number of segments is sane. */
		if (__predict_false(nseg == 0)
		 || __predict_false(nseg > xbb->max_request_segments)) {
			DPRINTF("Bad number of segments in request (%d)\n",
				nseg);
			reqlist->status = BLKIF_RSP_ERROR;
			goto send_response;
		}

		block_segs    = nseg;
		sg            = ring_req->seg;
		last_block_sg = sg + block_segs;

		while (sg < last_block_sg) {
			KASSERT(seg_idx <
				XBB_MAX_SEGMENTS_PER_REQLIST,
				("seg_idx %d is too large, max "
				"segs %d\n", seg_idx,
				XBB_MAX_SEGMENTS_PER_REQLIST));

			xbb_sg->first_sect = sg->first_sect;
			xbb_sg->last_sect  = sg->last_sect;
			xbb_sg->nsect =
			    (int8_t)(sg->last_sect -
			    sg->first_sect + 1);

			if ((sg->last_sect >= (PAGE_SIZE >> 9))
			 || (xbb_sg->nsect <= 0)) {
				reqlist->status = BLKIF_RSP_ERROR;
				goto send_response;
			}

			nr_sects += xbb_sg->nsect;
			map->host_addr = xbb_get_gntaddr(reqlist,
						seg_idx, /*sector*/0);
			KASSERT(map->host_addr + PAGE_SIZE <=
				xbb->ring_config.gnt_addr,
				("Host address %#jx len %d overlaps "
				 "ring address %#jx\n",
				(uintmax_t)map->host_addr, PAGE_SIZE,
				(uintmax_t)xbb->ring_config.gnt_addr));

			map->flags     = GNTMAP_host_map;
			map->ref       = sg->gref;
			map->dom       = xbb->otherend_id;
			if (operation == BIO_WRITE)
				map->flags |= GNTMAP_readonly;
			sg++;
			map++;
			xbb_sg++;
			seg_idx++;
			req_seg_idx++;
		}

		/* Convert to the disk's sector size */
		nreq->nr_512b_sectors = nr_sects;
		nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
		total_sects += nr_sects;

		if ((nreq->nr_512b_sectors &
		    ((xbb->sector_size >> 9) - 1)) != 0) {
			device_printf(xbb->dev, "%s: I/O size (%d) is not "
				      "a multiple of the backing store sector "
				      "size (%d)\n", __func__,
				      nreq->nr_512b_sectors << 9,
				      xbb->sector_size);
			reqlist->status = BLKIF_RSP_ERROR;
			goto send_response;
		}
	}

	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
					  xbb->maps, reqlist->nr_segments);
	if (error != 0)
		panic("Grant table operation failed (%d)", error);

	reqlist->flags |= XBB_REQLIST_MAPPED;

	for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
	     seg_idx++, map++){

		if (__predict_false(map->status != 0)) {
			DPRINTF("invalid buffer -- could not remap "
			        "it (%d)\n", map->status);
			DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags "
ref 0x%x, dom %d\n", seg_idx, 1748 map->host_addr, map->flags, map->ref, 1749 map->dom); 1750 reqlist->status = BLKIF_RSP_ERROR; 1751 goto send_response; 1752 } 1753 1754 reqlist->gnt_handles[seg_idx] = map->handle; 1755 } 1756 if (reqlist->starting_sector_number + total_sects > 1757 xbb->media_num_sectors) { 1758 1759 DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] " 1760 "extends past end of device %s\n", 1761 operation == BIO_READ ? "read" : "write", 1762 reqlist->starting_sector_number, 1763 reqlist->starting_sector_number + total_sects, 1764 xbb->dev_name); 1765 reqlist->status = BLKIF_RSP_ERROR; 1766 goto send_response; 1767 } 1768 1769 do_dispatch: 1770 1771 error = xbb->dispatch_io(xbb, 1772 reqlist, 1773 operation, 1774 bio_flags); 1775 1776 if (error != 0) { 1777 reqlist->status = BLKIF_RSP_ERROR; 1778 goto send_response; 1779 } 1780 1781 return (0); 1782 1783 send_response: 1784 1785 xbb_complete_reqlist(xbb, reqlist); 1786 1787 return (0); 1788 } 1789 1790 static __inline int 1791 xbb_count_sects(blkif_request_t *ring_req) 1792 { 1793 int i; 1794 int cur_size = 0; 1795 1796 for (i = 0; i < ring_req->nr_segments; i++) { 1797 int nsect; 1798 1799 nsect = (int8_t)(ring_req->seg[i].last_sect - 1800 ring_req->seg[i].first_sect + 1); 1801 if (nsect <= 0) 1802 break; 1803 1804 cur_size += nsect; 1805 } 1806 1807 return (cur_size); 1808 } 1809 1810 /** 1811 * Process incoming requests from the shared communication ring in response 1812 * to a signal on the ring's event channel. 1813 * 1814 * \param context Callback argument registerd during task initialization - 1815 * the xbb_softc for this instance. 1816 * \param pending The number of taskqueue_enqueue events that have 1817 * occurred since this handler was last run. 1818 */ 1819 static void 1820 xbb_run_queue(void *context, int pending) 1821 { 1822 struct xbb_softc *xbb; 1823 blkif_back_rings_t *rings; 1824 RING_IDX rp; 1825 uint64_t cur_sector; 1826 int cur_operation; 1827 struct xbb_xen_reqlist *reqlist; 1828 1829 1830 xbb = (struct xbb_softc *)context; 1831 rings = &xbb->rings; 1832 1833 /* 1834 * Work gather and dispatch loop. Note that we have a bias here 1835 * towards gathering I/O sent by blockfront. We first gather up 1836 * everything in the ring, as long as we have resources. Then we 1837 * dispatch one request, and then attempt to gather up any 1838 * additional requests that have come in while we were dispatching 1839 * the request. 1840 * 1841 * This allows us to get a clearer picture (via devstat) of how 1842 * many requests blockfront is queueing to us at any given time. 1843 */ 1844 for (;;) { 1845 int retval; 1846 1847 /* 1848 * Initialize reqlist to the last element in the pending 1849 * queue, if there is one. This allows us to add more 1850 * requests to that request list, if we have room. 1851 */ 1852 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq, 1853 xbb_xen_reqlist, links); 1854 if (reqlist != NULL) { 1855 cur_sector = reqlist->next_contig_sector; 1856 cur_operation = reqlist->operation; 1857 } else { 1858 cur_operation = 0; 1859 cur_sector = 0; 1860 } 1861 1862 /* 1863 * Cache req_prod to avoid accessing a cache line shared 1864 * with the frontend. 1865 */ 1866 rp = rings->common.sring->req_prod; 1867 1868 /* Ensure we see queued requests up to 'rp'. */ 1869 rmb(); 1870 1871 /** 1872 * Run so long as there is work to consume and the generation 1873 * of a response will not overflow the ring. 
1874 * 1875 * @note There's a 1 to 1 relationship between requests and 1876 * responses, so an overflow should never occur. This 1877 * test is to protect our domain from digesting bogus 1878 * data. Shouldn't we log this? 1879 */ 1880 while (rings->common.req_cons != rp 1881 && RING_REQUEST_CONS_OVERFLOW(&rings->common, 1882 rings->common.req_cons) == 0){ 1883 blkif_request_t ring_req_storage; 1884 blkif_request_t *ring_req; 1885 int cur_size; 1886 1887 switch (xbb->abi) { 1888 case BLKIF_PROTOCOL_NATIVE: 1889 ring_req = RING_GET_REQUEST(&xbb->rings.native, 1890 rings->common.req_cons); 1891 break; 1892 case BLKIF_PROTOCOL_X86_32: 1893 { 1894 struct blkif_x86_32_request *ring_req32; 1895 1896 ring_req32 = RING_GET_REQUEST( 1897 &xbb->rings.x86_32, rings->common.req_cons); 1898 blkif_get_x86_32_req(&ring_req_storage, 1899 ring_req32); 1900 ring_req = &ring_req_storage; 1901 break; 1902 } 1903 case BLKIF_PROTOCOL_X86_64: 1904 { 1905 struct blkif_x86_64_request *ring_req64; 1906 1907 ring_req64 =RING_GET_REQUEST(&xbb->rings.x86_64, 1908 rings->common.req_cons); 1909 blkif_get_x86_64_req(&ring_req_storage, 1910 ring_req64); 1911 ring_req = &ring_req_storage; 1912 break; 1913 } 1914 default: 1915 panic("Unexpected blkif protocol ABI."); 1916 /* NOTREACHED */ 1917 } 1918 1919 /* 1920 * Check for situations that would require closing 1921 * off this I/O for further coalescing: 1922 * - Coalescing is turned off. 1923 * - Current I/O is out of sequence with the previous 1924 * I/O. 1925 * - Coalesced I/O would be too large. 1926 */ 1927 if ((reqlist != NULL) 1928 && ((xbb->no_coalesce_reqs != 0) 1929 || ((xbb->no_coalesce_reqs == 0) 1930 && ((ring_req->sector_number != cur_sector) 1931 || (ring_req->operation != cur_operation) 1932 || ((ring_req->nr_segments + reqlist->nr_segments) > 1933 xbb->max_reqlist_segments))))) { 1934 reqlist = NULL; 1935 } 1936 1937 /* 1938 * Grab and check for all resources in one shot. 1939 * If we can't get all of the resources we need, 1940 * the shortage is noted and the thread will get 1941 * woken up when more resources are available. 1942 */ 1943 retval = xbb_get_resources(xbb, &reqlist, ring_req, 1944 xbb->rings.common.req_cons); 1945 1946 if (retval != 0) { 1947 /* 1948 * Resource shortage has been recorded. 1949 * We'll be scheduled to run once a request 1950 * object frees up due to a completion. 1951 */ 1952 break; 1953 } 1954 1955 /* 1956 * Signify that we can overwrite this request with 1957 * a response by incrementing our consumer index. 1958 * The response won't be generated until after 1959 * we've already consumed all necessary data out 1960 * of the version of the request in the ring buffer 1961 * (for native mode). We must update the consumer 1962 * index before issuing back-end I/O so there is 1963 * no possibility that it will complete and a 1964 * response be generated before we make room in 1965 * the queue for that response. 1966 */ 1967 xbb->rings.common.req_cons++; 1968 xbb->reqs_received++; 1969 1970 cur_size = xbb_count_sects(ring_req); 1971 cur_sector = ring_req->sector_number + cur_size; 1972 reqlist->next_contig_sector = cur_sector; 1973 cur_operation = ring_req->operation; 1974 } 1975 1976 /* Check for I/O to dispatch */ 1977 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); 1978 if (reqlist == NULL) { 1979 /* 1980 * We're out of work to do, put the task queue to 1981 * sleep. 1982 */ 1983 break; 1984 } 1985 1986 /* 1987 * Grab the first request off the queue and attempt 1988 * to dispatch it. 
1989 */ 1990 STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links); 1991 1992 retval = xbb_dispatch_io(xbb, reqlist); 1993 if (retval != 0) { 1994 /* 1995 * xbb_dispatch_io() returns non-zero only when 1996 * there is a resource shortage. If that's the 1997 * case, re-queue this request on the head of the 1998 * queue, and go to sleep until we have more 1999 * resources. 2000 */ 2001 STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq, 2002 reqlist, links); 2003 break; 2004 } else { 2005 /* 2006 * If we still have anything on the queue after 2007 * removing the head entry, that is because we 2008 * met one of the criteria to create a new 2009 * request list (outlined above), and we'll call 2010 * that a forced dispatch for statistical purposes. 2011 * 2012 * Otherwise, if there is only one element on the 2013 * queue, we coalesced everything available on 2014 * the ring and we'll call that a normal dispatch. 2015 */ 2016 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); 2017 2018 if (reqlist != NULL) 2019 xbb->forced_dispatch++; 2020 else 2021 xbb->normal_dispatch++; 2022 2023 xbb->total_dispatch++; 2024 } 2025 } 2026 } 2027 2028 /** 2029 * Interrupt handler bound to the shared ring's event channel. 2030 * 2031 * \param arg Callback argument registered during event channel 2032 * binding - the xbb_softc for this instance. 2033 */ 2034 static int 2035 xbb_filter(void *arg) 2036 { 2037 struct xbb_softc *xbb; 2038 2039 /* Defer to taskqueue thread. */ 2040 xbb = (struct xbb_softc *)arg; 2041 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); 2042 2043 return (FILTER_HANDLED); 2044 } 2045 2046 SDT_PROVIDER_DEFINE(xbb); 2047 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int"); 2048 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t", 2049 "uint64_t"); 2050 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int", 2051 "uint64_t", "uint64_t"); 2052 2053 /*----------------------------- Backend Handlers -----------------------------*/ 2054 /** 2055 * Backend handler for character device access. 2056 * 2057 * \param xbb Per-instance xbb configuration structure. 2058 * \param reqlist Allocated internal request list structure. 2059 * \param operation BIO_* I/O operation code. 2060 * \param bio_flags Additional bio_flags data to pass to any generated 2061 * bios (e.g. BIO_ORDERED). 2062 * 2063 * \return 0 for success, errno codes for failure.
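 *
 * \note Contiguous runs of request segments are combined into a single
 *       struct bio; a new bio is started whenever the mapped KVA of the
 *       next segment would not be contiguous, and discontiguous pieces
 *       that do not end on a device sector boundary are rejected.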
2064 */ 2065 static int 2066 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, 2067 int operation, int bio_flags) 2068 { 2069 struct xbb_dev_data *dev_data; 2070 struct bio *bios[XBB_MAX_SEGMENTS_PER_REQLIST]; 2071 off_t bio_offset; 2072 struct bio *bio; 2073 struct xbb_sg *xbb_sg; 2074 u_int nbio; 2075 u_int bio_idx; 2076 u_int nseg; 2077 u_int seg_idx; 2078 int error; 2079 2080 dev_data = &xbb->backend.dev; 2081 bio_offset = (off_t)reqlist->starting_sector_number 2082 << xbb->sector_size_shift; 2083 error = 0; 2084 nbio = 0; 2085 bio_idx = 0; 2086 2087 if (operation == BIO_FLUSH) { 2088 bio = g_new_bio(); 2089 if (__predict_false(bio == NULL)) { 2090 DPRINTF("Unable to allocate bio for BIO_FLUSH\n"); 2091 error = ENOMEM; 2092 return (error); 2093 } 2094 2095 bio->bio_cmd = BIO_FLUSH; 2096 bio->bio_flags |= BIO_ORDERED; 2097 bio->bio_dev = dev_data->cdev; 2098 bio->bio_offset = 0; 2099 bio->bio_data = 0; 2100 bio->bio_done = xbb_bio_done; 2101 bio->bio_caller1 = reqlist; 2102 bio->bio_pblkno = 0; 2103 2104 reqlist->pendcnt = 1; 2105 2106 SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush, 2107 device_get_unit(xbb->dev)); 2108 2109 (*dev_data->csw->d_strategy)(bio); 2110 2111 return (0); 2112 } 2113 2114 xbb_sg = xbb->xbb_sgs; 2115 bio = NULL; 2116 nseg = reqlist->nr_segments; 2117 2118 for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) { 2119 2120 /* 2121 * KVA will not be contiguous, so any additional 2122 * I/O will need to be represented in a new bio. 2123 */ 2124 if ((bio != NULL) 2125 && (xbb_sg->first_sect != 0)) { 2126 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { 2127 printf("%s: Discontiguous I/O request " 2128 "from domain %d ends on " 2129 "non-sector boundary\n", 2130 __func__, xbb->otherend_id); 2131 error = EINVAL; 2132 goto fail_free_bios; 2133 } 2134 bio = NULL; 2135 } 2136 2137 if (bio == NULL) { 2138 /* 2139 * Make sure that the start of this bio is 2140 * aligned to a device sector. 2141 */ 2142 if ((bio_offset & (xbb->sector_size - 1)) != 0){ 2143 printf("%s: Misaligned I/O request " 2144 "from domain %d\n", __func__, 2145 xbb->otherend_id); 2146 error = EINVAL; 2147 goto fail_free_bios; 2148 } 2149 2150 bio = bios[nbio++] = g_new_bio(); 2151 if (__predict_false(bio == NULL)) { 2152 error = ENOMEM; 2153 goto fail_free_bios; 2154 } 2155 bio->bio_cmd = operation; 2156 bio->bio_flags |= bio_flags; 2157 bio->bio_dev = dev_data->cdev; 2158 bio->bio_offset = bio_offset; 2159 bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx, 2160 xbb_sg->first_sect); 2161 bio->bio_done = xbb_bio_done; 2162 bio->bio_caller1 = reqlist; 2163 bio->bio_pblkno = bio_offset >> xbb->sector_size_shift; 2164 } 2165 2166 bio->bio_length += xbb_sg->nsect << 9; 2167 bio->bio_bcount = bio->bio_length; 2168 bio_offset += xbb_sg->nsect << 9; 2169 2170 if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) { 2171 2172 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { 2173 printf("%s: Discontiguous I/O request " 2174 "from domain %d ends on " 2175 "non-sector boundary\n", 2176 __func__, xbb->otherend_id); 2177 error = EINVAL; 2178 goto fail_free_bios; 2179 } 2180 /* 2181 * KVA will not be contiguous, so any additional 2182 * I/O will need to be represented in a new bio. 
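 * (Each segment of the request is mapped into its own page of our KVA
 * region, so a segment that stops short of the last sector of its page
 * cannot be virtually contiguous with the data of the following segment.)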
2183 */ 2184 bio = NULL; 2185 } 2186 } 2187 2188 reqlist->pendcnt = nbio; 2189 2190 for (bio_idx = 0; bio_idx < nbio; bio_idx++) 2191 { 2192 #ifdef XBB_USE_BOUNCE_BUFFERS 2193 vm_offset_t kva_offset; 2194 2195 kva_offset = (vm_offset_t)bios[bio_idx]->bio_data 2196 - (vm_offset_t)reqlist->bounce; 2197 if (operation == BIO_WRITE) { 2198 memcpy(bios[bio_idx]->bio_data, 2199 (uint8_t *)reqlist->kva + kva_offset, 2200 bios[bio_idx]->bio_bcount); 2201 } 2202 #endif 2203 if (operation == BIO_READ) { 2204 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read, 2205 device_get_unit(xbb->dev), 2206 bios[bio_idx]->bio_offset, 2207 bios[bio_idx]->bio_length); 2208 } else if (operation == BIO_WRITE) { 2209 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write, 2210 device_get_unit(xbb->dev), 2211 bios[bio_idx]->bio_offset, 2212 bios[bio_idx]->bio_length); 2213 } 2214 (*dev_data->csw->d_strategy)(bios[bio_idx]); 2215 } 2216 2217 return (error); 2218 2219 fail_free_bios: 2220 for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++) 2221 g_destroy_bio(bios[bio_idx]); 2222 2223 return (error); 2224 } 2225 2226 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int"); 2227 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t", 2228 "uint64_t"); 2229 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int", 2230 "uint64_t", "uint64_t"); 2231 2232 /** 2233 * Backend handler for file access. 2234 * 2235 * \param xbb Per-instance xbb configuration structure. 2236 * \param reqlist Allocated internal request list. 2237 * \param operation BIO_* I/O operation code. 2238 * \param flags Additional bio_flag data to pass to any generated bios 2239 * (e.g. BIO_ORDERED).. 2240 * 2241 * \return 0 for success, errno codes for failure. 2242 */ 2243 static int 2244 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, 2245 int operation, int flags) 2246 { 2247 struct xbb_file_data *file_data; 2248 u_int seg_idx; 2249 u_int nseg; 2250 struct uio xuio; 2251 struct xbb_sg *xbb_sg; 2252 struct iovec *xiovec; 2253 #ifdef XBB_USE_BOUNCE_BUFFERS 2254 void **p_vaddr; 2255 int saved_uio_iovcnt; 2256 #endif /* XBB_USE_BOUNCE_BUFFERS */ 2257 int error; 2258 2259 file_data = &xbb->backend.file; 2260 error = 0; 2261 bzero(&xuio, sizeof(xuio)); 2262 2263 switch (operation) { 2264 case BIO_READ: 2265 xuio.uio_rw = UIO_READ; 2266 break; 2267 case BIO_WRITE: 2268 xuio.uio_rw = UIO_WRITE; 2269 break; 2270 case BIO_FLUSH: { 2271 struct mount *mountpoint; 2272 2273 SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush, 2274 device_get_unit(xbb->dev)); 2275 2276 (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT); 2277 2278 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); 2279 error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread); 2280 VOP_UNLOCK(xbb->vn, 0); 2281 2282 vn_finished_write(mountpoint); 2283 2284 goto bailout_send_response; 2285 /* NOTREACHED */ 2286 } 2287 default: 2288 panic("invalid operation %d", operation); 2289 /* NOTREACHED */ 2290 } 2291 xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number 2292 << xbb->sector_size_shift; 2293 xuio.uio_segflg = UIO_SYSSPACE; 2294 xuio.uio_iov = file_data->xiovecs; 2295 xuio.uio_iovcnt = 0; 2296 xbb_sg = xbb->xbb_sgs; 2297 nseg = reqlist->nr_segments; 2298 2299 for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) { 2300 2301 /* 2302 * If the first sector is not 0, the KVA will 2303 * not be contiguous and we'll need to go on 2304 * to another segment. 
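 * (This mirrors the bio-splitting rule in xbb_dispatch_dev(); here a KVA
 * discontinuity simply starts a new iovec rather than a new bio.)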
2305 */ 2306 if (xbb_sg->first_sect != 0) 2307 xiovec = NULL; 2308 2309 if (xiovec == NULL) { 2310 xiovec = &file_data->xiovecs[xuio.uio_iovcnt]; 2311 xiovec->iov_base = xbb_reqlist_ioaddr(reqlist, 2312 seg_idx, xbb_sg->first_sect); 2313 #ifdef XBB_USE_BOUNCE_BUFFERS 2314 /* 2315 * Store the address of the incoming 2316 * buffer at this particular offset 2317 * as well, so we can do the copy 2318 * later without having to do more 2319 * work to recalculate this address. 2320 */ 2321 p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt]; 2322 *p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx, 2323 xbb_sg->first_sect); 2324 #endif /* XBB_USE_BOUNCE_BUFFERS */ 2325 xiovec->iov_len = 0; 2326 xuio.uio_iovcnt++; 2327 } 2328 2329 xiovec->iov_len += xbb_sg->nsect << 9; 2330 2331 xuio.uio_resid += xbb_sg->nsect << 9; 2332 2333 /* 2334 * If the last sector is not the full page 2335 * size count, the next segment will not be 2336 * contiguous in KVA and we need a new iovec. 2337 */ 2338 if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) 2339 xiovec = NULL; 2340 } 2341 2342 xuio.uio_td = curthread; 2343 2344 #ifdef XBB_USE_BOUNCE_BUFFERS 2345 saved_uio_iovcnt = xuio.uio_iovcnt; 2346 2347 if (operation == BIO_WRITE) { 2348 /* Copy the write data to the local buffer. */ 2349 for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr, 2350 xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt; 2351 seg_idx++, xiovec++, p_vaddr++) { 2352 2353 memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len); 2354 } 2355 } else { 2356 /* 2357 * We only need to save off the iovecs in the case of a 2358 * read, because the copy for the read happens after the 2359 * VOP_READ(). (The uio will get modified in that call 2360 * sequence.) 2361 */ 2362 memcpy(file_data->saved_xiovecs, xuio.uio_iov, 2363 xuio.uio_iovcnt * sizeof(xuio.uio_iov[0])); 2364 } 2365 #endif /* XBB_USE_BOUNCE_BUFFERS */ 2366 2367 switch (operation) { 2368 case BIO_READ: 2369 2370 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read, 2371 device_get_unit(xbb->dev), xuio.uio_offset, 2372 xuio.uio_resid); 2373 2374 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); 2375 2376 /* 2377 * UFS pays attention to IO_DIRECT for reads. If the 2378 * DIRECTIO option is configured into the kernel, it calls 2379 * ffs_rawread(). But that only works for single-segment 2380 * uios with user space addresses. In our case, with a 2381 * kernel uio, it still reads into the buffer cache, but it 2382 * will just try to release the buffer from the cache later 2383 * on in ffs_read(). 2384 * 2385 * ZFS does not pay attention to IO_DIRECT for reads. 2386 * 2387 * UFS does not pay attention to IO_SYNC for reads. 2388 * 2389 * ZFS pays attention to IO_SYNC (which translates into the 2390 * Solaris define FRSYNC for zfs_read()) for reads. It 2391 * attempts to sync the file before reading. 2392 * 2393 * So, to attempt to provide some barrier semantics in the 2394 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 2395 */ 2396 error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ? 2397 (IO_DIRECT|IO_SYNC) : 0, file_data->cred); 2398 2399 VOP_UNLOCK(xbb->vn, 0); 2400 break; 2401 case BIO_WRITE: { 2402 struct mount *mountpoint; 2403 2404 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write, 2405 device_get_unit(xbb->dev), xuio.uio_offset, 2406 xuio.uio_resid); 2407 2408 (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT); 2409 2410 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); 2411 2412 /* 2413 * UFS pays attention to IO_DIRECT for writes. The write 2414 * is done asynchronously. 
(Normally the write would just 2415 * get put into cache. 2416 * 2417 * UFS pays attention to IO_SYNC for writes. It will 2418 * attempt to write the buffer out synchronously if that 2419 * flag is set. 2420 * 2421 * ZFS does not pay attention to IO_DIRECT for writes. 2422 * 2423 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 2424 * for writes. It will flush the transaction from the 2425 * cache before returning. 2426 * 2427 * So if we've got the BIO_ORDERED flag set, we want 2428 * IO_SYNC in either the UFS or ZFS case. 2429 */ 2430 error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ? 2431 IO_SYNC : 0, file_data->cred); 2432 VOP_UNLOCK(xbb->vn, 0); 2433 2434 vn_finished_write(mountpoint); 2435 2436 break; 2437 } 2438 default: 2439 panic("invalid operation %d", operation); 2440 /* NOTREACHED */ 2441 } 2442 2443 #ifdef XBB_USE_BOUNCE_BUFFERS 2444 /* We only need to copy here for read operations */ 2445 if (operation == BIO_READ) { 2446 2447 for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr, 2448 xiovec = file_data->saved_xiovecs; 2449 seg_idx < saved_uio_iovcnt; seg_idx++, 2450 xiovec++, p_vaddr++) { 2451 2452 /* 2453 * Note that we have to use the copy of the 2454 * io vector we made above. uiomove() modifies 2455 * the uio and its referenced vector as uiomove 2456 * performs the copy, so we can't rely on any 2457 * state from the original uio. 2458 */ 2459 memcpy(*p_vaddr, xiovec->iov_base, xiovec->iov_len); 2460 } 2461 } 2462 #endif /* XBB_USE_BOUNCE_BUFFERS */ 2463 2464 bailout_send_response: 2465 2466 if (error != 0) 2467 reqlist->status = BLKIF_RSP_ERROR; 2468 2469 xbb_complete_reqlist(xbb, reqlist); 2470 2471 return (0); 2472 } 2473 2474 /*--------------------------- Backend Configuration --------------------------*/ 2475 /** 2476 * Close and cleanup any backend device/file specific state for this 2477 * block back instance. 2478 * 2479 * \param xbb Per-instance xbb configuration structure. 2480 */ 2481 static void 2482 xbb_close_backend(struct xbb_softc *xbb) 2483 { 2484 DROP_GIANT(); 2485 DPRINTF("closing dev=%s\n", xbb->dev_name); 2486 if (xbb->vn) { 2487 int flags = FREAD; 2488 2489 if ((xbb->flags & XBBF_READ_ONLY) == 0) 2490 flags |= FWRITE; 2491 2492 switch (xbb->device_type) { 2493 case XBB_TYPE_DISK: 2494 if (xbb->backend.dev.csw) { 2495 dev_relthread(xbb->backend.dev.cdev, 2496 xbb->backend.dev.dev_ref); 2497 xbb->backend.dev.csw = NULL; 2498 xbb->backend.dev.cdev = NULL; 2499 } 2500 break; 2501 case XBB_TYPE_FILE: 2502 break; 2503 case XBB_TYPE_NONE: 2504 default: 2505 panic("Unexpected backend type."); 2506 break; 2507 } 2508 2509 (void)vn_close(xbb->vn, flags, NOCRED, curthread); 2510 xbb->vn = NULL; 2511 2512 switch (xbb->device_type) { 2513 case XBB_TYPE_DISK: 2514 break; 2515 case XBB_TYPE_FILE: 2516 if (xbb->backend.file.cred != NULL) { 2517 crfree(xbb->backend.file.cred); 2518 xbb->backend.file.cred = NULL; 2519 } 2520 break; 2521 case XBB_TYPE_NONE: 2522 default: 2523 panic("Unexpected backend type."); 2524 break; 2525 } 2526 } 2527 PICKUP_GIANT(); 2528 } 2529 2530 /** 2531 * Open a character device to be used for backend I/O. 2532 * 2533 * \param xbb Per-instance xbb configuration structure. 2534 * 2535 * \return 0 for success, errno codes for failure. 
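 *
 * \note The backing device's sector size and media size are queried here
 *       via the DIOCGSECTORSIZE and DIOCGMEDIASIZE ioctls; they are later
 *       advertised to the front-end by xbb_publish_backend_info().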
2536 */ 2537 static int 2538 xbb_open_dev(struct xbb_softc *xbb) 2539 { 2540 struct vattr vattr; 2541 struct cdev *dev; 2542 struct cdevsw *devsw; 2543 int error; 2544 2545 xbb->device_type = XBB_TYPE_DISK; 2546 xbb->dispatch_io = xbb_dispatch_dev; 2547 xbb->backend.dev.cdev = xbb->vn->v_rdev; 2548 xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev, 2549 &xbb->backend.dev.dev_ref); 2550 if (xbb->backend.dev.csw == NULL) 2551 panic("Unable to retrieve device switch"); 2552 2553 error = VOP_GETATTR(xbb->vn, &vattr, NOCRED); 2554 if (error) { 2555 xenbus_dev_fatal(xbb->dev, error, "error getting " 2556 "vnode attributes for device %s", 2557 xbb->dev_name); 2558 return (error); 2559 } 2560 2561 2562 dev = xbb->vn->v_rdev; 2563 devsw = dev->si_devsw; 2564 if (!devsw->d_ioctl) { 2565 xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for " 2566 "device %s!", xbb->dev_name); 2567 return (ENODEV); 2568 } 2569 2570 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 2571 (caddr_t)&xbb->sector_size, FREAD, 2572 curthread); 2573 if (error) { 2574 xenbus_dev_fatal(xbb->dev, error, 2575 "error calling ioctl DIOCGSECTORSIZE " 2576 "for device %s", xbb->dev_name); 2577 return (error); 2578 } 2579 2580 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2581 (caddr_t)&xbb->media_size, FREAD, 2582 curthread); 2583 if (error) { 2584 xenbus_dev_fatal(xbb->dev, error, 2585 "error calling ioctl DIOCGMEDIASIZE " 2586 "for device %s", xbb->dev_name); 2587 return (error); 2588 } 2589 2590 return (0); 2591 } 2592 2593 /** 2594 * Open a file to be used for backend I/O. 2595 * 2596 * \param xbb Per-instance xbb configuration structure. 2597 * 2598 * \return 0 for success, errno codes for failure. 2599 */ 2600 static int 2601 xbb_open_file(struct xbb_softc *xbb) 2602 { 2603 struct xbb_file_data *file_data; 2604 struct vattr vattr; 2605 int error; 2606 2607 file_data = &xbb->backend.file; 2608 xbb->device_type = XBB_TYPE_FILE; 2609 xbb->dispatch_io = xbb_dispatch_file; 2610 error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred); 2611 if (error != 0) { 2612 xenbus_dev_fatal(xbb->dev, error, 2613 "error calling VOP_GETATTR()" 2614 "for file %s", xbb->dev_name); 2615 return (error); 2616 } 2617 2618 /* 2619 * Verify that we have the ability to upgrade to exclusive 2620 * access on this file so we can trap errors at open instead 2621 * of reporting them during first access. 2622 */ 2623 if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) { 2624 vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY); 2625 if (xbb->vn->v_iflag & VI_DOOMED) { 2626 error = EBADF; 2627 xenbus_dev_fatal(xbb->dev, error, 2628 "error locking file %s", 2629 xbb->dev_name); 2630 2631 return (error); 2632 } 2633 } 2634 2635 file_data->cred = crhold(curthread->td_ucred); 2636 xbb->media_size = vattr.va_size; 2637 2638 /* 2639 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 2640 * With ZFS, it is 131072 bytes. Block sizes that large don't work 2641 * with disklabel and UFS on FreeBSD at least. Large block sizes 2642 * may not work with other OSes as well. So just export a sector 2643 * size of 512 bytes, which should work with any OS or 2644 * application. Since our backing is a file, any block size will 2645 * work fine for the backing store. 2646 */ 2647 #if 0 2648 xbb->sector_size = vattr.va_blocksize; 2649 #endif 2650 xbb->sector_size = 512; 2651 2652 /* 2653 * Sanity check. The media size has to be at least one 2654 * sector long. 
2655 */ 2656 if (xbb->media_size < xbb->sector_size) { 2657 error = EINVAL; 2658 xenbus_dev_fatal(xbb->dev, error, 2659 "file %s size %ju < block size %u", 2660 xbb->dev_name, 2661 (uintmax_t)xbb->media_size, 2662 xbb->sector_size); 2663 } 2664 return (error); 2665 } 2666 2667 /** 2668 * Open the backend provider for this connection. 2669 * 2670 * \param xbb Per-instance xbb configuration structure. 2671 * 2672 * \return 0 for success, errno codes for failure. 2673 */ 2674 static int 2675 xbb_open_backend(struct xbb_softc *xbb) 2676 { 2677 struct nameidata nd; 2678 int flags; 2679 int error; 2680 2681 flags = FREAD; 2682 error = 0; 2683 2684 DPRINTF("opening dev=%s\n", xbb->dev_name); 2685 2686 if (rootvnode == NULL) { 2687 xenbus_dev_fatal(xbb->dev, ENOENT, 2688 "Root file system not mounted"); 2689 return (ENOENT); 2690 } 2691 2692 if ((xbb->flags & XBBF_READ_ONLY) == 0) 2693 flags |= FWRITE; 2694 2695 pwd_ensure_dirs(); 2696 2697 again: 2698 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread); 2699 error = vn_open(&nd, &flags, 0, NULL); 2700 if (error) { 2701 /* 2702 * This is the only reasonable guess we can make as far as 2703 * path if the user doesn't give us a fully qualified path. 2704 * If they want to specify a file, they need to specify the 2705 * full path. 2706 */ 2707 if (xbb->dev_name[0] != '/') { 2708 char *dev_path = "/dev/"; 2709 char *dev_name; 2710 2711 /* Try adding device path at beginning of name */ 2712 dev_name = malloc(strlen(xbb->dev_name) 2713 + strlen(dev_path) + 1, 2714 M_XENBLOCKBACK, M_NOWAIT); 2715 if (dev_name) { 2716 sprintf(dev_name, "%s%s", dev_path, 2717 xbb->dev_name); 2718 free(xbb->dev_name, M_XENBLOCKBACK); 2719 xbb->dev_name = dev_name; 2720 goto again; 2721 } 2722 } 2723 xenbus_dev_fatal(xbb->dev, error, "error opening device %s", 2724 xbb->dev_name); 2725 return (error); 2726 } 2727 2728 NDFREE(&nd, NDF_ONLY_PNBUF); 2729 2730 xbb->vn = nd.ni_vp; 2731 2732 /* We only support disks and files. */ 2733 if (vn_isdisk(xbb->vn, &error)) { 2734 error = xbb_open_dev(xbb); 2735 } else if (xbb->vn->v_type == VREG) { 2736 error = xbb_open_file(xbb); 2737 } else { 2738 error = EINVAL; 2739 xenbus_dev_fatal(xbb->dev, error, "%s is not a disk " 2740 "or file", xbb->dev_name); 2741 } 2742 VOP_UNLOCK(xbb->vn, 0); 2743 2744 if (error != 0) { 2745 xbb_close_backend(xbb); 2746 return (error); 2747 } 2748 2749 xbb->sector_size_shift = fls(xbb->sector_size) - 1; 2750 xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift; 2751 2752 DPRINTF("opened %s=%s sector_size=%u media_size=%" PRId64 "\n", 2753 (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file", 2754 xbb->dev_name, xbb->sector_size, xbb->media_size); 2755 2756 return (0); 2757 } 2758 2759 /*------------------------ Inter-Domain Communication ------------------------*/ 2760 /** 2761 * Free dynamically allocated KVA or pseudo-physical address allocations. 2762 * 2763 * \param xbb Per-instance xbb configuration structure. 2764 */ 2765 static void 2766 xbb_free_communication_mem(struct xbb_softc *xbb) 2767 { 2768 if (xbb->kva != 0) { 2769 if (xbb->pseudo_phys_res != NULL) { 2770 xenmem_free(xbb->dev, xbb->pseudo_phys_res_id, 2771 xbb->pseudo_phys_res); 2772 xbb->pseudo_phys_res = NULL; 2773 } 2774 } 2775 xbb->kva = 0; 2776 xbb->gnt_base_addr = 0; 2777 if (xbb->kva_free != NULL) { 2778 free(xbb->kva_free, M_XENBLOCKBACK); 2779 xbb->kva_free = NULL; 2780 } 2781 } 2782 2783 /** 2784 * Cleanup all inter-domain communication mechanisms. 
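 * Returns 0 on success, or EAGAIN if outstanding requests are still
 * draining; in that case the caller is expected to retry once they have
 * completed.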
2785 * 2786 * \param xbb Per-instance xbb configuration structure. 2787 */ 2788 static int 2789 xbb_disconnect(struct xbb_softc *xbb) 2790 { 2791 struct gnttab_unmap_grant_ref ops[XBB_MAX_RING_PAGES]; 2792 struct gnttab_unmap_grant_ref *op; 2793 u_int ring_idx; 2794 int error; 2795 2796 DPRINTF("\n"); 2797 2798 if ((xbb->flags & XBBF_RING_CONNECTED) == 0) 2799 return (0); 2800 2801 xen_intr_unbind(&xbb->xen_intr_handle); 2802 2803 mtx_unlock(&xbb->lock); 2804 taskqueue_drain(xbb->io_taskqueue, &xbb->io_task); 2805 mtx_lock(&xbb->lock); 2806 2807 /* 2808 * No new interrupts can generate work, but we must wait 2809 * for all currently active requests to drain. 2810 */ 2811 if (xbb->active_request_count != 0) 2812 return (EAGAIN); 2813 2814 for (ring_idx = 0, op = ops; 2815 ring_idx < xbb->ring_config.ring_pages; 2816 ring_idx++, op++) { 2817 2818 op->host_addr = xbb->ring_config.gnt_addr 2819 + (ring_idx * PAGE_SIZE); 2820 op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx]; 2821 op->handle = xbb->ring_config.handle[ring_idx]; 2822 } 2823 2824 error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ops, 2825 xbb->ring_config.ring_pages); 2826 if (error != 0) 2827 panic("Grant table op failed (%d)", error); 2828 2829 xbb_free_communication_mem(xbb); 2830 2831 if (xbb->requests != NULL) { 2832 free(xbb->requests, M_XENBLOCKBACK); 2833 xbb->requests = NULL; 2834 } 2835 2836 if (xbb->request_lists != NULL) { 2837 struct xbb_xen_reqlist *reqlist; 2838 int i; 2839 2840 /* There is one request list for ever allocated request. */ 2841 for (i = 0, reqlist = xbb->request_lists; 2842 i < xbb->max_requests; i++, reqlist++){ 2843 #ifdef XBB_USE_BOUNCE_BUFFERS 2844 if (reqlist->bounce != NULL) { 2845 free(reqlist->bounce, M_XENBLOCKBACK); 2846 reqlist->bounce = NULL; 2847 } 2848 #endif 2849 if (reqlist->gnt_handles != NULL) { 2850 free(reqlist->gnt_handles, M_XENBLOCKBACK); 2851 reqlist->gnt_handles = NULL; 2852 } 2853 } 2854 free(xbb->request_lists, M_XENBLOCKBACK); 2855 xbb->request_lists = NULL; 2856 } 2857 2858 xbb->flags &= ~XBBF_RING_CONNECTED; 2859 return (0); 2860 } 2861 2862 /** 2863 * Map shared memory ring into domain local address space, initialize 2864 * ring control structures, and bind an interrupt to the event channel 2865 * used to notify us of ring changes. 2866 * 2867 * \param xbb Per-instance xbb configuration structure. 2868 */ 2869 static int 2870 xbb_connect_ring(struct xbb_softc *xbb) 2871 { 2872 struct gnttab_map_grant_ref gnts[XBB_MAX_RING_PAGES]; 2873 struct gnttab_map_grant_ref *gnt; 2874 u_int ring_idx; 2875 int error; 2876 2877 if ((xbb->flags & XBBF_RING_CONNECTED) != 0) 2878 return (0); 2879 2880 /* 2881 * Kva for our ring is at the tail of the region of kva allocated 2882 * by xbb_alloc_communication_mem(). 
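 *
 * Roughly, the communication region is laid out as:
 *
 *   xbb->kva                                        xbb->kva + kva_size
 *   |<----------- reqlist_kva_size ----------->|<---- ring pages ---->|
 *   |  per-request segment mapping area        |    shared ring(s)    |
 *
 * so the ring's va/gnt_addr are simply offset kva_size minus the ring's
 * size from the start of the region.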
2883 */ 2884 xbb->ring_config.va = xbb->kva 2885 + (xbb->kva_size 2886 - (xbb->ring_config.ring_pages * PAGE_SIZE)); 2887 xbb->ring_config.gnt_addr = xbb->gnt_base_addr 2888 + (xbb->kva_size 2889 - (xbb->ring_config.ring_pages * PAGE_SIZE)); 2890 2891 for (ring_idx = 0, gnt = gnts; 2892 ring_idx < xbb->ring_config.ring_pages; 2893 ring_idx++, gnt++) { 2894 2895 gnt->host_addr = xbb->ring_config.gnt_addr 2896 + (ring_idx * PAGE_SIZE); 2897 gnt->flags = GNTMAP_host_map; 2898 gnt->ref = xbb->ring_config.ring_ref[ring_idx]; 2899 gnt->dom = xbb->otherend_id; 2900 } 2901 2902 error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, gnts, 2903 xbb->ring_config.ring_pages); 2904 if (error) 2905 panic("blkback: Ring page grant table op failed (%d)", error); 2906 2907 for (ring_idx = 0, gnt = gnts; 2908 ring_idx < xbb->ring_config.ring_pages; 2909 ring_idx++, gnt++) { 2910 if (gnt->status != 0) { 2911 xbb->ring_config.va = 0; 2912 xenbus_dev_fatal(xbb->dev, EACCES, 2913 "Ring shared page mapping failed. " 2914 "Status %d.", gnt->status); 2915 return (EACCES); 2916 } 2917 xbb->ring_config.handle[ring_idx] = gnt->handle; 2918 xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr; 2919 } 2920 2921 /* Initialize the ring based on ABI. */ 2922 switch (xbb->abi) { 2923 case BLKIF_PROTOCOL_NATIVE: 2924 { 2925 blkif_sring_t *sring; 2926 sring = (blkif_sring_t *)xbb->ring_config.va; 2927 BACK_RING_INIT(&xbb->rings.native, sring, 2928 xbb->ring_config.ring_pages * PAGE_SIZE); 2929 break; 2930 } 2931 case BLKIF_PROTOCOL_X86_32: 2932 { 2933 blkif_x86_32_sring_t *sring_x86_32; 2934 sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va; 2935 BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32, 2936 xbb->ring_config.ring_pages * PAGE_SIZE); 2937 break; 2938 } 2939 case BLKIF_PROTOCOL_X86_64: 2940 { 2941 blkif_x86_64_sring_t *sring_x86_64; 2942 sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va; 2943 BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64, 2944 xbb->ring_config.ring_pages * PAGE_SIZE); 2945 break; 2946 } 2947 default: 2948 panic("Unexpected blkif protocol ABI."); 2949 } 2950 2951 xbb->flags |= XBBF_RING_CONNECTED; 2952 2953 error = xen_intr_bind_remote_port(xbb->dev, 2954 xbb->otherend_id, 2955 xbb->ring_config.evtchn, 2956 xbb_filter, 2957 /*ithread_handler*/NULL, 2958 /*arg*/xbb, 2959 INTR_TYPE_BIO | INTR_MPSAFE, 2960 &xbb->xen_intr_handle); 2961 if (error) { 2962 (void)xbb_disconnect(xbb); 2963 xenbus_dev_fatal(xbb->dev, error, "binding event channel"); 2964 return (error); 2965 } 2966 2967 DPRINTF("rings connected!\n"); 2968 2969 return 0; 2970 } 2971 2972 /** 2973 * Size KVA and pseudo-physical address allocations based on negotiated 2974 * values for the size and number of I/O requests, and the size of our 2975 * communication ring. 2976 * 2977 * \param xbb Per-instance xbb configuration structure. 2978 * 2979 * These address spaces are used to dynamically map pages in the 2980 * front-end's domain into our own. 
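 *
 * \return 0 on success, or ENOMEM if either the KVA free bitmap or the
 *         pseudo-physical memory region cannot be allocated.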
2981 */ 2982 static int 2983 xbb_alloc_communication_mem(struct xbb_softc *xbb) 2984 { 2985 xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments; 2986 xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE; 2987 xbb->kva_size = xbb->reqlist_kva_size + 2988 (xbb->ring_config.ring_pages * PAGE_SIZE); 2989 2990 xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT); 2991 if (xbb->kva_free == NULL) 2992 return (ENOMEM); 2993 2994 DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n", 2995 device_get_nameunit(xbb->dev), xbb->kva_size, 2996 xbb->reqlist_kva_size); 2997 /* 2998 * Reserve a range of pseudo physical memory that we can map 2999 * into kva. These pages will only be backed by machine 3000 * pages ("real memory") during the lifetime of front-end requests 3001 * via grant table operations. 3002 */ 3003 xbb->pseudo_phys_res_id = 0; 3004 xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id, 3005 xbb->kva_size); 3006 if (xbb->pseudo_phys_res == NULL) { 3007 xbb->kva = 0; 3008 return (ENOMEM); 3009 } 3010 xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res); 3011 xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res); 3012 3013 DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n", 3014 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva, 3015 (uintmax_t)xbb->gnt_base_addr); 3016 return (0); 3017 } 3018 3019 /** 3020 * Collect front-end information from the XenStore. 3021 * 3022 * \param xbb Per-instance xbb configuration structure. 3023 */ 3024 static int 3025 xbb_collect_frontend_info(struct xbb_softc *xbb) 3026 { 3027 char protocol_abi[64]; 3028 const char *otherend_path; 3029 int error; 3030 u_int ring_idx; 3031 u_int ring_page_order; 3032 size_t ring_size; 3033 3034 otherend_path = xenbus_get_otherend_path(xbb->dev); 3035 3036 /* 3037 * Protocol defaults valid even if all negotiation fails. 3038 */ 3039 xbb->ring_config.ring_pages = 1; 3040 xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; 3041 xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE; 3042 3043 /* 3044 * Mandatory data (used in all versions of the protocol) first. 3045 */ 3046 error = xs_scanf(XST_NIL, otherend_path, 3047 "event-channel", NULL, "%" PRIu32, 3048 &xbb->ring_config.evtchn); 3049 if (error != 0) { 3050 xenbus_dev_fatal(xbb->dev, error, 3051 "Unable to retrieve event-channel information " 3052 "from frontend %s. Unable to connect.", 3053 xenbus_get_otherend_path(xbb->dev)); 3054 return (error); 3055 } 3056 3057 /* 3058 * These fields are initialized to legacy protocol defaults 3059 * so we only need to fail if reading the updated value succeeds 3060 * and the new value is outside of its allowed range. 3061 * 3062 * \note xs_gather() returns on the first encountered error, so 3063 * we must use independent calls in order to guarantee 3064 * we don't miss information in a sparsly populated front-end 3065 * tree. 3066 * 3067 * \note xs_scanf() does not update variables for unmatched 3068 * fields. 3069 */ 3070 ring_page_order = 0; 3071 xbb->max_requests = 32; 3072 3073 (void)xs_scanf(XST_NIL, otherend_path, 3074 "ring-page-order", NULL, "%u", 3075 &ring_page_order); 3076 xbb->ring_config.ring_pages = 1 << ring_page_order; 3077 ring_size = PAGE_SIZE * xbb->ring_config.ring_pages; 3078 xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size); 3079 3080 if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) { 3081 xenbus_dev_fatal(xbb->dev, EINVAL, 3082 "Front-end specified ring-pages of %u " 3083 "exceeds backend limit of %u. 
" 3084 "Unable to connect.", 3085 xbb->ring_config.ring_pages, 3086 XBB_MAX_RING_PAGES); 3087 return (EINVAL); 3088 } 3089 3090 if (xbb->ring_config.ring_pages == 1) { 3091 error = xs_gather(XST_NIL, otherend_path, 3092 "ring-ref", "%" PRIu32, 3093 &xbb->ring_config.ring_ref[0], 3094 NULL); 3095 if (error != 0) { 3096 xenbus_dev_fatal(xbb->dev, error, 3097 "Unable to retrieve ring information " 3098 "from frontend %s. Unable to " 3099 "connect.", 3100 xenbus_get_otherend_path(xbb->dev)); 3101 return (error); 3102 } 3103 } else { 3104 /* Multi-page ring format. */ 3105 for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages; 3106 ring_idx++) { 3107 char ring_ref_name[]= "ring_refXX"; 3108 3109 snprintf(ring_ref_name, sizeof(ring_ref_name), 3110 "ring-ref%u", ring_idx); 3111 error = xs_scanf(XST_NIL, otherend_path, 3112 ring_ref_name, NULL, "%" PRIu32, 3113 &xbb->ring_config.ring_ref[ring_idx]); 3114 if (error != 0) { 3115 xenbus_dev_fatal(xbb->dev, error, 3116 "Failed to retriev grant " 3117 "reference for page %u of " 3118 "shared ring. Unable " 3119 "to connect.", ring_idx); 3120 return (error); 3121 } 3122 } 3123 } 3124 3125 error = xs_gather(XST_NIL, otherend_path, 3126 "protocol", "%63s", protocol_abi, 3127 NULL); 3128 if (error != 0 3129 || !strcmp(protocol_abi, XEN_IO_PROTO_ABI_NATIVE)) { 3130 /* 3131 * Assume native if the frontend has not 3132 * published ABI data or it has published and 3133 * matches our own ABI. 3134 */ 3135 xbb->abi = BLKIF_PROTOCOL_NATIVE; 3136 } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) { 3137 3138 xbb->abi = BLKIF_PROTOCOL_X86_32; 3139 } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) { 3140 3141 xbb->abi = BLKIF_PROTOCOL_X86_64; 3142 } else { 3143 3144 xenbus_dev_fatal(xbb->dev, EINVAL, 3145 "Unknown protocol ABI (%s) published by " 3146 "frontend. Unable to connect.", protocol_abi); 3147 return (EINVAL); 3148 } 3149 return (0); 3150 } 3151 3152 /** 3153 * Allocate per-request data structures given request size and number 3154 * information negotiated with the front-end. 3155 * 3156 * \param xbb Per-instance xbb configuration structure. 3157 */ 3158 static int 3159 xbb_alloc_requests(struct xbb_softc *xbb) 3160 { 3161 struct xbb_xen_req *req; 3162 struct xbb_xen_req *last_req; 3163 3164 /* 3165 * Allocate request book keeping datastructures. 3166 */ 3167 xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests), 3168 M_XENBLOCKBACK, M_NOWAIT|M_ZERO); 3169 if (xbb->requests == NULL) { 3170 xenbus_dev_fatal(xbb->dev, ENOMEM, 3171 "Unable to allocate request structures"); 3172 return (ENOMEM); 3173 } 3174 3175 req = xbb->requests; 3176 last_req = &xbb->requests[xbb->max_requests - 1]; 3177 STAILQ_INIT(&xbb->request_free_stailq); 3178 while (req <= last_req) { 3179 STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links); 3180 req++; 3181 } 3182 return (0); 3183 } 3184 3185 static int 3186 xbb_alloc_request_lists(struct xbb_softc *xbb) 3187 { 3188 struct xbb_xen_reqlist *reqlist; 3189 int i; 3190 3191 /* 3192 * If no requests can be merged, we need 1 request list per 3193 * in flight request. 
3194 */ 3195 xbb->request_lists = malloc(xbb->max_requests * 3196 sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); 3197 if (xbb->request_lists == NULL) { 3198 xenbus_dev_fatal(xbb->dev, ENOMEM, 3199 "Unable to allocate request list structures"); 3200 return (ENOMEM); 3201 } 3202 3203 STAILQ_INIT(&xbb->reqlist_free_stailq); 3204 STAILQ_INIT(&xbb->reqlist_pending_stailq); 3205 for (i = 0; i < xbb->max_requests; i++) { 3206 int seg; 3207 3208 reqlist = &xbb->request_lists[i]; 3209 3210 reqlist->xbb = xbb; 3211 3212 #ifdef XBB_USE_BOUNCE_BUFFERS 3213 reqlist->bounce = malloc(xbb->max_reqlist_size, 3214 M_XENBLOCKBACK, M_NOWAIT); 3215 if (reqlist->bounce == NULL) { 3216 xenbus_dev_fatal(xbb->dev, ENOMEM, 3217 "Unable to allocate request " 3218 "bounce buffers"); 3219 return (ENOMEM); 3220 } 3221 #endif /* XBB_USE_BOUNCE_BUFFERS */ 3222 3223 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments * 3224 sizeof(*reqlist->gnt_handles), 3225 M_XENBLOCKBACK, M_NOWAIT|M_ZERO); 3226 if (reqlist->gnt_handles == NULL) { 3227 xenbus_dev_fatal(xbb->dev, ENOMEM, 3228 "Unable to allocate request " 3229 "grant references"); 3230 return (ENOMEM); 3231 } 3232 3233 for (seg = 0; seg < xbb->max_reqlist_segments; seg++) 3234 reqlist->gnt_handles[seg] = GRANT_REF_INVALID; 3235 3236 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); 3237 } 3238 return (0); 3239 } 3240 3241 /** 3242 * Supply information about the physical device to the frontend 3243 * via XenBus. 3244 * 3245 * \param xbb Per-instance xbb configuration structure. 3246 */ 3247 static int 3248 xbb_publish_backend_info(struct xbb_softc *xbb) 3249 { 3250 struct xs_transaction xst; 3251 const char *our_path; 3252 const char *leaf; 3253 int error; 3254 3255 our_path = xenbus_get_node(xbb->dev); 3256 while (1) { 3257 error = xs_transaction_start(&xst); 3258 if (error != 0) { 3259 xenbus_dev_fatal(xbb->dev, error, 3260 "Error publishing backend info " 3261 "(start transaction)"); 3262 return (error); 3263 } 3264 3265 leaf = "sectors"; 3266 error = xs_printf(xst, our_path, leaf, 3267 "%"PRIu64, xbb->media_num_sectors); 3268 if (error != 0) 3269 break; 3270 3271 /* XXX Support all VBD attributes here. */ 3272 leaf = "info"; 3273 error = xs_printf(xst, our_path, leaf, "%u", 3274 xbb->flags & XBBF_READ_ONLY 3275 ? VDISK_READONLY : 0); 3276 if (error != 0) 3277 break; 3278 3279 leaf = "sector-size"; 3280 error = xs_printf(xst, our_path, leaf, "%u", 3281 xbb->sector_size); 3282 if (error != 0) 3283 break; 3284 3285 error = xs_transaction_end(xst, 0); 3286 if (error == 0) { 3287 return (0); 3288 } else if (error != EAGAIN) { 3289 xenbus_dev_fatal(xbb->dev, error, "ending transaction"); 3290 return (error); 3291 } 3292 } 3293 3294 xenbus_dev_fatal(xbb->dev, error, "writing %s/%s", 3295 our_path, leaf); 3296 xs_transaction_end(xst, 1); 3297 return (error); 3298 } 3299 3300 /** 3301 * Connect to our blkfront peer now that it has completed publishing 3302 * its configuration into the XenStore. 3303 * 3304 * \param xbb Per-instance xbb configuration structure. 3305 */ 3306 static void 3307 xbb_connect(struct xbb_softc *xbb) 3308 { 3309 int error; 3310 3311 if (xenbus_get_state(xbb->dev) != XenbusStateInitialised) 3312 return; 3313 3314 if (xbb_collect_frontend_info(xbb) != 0) 3315 return; 3316 3317 xbb->flags &= ~XBBF_SHUTDOWN; 3318 3319 /* 3320 * We limit the maximum number of reqlist segments to the maximum 3321 * number of segments in the ring, or our absolute maximum, 3322 * whichever is smaller. 
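 * (That is, max_reqlist_segments = min(max_request_segments * max_requests,
 * XBB_MAX_SEGMENTS_PER_REQLIST), and the byte limit below follows directly
 * as max_reqlist_segments * PAGE_SIZE.)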
3323 */ 3324 xbb->max_reqlist_segments = MIN(xbb->max_request_segments * 3325 xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST); 3326 3327 /* 3328 * The maximum size is simply a function of the number of segments 3329 * we can handle. 3330 */ 3331 xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE; 3332 3333 /* Allocate resources whose size depends on front-end configuration. */ 3334 error = xbb_alloc_communication_mem(xbb); 3335 if (error != 0) { 3336 xenbus_dev_fatal(xbb->dev, error, 3337 "Unable to allocate communication memory"); 3338 return; 3339 } 3340 3341 error = xbb_alloc_requests(xbb); 3342 if (error != 0) { 3343 /* Specific errors are reported by xbb_alloc_requests(). */ 3344 return; 3345 } 3346 3347 error = xbb_alloc_request_lists(xbb); 3348 if (error != 0) { 3349 /* Specific errors are reported by xbb_alloc_request_lists(). */ 3350 return; 3351 } 3352 3353 /* 3354 * Connect communication channel. 3355 */ 3356 error = xbb_connect_ring(xbb); 3357 if (error != 0) { 3358 /* Specific errors are reported by xbb_connect_ring(). */ 3359 return; 3360 } 3361 3362 if (xbb_publish_backend_info(xbb) != 0) { 3363 /* 3364 * If we can't publish our data, we cannot participate 3365 * in this connection, and waiting for a front-end state 3366 * change will not help the situation. 3367 */ 3368 (void)xbb_disconnect(xbb); 3369 return; 3370 } 3371 3372 /* Ready for I/O. */ 3373 xenbus_set_state(xbb->dev, XenbusStateConnected); 3374 } 3375 3376 /*-------------------------- Device Teardown Support -------------------------*/ 3377 /** 3378 * Perform device shutdown functions. 3379 * 3380 * \param xbb Per-instance xbb configuration structure. 3381 * 3382 * Mark this instance as shutting down, wait for any active I/O on the 3383 * backend device/file to drain, disconnect from the front-end, and notify 3384 * any waiters (e.g. a thread invoking our detach method) that detach can 3385 * now proceed. 3386 */ 3387 static int 3388 xbb_shutdown(struct xbb_softc *xbb) 3389 { 3390 XenbusState frontState; 3391 int error; 3392 3393 DPRINTF("\n"); 3394 3395 /* 3396 * Due to the need to drop our mutex during some 3397 * xenbus operations, it is possible for two threads 3398 * to attempt to close out shutdown processing at 3399 * the same time. Tell the caller that hits this 3400 * race to try back later. 3401 */ 3402 if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0) 3403 return (EAGAIN); 3404 3405 xbb->flags |= XBBF_IN_SHUTDOWN; 3406 mtx_unlock(&xbb->lock); 3407 3408 if (xbb->hotplug_watch.node != NULL) { 3409 xs_unregister_watch(&xbb->hotplug_watch); 3410 free(xbb->hotplug_watch.node, M_XENBLOCKBACK); 3411 xbb->hotplug_watch.node = NULL; 3412 } 3413 3414 if (xenbus_get_state(xbb->dev) < XenbusStateClosing) 3415 xenbus_set_state(xbb->dev, XenbusStateClosing); 3416 3417 frontState = xenbus_get_otherend_state(xbb->dev); 3418 mtx_lock(&xbb->lock); 3419 xbb->flags &= ~XBBF_IN_SHUTDOWN; 3420 3421 /* Wait for the frontend to disconnect (if it's connected). */ 3422 if (frontState == XenbusStateConnected) 3423 return (EAGAIN); 3424 3425 DPRINTF("\n"); 3426 3427 /* Indicate shutdown is in progress. */ 3428 xbb->flags |= XBBF_SHUTDOWN; 3429 3430 /* Disconnect from the front-end. */ 3431 error = xbb_disconnect(xbb); 3432 if (error != 0) { 3433 /* 3434 * Requests still outstanding. We'll be called again 3435 * once they complete. 
3436 */ 3437 KASSERT(error == EAGAIN, 3438 ("%s: Unexpected xbb_disconnect() failure %d", 3439 __func__, error)); 3440 3441 return (error); 3442 } 3443 3444 DPRINTF("\n"); 3445 3446 /* Indicate to xbb_detach() that it is safe to proceed. */ 3447 wakeup(xbb); 3448 3449 return (0); 3450 } 3451 3452 /** 3453 * Report an attach time error to the console and Xen, and clean up 3454 * this instance by forcing immediate detach processing. 3455 * 3456 * \param xbb Per-instance xbb configuration structure. 3457 * \param err Errno describing the error. 3458 * \param fmt Printf style format and arguments. 3459 */ 3460 static void 3461 xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...) 3462 { 3463 va_list ap; 3464 va_list ap_hotplug; 3465 3466 va_start(ap, fmt); 3467 va_copy(ap_hotplug, ap); 3468 xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev), 3469 "hotplug-error", fmt, ap_hotplug); 3470 va_end(ap_hotplug); 3471 xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3472 "hotplug-status", "error"); 3473 3474 xenbus_dev_vfatal(xbb->dev, err, fmt, ap); 3475 va_end(ap); 3476 3477 xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3478 "online", "0"); 3479 mtx_lock(&xbb->lock); 3480 xbb_shutdown(xbb); 3481 mtx_unlock(&xbb->lock); 3482 } 3483 3484 /*---------------------------- NewBus Entrypoints ----------------------------*/ 3485 /** 3486 * Inspect a XenBus device and claim it if it is of the appropriate type. 3487 * 3488 * \param dev NewBus device object representing a candidate XenBus device. 3489 * 3490 * \return 0 for success, errno codes for failure. 3491 */ 3492 static int 3493 xbb_probe(device_t dev) 3494 { 3495 3496 if (!strcmp(xenbus_get_type(dev), "vbd")) { 3497 device_set_desc(dev, "Backend Virtual Block Device"); 3498 device_quiet(dev); 3499 return (0); 3500 } 3501 3502 return (ENXIO); 3503 } 3504 3505 /** 3506 * Set up sysctl variables to control various Block Back parameters. 3507 * 3508 * \param xbb Xen Block Back softc.
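 *
 * These knobs and counters hang off the device's sysctl context, so they
 * should appear under the per-unit device tree (e.g. dev.xbbd.<unit>).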
3509 * 3510 */ 3511 static void 3512 xbb_setup_sysctl(struct xbb_softc *xbb) 3513 { 3514 struct sysctl_ctx_list *sysctl_ctx = NULL; 3515 struct sysctl_oid *sysctl_tree = NULL; 3516 3517 sysctl_ctx = device_get_sysctl_ctx(xbb->dev); 3518 if (sysctl_ctx == NULL) 3519 return; 3520 3521 sysctl_tree = device_get_sysctl_tree(xbb->dev); 3522 if (sysctl_tree == NULL) 3523 return; 3524 3525 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3526 "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0, 3527 "fake the flush command"); 3528 3529 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3530 "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0, 3531 "send a real flush for N flush requests"); 3532 3533 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3534 "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0, 3535 "Don't coalesce contiguous requests"); 3536 3537 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3538 "reqs_received", CTLFLAG_RW, &xbb->reqs_received, 3539 "how many I/O requests we have received"); 3540 3541 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3542 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed, 3543 "how many I/O requests have been completed"); 3544 3545 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3546 "reqs_queued_for_completion", CTLFLAG_RW, 3547 &xbb->reqs_queued_for_completion, 3548 "how many I/O requests queued but not yet pushed"); 3549 3550 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3551 "reqs_completed_with_error", CTLFLAG_RW, 3552 &xbb->reqs_completed_with_error, 3553 "how many I/O requests completed with error status"); 3554 3555 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3556 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch, 3557 "how many I/O dispatches were forced"); 3558 3559 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3560 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch, 3561 "how many I/O dispatches were normal"); 3562 3563 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3564 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch, 3565 "total number of I/O dispatches"); 3566 3567 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3568 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages, 3569 "how many times we have run out of KVA"); 3570 3571 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3572 "request_shortages", CTLFLAG_RW, 3573 &xbb->request_shortages, 3574 "how many times we have run out of requests"); 3575 3576 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3577 "max_requests", CTLFLAG_RD, &xbb->max_requests, 0, 3578 "maximum outstanding requests (negotiated)"); 3579 3580 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3581 "max_request_segments", CTLFLAG_RD, 3582 &xbb->max_request_segments, 0, 3583 "maximum number of pages per requests (negotiated)"); 3584 3585 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3586 "max_request_size", CTLFLAG_RD, 3587 &xbb->max_request_size, 0, 3588 "maximum size in bytes of a request (negotiated)"); 3589 3590 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, 3591 "ring_pages", CTLFLAG_RD, 3592 &xbb->ring_config.ring_pages, 0, 3593 "communication channel pages (negotiated)"); 3594 } 3595 3596 static void 3597 xbb_attach_disk(struct xs_watch *watch, const char **vec, unsigned int 
len) 3598 { 3599 device_t dev; 3600 struct xbb_softc *xbb; 3601 int error; 3602 3603 dev = (device_t) watch->callback_data; 3604 xbb = device_get_softc(dev); 3605 3606 error = xs_gather(XST_NIL, xenbus_get_node(dev), "physical-device-path", 3607 NULL, &xbb->dev_name, NULL); 3608 if (error != 0) 3609 return; 3610 3611 xs_unregister_watch(watch); 3612 free(watch->node, M_XENBLOCKBACK); 3613 watch->node = NULL; 3614 3615 /* Collect physical device information. */ 3616 error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev), 3617 "device-type", NULL, &xbb->dev_type, 3618 NULL); 3619 if (error != 0) 3620 xbb->dev_type = NULL; 3621 3622 error = xs_gather(XST_NIL, xenbus_get_node(dev), 3623 "mode", NULL, &xbb->dev_mode, 3624 NULL); 3625 if (error != 0) { 3626 xbb_attach_failed(xbb, error, "reading backend fields at %s", 3627 xenbus_get_node(dev)); 3628 return; 3629 } 3630 3631 /* Parse fopen style mode flags. */ 3632 if (strchr(xbb->dev_mode, 'w') == NULL) 3633 xbb->flags |= XBBF_READ_ONLY; 3634 3635 /* 3636 * Verify the physical device is present and can support 3637 * the desired I/O mode. 3638 */ 3639 error = xbb_open_backend(xbb); 3640 if (error != 0) { 3641 xbb_attach_failed(xbb, error, "Unable to open %s", 3642 xbb->dev_name); 3643 return; 3644 } 3645 3646 /* Use devstat(9) for recording statistics. */ 3647 xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev), 3648 xbb->sector_size, 3649 DEVSTAT_ALL_SUPPORTED, 3650 DEVSTAT_TYPE_DIRECT 3651 | DEVSTAT_TYPE_IF_OTHER, 3652 DEVSTAT_PRIORITY_OTHER); 3653 3654 xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev), 3655 xbb->sector_size, 3656 DEVSTAT_ALL_SUPPORTED, 3657 DEVSTAT_TYPE_DIRECT 3658 | DEVSTAT_TYPE_IF_OTHER, 3659 DEVSTAT_PRIORITY_OTHER); 3660 /* 3661 * Setup sysctl variables. 3662 */ 3663 xbb_setup_sysctl(xbb); 3664 3665 /* 3666 * Create a taskqueue for doing work that must occur from a 3667 * thread context. 3668 */ 3669 xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev), 3670 M_NOWAIT, 3671 taskqueue_thread_enqueue, 3672 /*contxt*/&xbb->io_taskqueue); 3673 if (xbb->io_taskqueue == NULL) { 3674 xbb_attach_failed(xbb, error, "Unable to create taskqueue"); 3675 return; 3676 } 3677 3678 taskqueue_start_threads(&xbb->io_taskqueue, 3679 /*num threads*/1, 3680 /*priority*/PWAIT, 3681 /*thread name*/ 3682 "%s taskq", device_get_nameunit(dev)); 3683 3684 /* Update hot-plug status to satisfy xend. */ 3685 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3686 "hotplug-status", "connected"); 3687 if (error) { 3688 xbb_attach_failed(xbb, error, "writing %s/hotplug-status", 3689 xenbus_get_node(xbb->dev)); 3690 return; 3691 } 3692 3693 /* Tell the front end that we are ready to connect. */ 3694 xenbus_set_state(dev, XenbusStateInitialised); 3695 } 3696 3697 /** 3698 * Attach to a XenBus device that has been claimed by our probe routine. 3699 * 3700 * \param dev NewBus device object representing this Xen Block Back instance. 3701 * 3702 * \return 0 for success, errno codes for failure. 3703 */ 3704 static int 3705 xbb_attach(device_t dev) 3706 { 3707 struct xbb_softc *xbb; 3708 int error; 3709 u_int max_ring_page_order; 3710 struct sbuf *watch_path; 3711 3712 DPRINTF("Attaching to %s\n", xenbus_get_node(dev)); 3713 3714 /* 3715 * Basic initialization. 3716 * After this block it is safe to call xbb_detach() 3717 * to clean up any allocated data for this instance. 
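 *
 * Note that attach completes asynchronously: the rest of the setup
 * (opening the backing device, creating the I/O taskqueue, etc.) runs
 * from xbb_attach_disk() once the hotplug script publishes the
 * physical-device-path node watched below.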
3718 */ 3719 xbb = device_get_softc(dev); 3720 xbb->dev = dev; 3721 xbb->otherend_id = xenbus_get_otherend_id(dev); 3722 TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb); 3723 mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF); 3724 3725 /* 3726 * Publish protocol capabilities for consumption by the 3727 * front-end. 3728 */ 3729 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3730 "feature-barrier", "1"); 3731 if (error) { 3732 xbb_attach_failed(xbb, error, "writing %s/feature-barrier", 3733 xenbus_get_node(xbb->dev)); 3734 return (error); 3735 } 3736 3737 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3738 "feature-flush-cache", "1"); 3739 if (error) { 3740 xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache", 3741 xenbus_get_node(xbb->dev)); 3742 return (error); 3743 } 3744 3745 max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1; 3746 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), 3747 "max-ring-page-order", "%u", max_ring_page_order); 3748 if (error) { 3749 xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order", 3750 xenbus_get_node(xbb->dev)); 3751 return (error); 3752 } 3753 3754 /* 3755 * We need to wait for hotplug script execution before 3756 * moving forward. 3757 */ 3758 watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path"); 3759 xbb->hotplug_watch.callback_data = (uintptr_t)dev; 3760 xbb->hotplug_watch.callback = xbb_attach_disk; 3761 KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup")); 3762 xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK); 3763 sbuf_delete(watch_path); 3764 error = xs_register_watch(&xbb->hotplug_watch); 3765 if (error != 0) { 3766 xbb_attach_failed(xbb, error, "failed to create watch on %s", 3767 xbb->hotplug_watch.node); 3768 free(xbb->hotplug_watch.node, M_XENBLOCKBACK); 3769 return (error); 3770 } 3771 3772 /* Tell the toolstack blkback has attached. */ 3773 xenbus_set_state(dev, XenbusStateInitWait); 3774 3775 return (0); 3776 } 3777 3778 /** 3779 * Detach from a block back device instance. 3780 * 3781 * \param dev NewBus device object representing this Xen Block Back instance. 3782 * 3783 * \return 0 for success, errno codes for failure. 3784 * 3785 * \note A block back device may be detached at any time in its life-cycle, 3786 * including part way through the attach process. For this reason, 3787 * initialization order and the initialization state checks in this 3788 * routine must be carefully coupled so that attach time failures 3789 * are gracefully handled. 
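 *
 *       xbb_detach() therefore sleeps on the softc until xbb_shutdown()
 *       stops returning EAGAIN, i.e. until the front-end has disconnected
 *       and all outstanding backend I/O has drained.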
3790 */ 3791 static int 3792 xbb_detach(device_t dev) 3793 { 3794 struct xbb_softc *xbb; 3795 3796 DPRINTF("\n"); 3797 3798 xbb = device_get_softc(dev); 3799 mtx_lock(&xbb->lock); 3800 while (xbb_shutdown(xbb) == EAGAIN) { 3801 msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0, 3802 "xbb_shutdown", 0); 3803 } 3804 mtx_unlock(&xbb->lock); 3805 3806 DPRINTF("\n"); 3807 3808 if (xbb->io_taskqueue != NULL) 3809 taskqueue_free(xbb->io_taskqueue); 3810 3811 if (xbb->xbb_stats != NULL) 3812 devstat_remove_entry(xbb->xbb_stats); 3813 3814 if (xbb->xbb_stats_in != NULL) 3815 devstat_remove_entry(xbb->xbb_stats_in); 3816 3817 xbb_close_backend(xbb); 3818 3819 if (xbb->dev_mode != NULL) { 3820 free(xbb->dev_mode, M_XENSTORE); 3821 xbb->dev_mode = NULL; 3822 } 3823 3824 if (xbb->dev_type != NULL) { 3825 free(xbb->dev_type, M_XENSTORE); 3826 xbb->dev_type = NULL; 3827 } 3828 3829 if (xbb->dev_name != NULL) { 3830 free(xbb->dev_name, M_XENSTORE); 3831 xbb->dev_name = NULL; 3832 } 3833 3834 mtx_destroy(&xbb->lock); 3835 return (0); 3836 } 3837 3838 /** 3839 * Prepare this block back device for suspension of this VM. 3840 * 3841 * \param dev NewBus device object representing this Xen Block Back instance. 3842 * 3843 * \return 0 for success, errno codes for failure. 3844 */ 3845 static int 3846 xbb_suspend(device_t dev) 3847 { 3848 #ifdef NOT_YET 3849 struct xbb_softc *sc = device_get_softc(dev); 3850 3851 /* Prevent new requests being issued until we fix things up. */ 3852 mtx_lock(&sc->xb_io_lock); 3853 sc->connected = BLKIF_STATE_SUSPENDED; 3854 mtx_unlock(&sc->xb_io_lock); 3855 #endif 3856 3857 return (0); 3858 } 3859 3860 /** 3861 * Perform any processing required to recover from a suspended state. 3862 * 3863 * \param dev NewBus device object representing this Xen Block Back instance. 3864 * 3865 * \return 0 for success, errno codes for failure. 3866 */ 3867 static int 3868 xbb_resume(device_t dev) 3869 { 3870 return (0); 3871 } 3872 3873 /** 3874 * Handle state changes expressed via the XenStore by our front-end peer. 3875 * 3876 * \param dev NewBus device object representing this Xen 3877 * Block Back instance. 3878 * \param frontend_state The new state of the front-end. 3879 * 3880 * \return 0 for success, errno codes for failure. 
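 *
 * \note Initialised and Connected front-end states trigger xbb_connect(),
 *       Closing and Closed trigger xbb_shutdown(), and any unexpected
 *       state is reported via xenbus_dev_fatal().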
3881 */ 3882 static void 3883 xbb_frontend_changed(device_t dev, XenbusState frontend_state) 3884 { 3885 struct xbb_softc *xbb = device_get_softc(dev); 3886 3887 DPRINTF("frontend_state=%s, xbb_state=%s\n", 3888 xenbus_strstate(frontend_state), 3889 xenbus_strstate(xenbus_get_state(xbb->dev))); 3890 3891 switch (frontend_state) { 3892 case XenbusStateInitialising: 3893 break; 3894 case XenbusStateInitialised: 3895 case XenbusStateConnected: 3896 xbb_connect(xbb); 3897 break; 3898 case XenbusStateClosing: 3899 case XenbusStateClosed: 3900 mtx_lock(&xbb->lock); 3901 xbb_shutdown(xbb); 3902 mtx_unlock(&xbb->lock); 3903 if (frontend_state == XenbusStateClosed) 3904 xenbus_set_state(xbb->dev, XenbusStateClosed); 3905 break; 3906 default: 3907 xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend", 3908 frontend_state); 3909 break; 3910 } 3911 } 3912 3913 /*---------------------------- NewBus Registration ---------------------------*/ 3914 static device_method_t xbb_methods[] = { 3915 /* Device interface */ 3916 DEVMETHOD(device_probe, xbb_probe), 3917 DEVMETHOD(device_attach, xbb_attach), 3918 DEVMETHOD(device_detach, xbb_detach), 3919 DEVMETHOD(device_shutdown, bus_generic_shutdown), 3920 DEVMETHOD(device_suspend, xbb_suspend), 3921 DEVMETHOD(device_resume, xbb_resume), 3922 3923 /* Xenbus interface */ 3924 DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed), 3925 3926 { 0, 0 } 3927 }; 3928 3929 static driver_t xbb_driver = { 3930 "xbbd", 3931 xbb_methods, 3932 sizeof(struct xbb_softc), 3933 }; 3934 devclass_t xbb_devclass; 3935 3936 DRIVER_MODULE(xbbd, xenbusb_back, xbb_driver, xbb_devclass, 0, 0); 3937