xref: /freebsd/sys/dev/xen/blkback/blkback.c (revision 119b75925c562202145d7bac7b676b98029c6cb9)
1 /*-
2  * Copyright (c) 2009-2012 Spectra Logic Corporation
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions, and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    substantially similar to the "NO WARRANTY" disclaimer below
13  *    ("Disclaimer") and any redistribution must be conditioned upon
14  *    including a substantially similar Disclaimer requirement for further
15  *    binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGES.
29  *
30  * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
31  *          Ken Merry           (Spectra Logic Corporation)
32  */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /**
37  * \file blkback.c
38  *
39  * \brief Device driver supporting the vending of block storage from
40  *        a FreeBSD domain to other domains.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 
48 #include <sys/bio.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/devicestat.h>
52 #include <sys/disk.h>
53 #include <sys/fcntl.h>
54 #include <sys/filedesc.h>
55 #include <sys/kdb.h>
56 #include <sys/module.h>
57 #include <sys/namei.h>
58 #include <sys/proc.h>
59 #include <sys/rman.h>
60 #include <sys/taskqueue.h>
61 #include <sys/types.h>
62 #include <sys/vnode.h>
63 #include <sys/mount.h>
64 #include <sys/sysctl.h>
65 #include <sys/bitstring.h>
66 #include <sys/sdt.h>
67 
68 #include <geom/geom.h>
69 
70 #include <machine/_inttypes.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_extern.h>
74 #include <vm/vm_kern.h>
75 
76 #include <xen/xen-os.h>
77 #include <xen/blkif.h>
78 #include <xen/gnttab.h>
79 #include <xen/xen_intr.h>
80 
81 #include <xen/interface/event_channel.h>
82 #include <xen/interface/grant_table.h>
83 
84 #include <xen/xenbus/xenbusvar.h>
85 
86 /*--------------------------- Compile-time Tunables --------------------------*/
87 /**
88  * The maximum number of shared memory ring pages we will allow in a
89  * negotiated block-front/back communication channel.  Allow enough
90  * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd.
91  */
92 #define	XBB_MAX_RING_PAGES		32
93 
94 /**
95  * The maximum number of outstanding request blocks (request headers plus
96  * additional segment blocks) we will allow in a negotiated block-front/back
97  * communication channel.
98  */
99 #define	XBB_MAX_REQUESTS 					\
100 	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES)
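
/*
 * Note (derivation, not an independently chosen constant): __CONST_RING_SIZE()
 * comes from the Xen ring headers and evaluates to (roughly) the number of
 * request slots that fit in the given number of bytes, so XBB_MAX_REQUESTS is
 * simply the request capacity of a PAGE_SIZE * XBB_MAX_RING_PAGES byte ring.
 */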
101 
102 /**
103  * \brief Define to force all I/O to be performed on memory owned by the
104  *        backend device, with a copy-in/out to the remote domain's memory.
105  *
106  * \note  This option is currently required when this driver's domain is
107  *        operating in HVM mode on a system using an IOMMU.
108  *
109  * This driver uses Xen's grant table API to gain access to the memory of
110  * the remote domains it serves.  When our domain is operating in PV mode,
111  * the grant table mechanism directly updates our domain's page table entries
112  * to point to the physical pages of the remote domain.  This scheme guarantees
113  * that blkback and the backing devices it uses can safely perform DMA
114  * operations to satisfy requests.  In HVM mode, Xen may use a HW IOMMU to
115  * insure that our domain cannot DMA to pages owned by another domain.  As
116  * of Xen 4.0, IOMMU mappings for HVM guests are not updated via the grant
117  * table API.  For this reason, in HVM mode, we must bounce all requests into
118  * memory that is mapped into our domain at domain startup and thus has
119  * valid IOMMU mappings.
120  */
121 #define XBB_USE_BOUNCE_BUFFERS
122 
123 /**
124  * \brief Define to enable rudimentary request logging to the console.
125  */
126 #undef XBB_DEBUG
127 
128 /*---------------------------------- Macros ----------------------------------*/
129 /**
130  * Custom malloc type for all driver allocations.
131  */
132 static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
133 
134 #ifdef XBB_DEBUG
135 #define DPRINTF(fmt, args...)					\
136     printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
137 #else
138 #define DPRINTF(fmt, args...) do {} while(0)
139 #endif
140 
141 /**
142  * The maximum mapped region size per request we will allow in a negotiated
143  * block-front/back communication channel.
144  */
145 #define	XBB_MAX_REQUEST_SIZE					\
146 	MIN(MAXPHYS, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE)
147 
148 /**
149  * The maximum number of segments (within a request header and accompanying
150  * segment blocks) per request we will allow in a negotiated block-front/back
151  * communication channel.
152  */
153 #define	XBB_MAX_SEGMENTS_PER_REQUEST				\
154 	(MIN(UIO_MAXIOV,					\
155 	     MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,		\
156 		 (XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1)))
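
/*
 * Illustrative arithmetic (assumes 4 KiB pages, a 128 KiB MAXPHYS, and the
 * classic 11-segment BLKIF_MAX_SEGMENTS_PER_REQUEST; the real values follow
 * the platform headers):
 *
 *	XBB_MAX_REQUEST_SIZE         = MIN(128 KiB, 11 * 4 KiB) = 44 KiB
 *	XBB_MAX_SEGMENTS_PER_REQUEST = MIN(UIO_MAXIOV, MIN(11, 44/4 + 1)) = 11
 */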
157 
158 /**
159  * The maximum number of ring pages that we can allow per request list.
160  * We limit this to the maximum number of segments per request, because
161  * that is already a reasonable number of segments to aggregate.  This
162  * number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST,
163  * because that would leave situations where we can't dispatch even one
164  * large request.
165  */
166 #define	XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST
167 
168 /*--------------------------- Forward Declarations ---------------------------*/
169 struct xbb_softc;
170 struct xbb_xen_req;
171 
172 static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
173 			      ...) __attribute__((format(printf, 3, 4)));
174 static int  xbb_shutdown(struct xbb_softc *xbb);
175 static int  xbb_detach(device_t dev);
176 
177 /*------------------------------ Data Structures -----------------------------*/
178 
179 STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req);
180 
181 typedef enum {
182 	XBB_REQLIST_NONE	= 0x00,
183 	XBB_REQLIST_MAPPED	= 0x01
184 } xbb_reqlist_flags;
185 
186 struct xbb_xen_reqlist {
187 	/**
188 	 * Back reference to the parent block back instance for this
189 	 * request.  Used during bio_done handling.
190 	 */
191 	struct xbb_softc        *xbb;
192 
193 	/**
194 	 * BLKIF_OP code for this request.
195 	 */
196 	int			 operation;
197 
198 	/**
199 	 * Set to BLKIF_RSP_* to indicate request status.
200 	 *
201 	 * This field allows an error status to be recorded even if the
202 	 * delivery of this status must be deferred.  Deferred reporting
203 	 * is necessary, for example, when an error is detected during
204 	 * completion processing of one bio when other bios for this
205 	 * request are still outstanding.
206 	 */
207 	int			 status;
208 
209 	/**
210 	 * Number of 512 byte sectors not transferred.
211 	 */
212 	int			 residual_512b_sectors;
213 
214 	/**
215 	 * Starting sector number of the first request in the list.
216 	 */
217 	off_t			 starting_sector_number;
218 
219 	/**
220 	 * If we're going to coalesce, the next contiguous sector would be
221 	 * this one.
222 	 */
223 	off_t			 next_contig_sector;
224 
225 	/**
226 	 * Number of child requests in the list.
227 	 */
228 	int			 num_children;
229 
230 	/**
231 	 * Number of I/O requests still pending on the backend.
232 	 */
233 	int			 pendcnt;
234 
235 	/**
236 	 * Total number of segments for requests in the list.
237 	 */
238 	int			 nr_segments;
239 
240 	/**
241 	 * Flags for this particular request list.
242 	 */
243 	xbb_reqlist_flags	 flags;
244 
245 	/**
246 	 * Kernel virtual address space reserved for this request
247 	 * list structure and used to map the remote domain's pages for
248 	 * this I/O into our domain's address space.
249 	 */
250 	uint8_t			*kva;
251 
252 	/**
253 	 * Base pseudo-physical address corresponding to the start
254 	 * of this request's kva region.
255 	 */
256 	uint64_t	 	 gnt_base;
257 
258 
259 #ifdef XBB_USE_BOUNCE_BUFFERS
260 	/**
261 	 * Pre-allocated domain local memory used to proxy remote
262 	 * domain memory during I/O operations.
263 	 */
264 	uint8_t			*bounce;
265 #endif
266 
267 	/**
268 	 * Array of grant handles (one per page) used to map this request.
269 	 */
270 	grant_handle_t		*gnt_handles;
271 
272 	/**
273 	 * Device statistics request ordering type (ordered or simple).
274 	 */
275 	devstat_tag_type	 ds_tag_type;
276 
277 	/**
278 	 * Device statistics request type (read, write, no_data).
279 	 */
280 	devstat_trans_flags	 ds_trans_type;
281 
282 	/**
283 	 * The start time for this request.
284 	 */
285 	struct bintime		 ds_t0;
286 
287 	/**
288 	 * Linked list of contiguous requests with the same operation type.
289 	 */
290 	struct xbb_xen_req_list	 contig_req_list;
291 
292 	/**
293 	 * Linked list links used to aggregate idle requests in the
294 	 * request list free pool (xbb->reqlist_free_stailq) and pending
295 	 * requests waiting for execution (xbb->reqlist_pending_stailq).
296 	 */
297 	STAILQ_ENTRY(xbb_xen_reqlist) links;
298 };
299 
300 STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist);
301 
302 /**
303  * \brief Object tracking an in-flight I/O from a Xen VBD consumer.
304  */
305 struct xbb_xen_req {
306 	/**
307 	 * Linked list links used to aggregate requests into a reqlist
308 	 * and to store them in the request free pool.
309 	 */
310 	STAILQ_ENTRY(xbb_xen_req) links;
311 
312 	/**
313 	 * The remote domain's identifier for this I/O request.
314 	 */
315 	uint64_t		  id;
316 
317 	/**
318 	 * The number of pages currently mapped for this request.
319 	 */
320 	int			  nr_pages;
321 
322 	/**
323 	 * The number of 512 byte sectors comprising this request.
324 	 */
325 	int			  nr_512b_sectors;
326 
327 	/**
328 	 * BLKIF_OP code for this request.
329 	 */
330 	int			  operation;
331 
332 	/**
333 	 * Storage used for non-native ring requests.
334 	 */
335 	blkif_request_t		 ring_req_storage;
336 
337 	/**
338 	 * Pointer to the Xen request in the ring.
339 	 */
340 	blkif_request_t		*ring_req;
341 
342 	/**
343 	 * Consumer index for this request.
344 	 */
345 	RING_IDX		 req_ring_idx;
346 
347 	/**
348 	 * The start time for this request.
349 	 */
350 	struct bintime		 ds_t0;
351 
352 	/**
353 	 * Pointer back to our parent request list.
354 	 */
355 	struct xbb_xen_reqlist  *reqlist;
356 };
357 SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req);
358 
359 /**
360  * \brief Configuration data for the shared memory request ring
361  *        used to communicate with the front-end client of this
362  *        this driver.
363  */
364 struct xbb_ring_config {
365 	/** KVA address where ring memory is mapped. */
366 	vm_offset_t	va;
367 
368 	/** The pseudo-physical address where ring memory is mapped. */
369 	uint64_t	gnt_addr;
370 
371 	/**
372 	 * Grant table handles, one per-ring page, returned by the
373 	 * hypervisor upon mapping of the ring and required to
374 	 * unmap it when a connection is torn down.
375 	 */
376 	grant_handle_t	handle[XBB_MAX_RING_PAGES];
377 
378 	/**
379 	 * The device bus address returned by the hypervisor when
380 	 * mapping the ring and required to unmap it when a connection
381 	 * is torn down.
382 	 */
383 	uint64_t	bus_addr[XBB_MAX_RING_PAGES];
384 
385 	/** The number of ring pages mapped for the current connection. */
386 	u_int		ring_pages;
387 
388 	/**
389 	 * The grant references, one per-ring page, supplied by the
390 	 * front-end, allowing us to reference the ring pages in the
391 	 * front-end's domain and to map these pages into our own domain.
392 	 */
393 	grant_ref_t	ring_ref[XBB_MAX_RING_PAGES];
394 
395 	/** The interrupt-driven event channel used to signal ring events. */
396 	evtchn_port_t   evtchn;
397 };
398 
399 /**
400  * Per-instance connection state flags.
401  */
402 typedef enum
403 {
404 	/**
405 	 * The front-end requested a read-only mount of the
406 	 * back-end device/file.
407 	 */
408 	XBBF_READ_ONLY         = 0x01,
409 
410 	/** Communication with the front-end has been established. */
411 	XBBF_RING_CONNECTED    = 0x02,
412 
413 	/**
414 	 * Front-end requests exist in the ring and are waiting for
415 	 * xbb_xen_req objects to free up.
416 	 */
417 	XBBF_RESOURCE_SHORTAGE = 0x04,
418 
419 	/** Connection teardown in progress. */
420 	XBBF_SHUTDOWN          = 0x08,
421 
422 	/** A thread is already performing shutdown processing. */
423 	XBBF_IN_SHUTDOWN       = 0x10
424 } xbb_flag_t;
425 
426 /** Backend device type.  */
427 typedef enum {
428 	/** Backend type unknown. */
429 	XBB_TYPE_NONE		= 0x00,
430 
431 	/**
432 	 * Backend type disk (access via cdev switch
433 	 * strategy routine).
434 	 */
435 	XBB_TYPE_DISK		= 0x01,
436 
437 	/** Backend type file (access via vnode operations). */
438 	XBB_TYPE_FILE		= 0x02
439 } xbb_type;
440 
441 /**
442  * \brief Structure used to memoize information about a per-request
443  *        scatter-gather list.
444  *
445  * The chief benefit of using this data structure is it avoids having
446  * to reparse the possibly discontiguous S/G list in the original
447  * request.  Due to the way that the mapping of the memory backing an
448  * I/O transaction is handled by Xen, a second pass is unavoidable.
449  * At least this way the second walk is a simple array traversal.
450  *
451  * \note A single Scatter/Gather element in the block interface covers
452  *       at most 1 machine page.  In this context a sector (blkif
453  *       nomenclature, not what I'd choose) is a 512b aligned unit
454  *       of mapping within the machine page referenced by an S/G
455  *       element.
456  */
457 struct xbb_sg {
458 	/** The number of 512b data chunks mapped in this S/G element. */
459 	int16_t nsect;
460 
461 	/**
462 	 * The index (0 based) of the first 512b data chunk mapped
463 	 * in this S/G element.
464 	 */
465 	uint8_t first_sect;
466 
467 	/**
468 	 * The index (0 based) of the last 512b data chunk mapped
469 	 * in this S/G element.
470 	 */
471 	uint8_t last_sect;
472 };
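
/*
 * Example (illustrative only): an S/G element covering 512b chunks 2
 * through 5 of its 4 KiB page is memoized as first_sect = 2, last_sect = 5,
 * nsect = 4, i.e. a 2 KiB transfer starting 1 KiB into that page.
 */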
473 
474 /**
475  * Character device backend specific configuration data.
476  */
477 struct xbb_dev_data {
478 	/** Cdev used for device backend access.  */
479 	struct cdev   *cdev;
480 
481 	/** Cdev switch used for device backend access.  */
482 	struct cdevsw *csw;
483 
484 	/** Used to hold a reference on opened cdev backend devices. */
485 	int	       dev_ref;
486 };
487 
488 /**
489  * File backend specific configuration data.
490  */
491 struct xbb_file_data {
492 	/** Credentials to use for vnode backed (file based) I/O. */
493 	struct ucred   *cred;
494 
495 	/**
496 	 * \brief Array of io vectors used to process file based I/O.
497 	 *
498 	 * Only a single file based request is outstanding per-xbb instance,
499 	 * so we only need one of these.
500 	 */
501 	struct iovec	xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
502 #ifdef XBB_USE_BOUNCE_BUFFERS
503 
504 	/**
505 	 * \brief Array of io vectors used to handle bouncing of file reads.
506 	 *
507 	 * Vnode operations are free to modify uio data during their
508 	 * exectuion.  In the case of a read with bounce buffering active,
509 	 * execution.  In the case of a read with bounce buffering active,
510 	 * bounce-out the read data.  This array serves as the temporary
511 	 * storage for this saved data.
512 	 */
513 	struct iovec	saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
514 
515 	/**
516 	 * \brief Array of memoized bounce buffer kva offsets used
517 	 *        in the file based backend.
518 	 *
519 	 * Due to the way that the mapping of the memory backing an
520 	 * I/O transaction is handled by Xen, a second pass through
521 	 * the request sg elements is unavoidable. We memoize the computed
522 	 * bounce address here to reduce the cost of the second walk.
523 	 */
524 	void		*xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST];
525 #endif /* XBB_USE_BOUNCE_BUFFERS */
526 };
527 
528 /**
529  * Collection of backend type specific data.
530  */
531 union xbb_backend_data {
532 	struct xbb_dev_data  dev;
533 	struct xbb_file_data file;
534 };
535 
536 /**
537  * Function signature of backend specific I/O handlers.
538  */
539 typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
540 			      struct xbb_xen_reqlist *reqlist, int operation,
541 			      int flags);
542 
543 /**
544  * Per-instance configuration data.
545  */
546 struct xbb_softc {
547 
548 	/**
549 	 * Task-queue used to process I/O requests.
550 	 */
551 	struct taskqueue	 *io_taskqueue;
552 
553 	/**
554 	 * Single "run the request queue" task enqueued
555 	 * on io_taskqueue.
556 	 */
557 	struct task		  io_task;
558 
559 	/** Device type for this instance. */
560 	xbb_type		  device_type;
561 
562 	/** NewBus device corresponding to this instance. */
563 	device_t		  dev;
564 
565 	/** Backend specific dispatch routine for this instance. */
566 	xbb_dispatch_t		  dispatch_io;
567 
568 	/** The number of requests outstanding on the backend device/file. */
569 	int			  active_request_count;
570 
571 	/** Free pool of request tracking structures. */
572 	struct xbb_xen_req_list   request_free_stailq;
573 
574 	/** Array, sized at connection time, of request tracking structures. */
575 	struct xbb_xen_req	 *requests;
576 
577 	/** Free pool of request list structures. */
578 	struct xbb_xen_reqlist_list reqlist_free_stailq;
579 
580 	/** List of pending request lists awaiting execution. */
581 	struct xbb_xen_reqlist_list reqlist_pending_stailq;
582 
583 	/** Array, sized at connection time, of request list structures. */
584 	struct xbb_xen_reqlist	 *request_lists;
585 
586 	/**
587 	 * Global pool of kva used for mapping remote domain ring
588 	 * and I/O transaction data.
589 	 */
590 	vm_offset_t		  kva;
591 
592 	/** Pseudo-physical address corresponding to kva. */
593 	uint64_t		  gnt_base_addr;
594 
595 	/** The size of the global kva pool. */
596 	int			  kva_size;
597 
598 	/** The size of the KVA area used for request lists. */
599 	int			  reqlist_kva_size;
600 
601 	/** The number of pages of KVA used for request lists */
602 	int			  reqlist_kva_pages;
603 
604 	/** Bitmap of free KVA pages */
605 	bitstr_t		 *kva_free;
606 
607 	/**
608 	 * \brief Cached value of the front-end's domain id.
609 	 *
610 	 * This value is used once for each mapped page in
611 	 * a transaction.  We cache it to avoid incurring the
612 	 * cost of an ivar access every time this is needed.
613 	 */
614 	domid_t			  otherend_id;
615 
616 	/**
617 	 * \brief The blkif protocol abi in effect.
618 	 *
619 	 * There are situations where the back and front ends can
620 	 * have a different native abi (e.g. Intel x86_64 and
621 	 * 32-bit x86 domains on the same machine).  The back-end
622 	 * always accommodates the front-end's native abi.  That
623 	 * value is pulled from the XenStore and recorded here.
624 	 */
625 	int			  abi;
626 
627 	/**
628 	 * \brief The maximum number of requests and request lists allowed
629 	 *        to be in flight at a time.
630 	 *
631 	 * This value is negotiated via the XenStore.
632 	 */
633 	u_int			  max_requests;
634 
635 	/**
636 	 * \brief The maximum number of segments (1 page per segment)
637 	 *	  that can be mapped by a request.
638 	 *
639 	 * This value is negotiated via the XenStore.
640 	 */
641 	u_int			  max_request_segments;
642 
643 	/**
644 	 * \brief Maximum number of segments per request list.
645 	 *
646 	 * This value is derived from and will generally be larger than
647 	 * max_request_segments.
648 	 */
649 	u_int			  max_reqlist_segments;
650 
651 	/**
652 	 * The maximum size of any request to this back-end
653 	 * device.
654 	 *
655 	 * This value is negotiated via the XenStore.
656 	 */
657 	u_int			  max_request_size;
658 
659 	/**
660 	 * The maximum size of any request list.  This is derived directly
661 	 * from max_reqlist_segments.
662 	 */
663 	u_int			  max_reqlist_size;
664 
665 	/** Various configuration and state bit flags. */
666 	xbb_flag_t		  flags;
667 
668 	/** Ring mapping and interrupt configuration data. */
669 	struct xbb_ring_config	  ring_config;
670 
671 	/** Runtime, cross-abi safe, structures for ring access. */
672 	blkif_back_rings_t	  rings;
673 
674 	/** IRQ mapping for the communication ring event channel. */
675 	xen_intr_handle_t	  xen_intr_handle;
676 
677 	/**
678 	 * \brief Backend access mode flags (e.g. write, or read-only).
679 	 *
680 	 * This value is passed to us by the front-end via the XenStore.
681 	 */
682 	char			 *dev_mode;
683 
684 	/**
685 	 * \brief Backend device type (e.g. "disk", "cdrom", "floppy").
686 	 *
687 	 * This value is passed to us by the front-end via the XenStore.
688 	 * Currently unused.
689 	 */
690 	char			 *dev_type;
691 
692 	/**
693 	 * \brief Backend device/file identifier.
694 	 *
695 	 * This value is passed to us by the front-end via the XenStore.
696 	 * We expect this to be a POSIX path indicating the file or
697 	 * device to open.
698 	 */
699 	char			 *dev_name;
700 
701 	/**
702 	 * Vnode corresponding to the backend device node or file
703 	 * we are accessing.
704 	 */
705 	struct vnode		 *vn;
706 
707 	union xbb_backend_data	  backend;
708 
709 	/** The native sector size of the backend. */
710 	u_int			  sector_size;
711 
712 	/** log2 of sector_size.  */
713 	u_int			  sector_size_shift;
714 
715 	/** Size in bytes of the backend device or file.  */
716 	off_t			  media_size;
717 
718 	/**
719 	 * \brief media_size expressed in terms of the backend native
720 	 *	  sector size.
721 	 *
722 	 * (e.g. xbb->media_size >> xbb->sector_size_shift).
723 	 */
724 	uint64_t		  media_num_sectors;
725 
726 	/**
727 	 * \brief Array of memoized scatter gather data computed during the
728 	 *	  conversion of blkif ring requests to internal xbb_xen_req
729 	 *	  structures.
730 	 *
731 	 * Ring processing is serialized so we only need one of these.
732 	 */
733 	struct xbb_sg		  xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST];
734 
735 	/**
736 	 * Temporary grant table map used in xbb_dispatch_io().  When
737 	 * XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the
738 	 * stack could cause a stack overflow.
739 	 */
740 	struct gnttab_map_grant_ref   maps[XBB_MAX_SEGMENTS_PER_REQLIST];
741 
742 	/** Mutex protecting per-instance data. */
743 	struct mtx		  lock;
744 
745 	/**
746 	 * Resource representing allocated physical address space
747 	 * associated with our per-instance kva region.
748 	 */
749 	struct resource		 *pseudo_phys_res;
750 
751 	/** Resource id for allocated physical address space. */
752 	int			  pseudo_phys_res_id;
753 
754 	/**
755 	 * I/O statistics from BlockBack dispatch down.  These are
756 	 * coalesced requests, and we start them right before execution.
757 	 */
758 	struct devstat		 *xbb_stats;
759 
760 	/**
761 	 * I/O statistics coming into BlockBack.  These are the requests as
762 	 * we get them from BlockFront.  They are started as soon as we
763 	 * receive a request, and completed when the I/O is complete.
764 	 */
765 	struct devstat		 *xbb_stats_in;
766 
767 	/** Disable sending flush to the backend */
768 	int			  disable_flush;
769 
770 	/** Send a real flush for every N flush requests */
771 	int			  flush_interval;
772 
773 	/** Count of flush requests in the interval */
774 	int			  flush_count;
775 
776 	/** Don't coalesce requests if this is set */
777 	int			  no_coalesce_reqs;
778 
779 	/** Number of requests we have received */
780 	uint64_t		  reqs_received;
781 
782 	/** Number of requests we have completed */
783 	uint64_t		  reqs_completed;
784 
785 	/** Number of requests we queued but not pushed */
786 	uint64_t		  reqs_queued_for_completion;
787 
788 	/** Number of requests we completed with an error status */
789 	uint64_t		  reqs_completed_with_error;
790 
791 	/** How many forced dispatches (i.e. without coalescing) have happened */
792 	uint64_t		  forced_dispatch;
793 
794 	/** How many normal dispatches have happened */
795 	uint64_t		  normal_dispatch;
796 
797 	/** How many total dispatches have happened */
798 	uint64_t		  total_dispatch;
799 
800 	/** How many times we have run out of KVA */
801 	uint64_t		  kva_shortages;
802 
803 	/** How many times we have run out of request structures */
804 	uint64_t		  request_shortages;
805 };
806 
807 /*---------------------------- Request Processing ----------------------------*/
808 /**
809  * Allocate an internal transaction tracking structure from the free pool.
810  *
811  * \param xbb  Per-instance xbb configuration structure.
812  *
813  * \return  On success, a pointer to the allocated xbb_xen_req structure.
814  *          Otherwise NULL.
815  */
816 static inline struct xbb_xen_req *
817 xbb_get_req(struct xbb_softc *xbb)
818 {
819 	struct xbb_xen_req *req;
820 
821 	req = NULL;
822 
823 	mtx_assert(&xbb->lock, MA_OWNED);
824 
825 	if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
826 		STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
827 		xbb->active_request_count++;
828 	}
829 
830 	return (req);
831 }
832 
833 /**
834  * Return an allocated transaction tracking structure to the free pool.
835  *
836  * \param xbb  Per-instance xbb configuration structure.
837  * \param req  The request structure to free.
838  */
839 static inline void
840 xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
841 {
842 	mtx_assert(&xbb->lock, MA_OWNED);
843 
844 	STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
845 	xbb->active_request_count--;
846 
847 	KASSERT(xbb->active_request_count >= 0,
848 		("xbb_release_req: negative active count"));
849 }
850 
851 /**
852  * Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool.
853  *
854  * \param xbb	    Per-instance xbb configuration structure.
855  * \param req_list  The list of requests to free.
856  * \param nreqs	    The number of items in the list.
857  */
858 static inline void
859 xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
860 		 int nreqs)
861 {
862 	mtx_assert(&xbb->lock, MA_OWNED);
863 
864 	STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
865 	xbb->active_request_count -= nreqs;
866 
867 	KASSERT(xbb->active_request_count >= 0,
868 		("xbb_release_reqs: negative active count"));
869 }
870 
871 /**
872  * Given a page index and 512b sector offset within that page,
873  * calculate an offset into a request's kva region.
874  *
875  * \param reqlist The request structure whose kva region will be accessed.
876  * \param pagenr  The page index used to compute the kva offset.
877  * \param sector  The 512b sector index used to compute the page relative
878  *                kva offset.
879  *
880  * \return  The computed global KVA offset.
881  */
882 static inline uint8_t *
883 xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
884 {
885 	return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
886 }
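
/*
 * Worked example (assuming 4 KiB pages): xbb_reqlist_vaddr(reqlist, 2, 3)
 * returns reqlist->kva + 2 * 4096 + 3 * 512, the address 1.5 KiB into the
 * third page mapped for this request list.
 */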
887 
888 #ifdef XBB_USE_BOUNCE_BUFFERS
889 /**
890  * Given a page index and 512b sector offset within that page,
891  * calculate an offset into a request's local bounce memory region.
892  *
893  * \param reqlist The request structure whose bounce region will be accessed.
894  * \param pagenr  The page index used to compute the bounce offset.
895  * \param sector  The 512b sector index used to compute the page relative
896  *                bounce offset.
897  *
898  * \return  The computed global bounce buffer address.
899  */
900 static inline uint8_t *
901 xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
902 {
903 	return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
904 }
905 #endif
906 
907 /**
908  * Given a page number and 512b sector offset within that page,
909  * calculate an offset into the request's memory region that the
910  * underlying backend device/file should use for I/O.
911  *
912  * \param reqlist The request structure whose I/O region will be accessed.
913  * \param pagenr  The page index used to compute the I/O offset.
914  * \param sector  The 512b sector index used to compute the page relative
915  *                I/O offset.
916  *
917  * \return  The computed global I/O address.
918  *
919  * Depending on configuration, this will either be a local bounce buffer
920  * or a pointer to the memory mapped in from the front-end domain for
921  * this request.
922  */
923 static inline uint8_t *
924 xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
925 {
926 #ifdef XBB_USE_BOUNCE_BUFFERS
927 	return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
928 #else
929 	return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
930 #endif
931 }
932 
933 /**
934  * Given a page index and 512b sector offset within that page, calculate
935  * an offset into the local pseudo-physical address space used to map a
936  * front-end's request data into a request.
937  *
938  * \param reqlist The request list structure whose pseudo-physical region
939  *                will be accessed.
940  * \param pagenr  The page index used to compute the pseudo-physical offset.
941  * \param sector  The 512b sector index used to compute the page relative
942  *                pseudo-physical offset.
943  *
944  * \return  The computed global pseudo-physical address.
949  */
950 static inline uintptr_t
951 xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
952 {
953 	struct xbb_softc *xbb;
954 
955 	xbb = reqlist->xbb;
956 
957 	return ((uintptr_t)(xbb->gnt_base_addr +
958 		(uintptr_t)(reqlist->kva - xbb->kva) +
959 		(PAGE_SIZE * pagenr) + (sector << 9)));
960 }
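
/*
 * Illustrative sketch: the pseudo-physical space mirrors the KVA layout, so
 * for a reqlist whose kva begins one page into the pool,
 * xbb_get_gntaddr(reqlist, 1, 0) evaluates to
 * gnt_base_addr + PAGE_SIZE (reqlist offset) + PAGE_SIZE (page index 1).
 */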
961 
962 /**
963  * Get Kernel Virtual Address space for mapping requests.
964  *
965  * \param xbb         Per-instance xbb configuration structure.
966  * \param nr_pages    Number of pages needed.
969  *
970  * \return  On success, a pointer to the allocated KVA region.  Otherwise NULL.
971  *
972  * Note:  This should be unnecessary once we have either chaining or
973  * scatter/gather support for struct bio.  At that point we'll be able to
974  * put multiple addresses and lengths in one bio/bio chain and won't need
975  * to map everything into one virtual segment.
976  */
977 static uint8_t *
978 xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
979 {
980 	intptr_t first_clear;
981 	intptr_t num_clear;
982 	uint8_t *free_kva;
983 	int      i;
984 
985 	KASSERT(nr_pages != 0, ("xbb_get_kva of zero length"));
986 
987 	first_clear = 0;
988 	free_kva = NULL;
989 
990 	mtx_lock(&xbb->lock);
991 
992 	/*
993 	 * Look for the first available page.  If there are none, we're done.
994 	 */
995 	bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);
996 
997 	if (first_clear == -1)
998 		goto bailout;
999 
1000 	/*
1001 	 * Starting at the first available page, look for consecutive free
1002 	 * pages that will satisfy the user's request.
1003 	 */
1004 	for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
1005 		/*
1006 		 * If this is true, the page is used, so we have to reset
1007 		 * the number of clear pages and the first clear page
1008 		 * (since it pointed to a region with an insufficient number
1009 		 * of clear pages).
1010 		 */
1011 		if (bit_test(xbb->kva_free, i)) {
1012 			num_clear = 0;
1013 			first_clear = -1;
1014 			continue;
1015 		}
1016 
1017 		if (first_clear == -1)
1018 			first_clear = i;
1019 
1020 		/*
1021 		 * If this is true, we've found a large enough free region
1022 		 * to satisfy the request.
1023 		 */
1024 		if (++num_clear == nr_pages) {
1025 
1026 			bit_nset(xbb->kva_free, first_clear,
1027 				 first_clear + nr_pages - 1);
1028 
1029 			free_kva = (uint8_t *)xbb->kva +
1030 				(first_clear * PAGE_SIZE);
1031 
1032 			KASSERT(free_kva >= (uint8_t *)xbb->kva &&
1033 				free_kva + (nr_pages * PAGE_SIZE) <=
1034 				(uint8_t *)xbb->ring_config.va,
1035 				("Free KVA %p len %d out of range, "
1036 				 "kva = %#jx, ring VA = %#jx\n", free_kva,
1037 				 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
1038 				 (uintmax_t)xbb->ring_config.va));
1039 			break;
1040 		}
1041 	}
1042 
1043 bailout:
1044 
1045 	if (free_kva == NULL) {
1046 		xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1047 		xbb->kva_shortages++;
1048 	}
1049 
1050 	mtx_unlock(&xbb->lock);
1051 
1052 	return (free_kva);
1053 }
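
/*
 * Minimal usage sketch (hypothetical caller; it mirrors what
 * xbb_dispatch_io() and xbb_release_reqlist() actually do).  The allocation
 * is made without xbb->lock held, while xbb_free_kva() expects it held; a
 * NULL return means XBBF_RESOURCE_SHORTAGE has already been flagged:
 *
 *	uint8_t *kva = xbb_get_kva(xbb, nr_pages);
 *	if (kva == NULL)
 *		return (ENOMEM);
 *	... map grants into kva and perform the I/O ...
 *	mtx_lock(&xbb->lock);
 *	xbb_free_kva(xbb, kva, nr_pages);
 *	mtx_unlock(&xbb->lock);
 */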
1054 
1055 /**
1056  * Free allocated KVA.
1057  *
1058  * \param xbb	    Per-instance xbb configuration structure.
1059  * \param kva_ptr   Pointer to allocated KVA region.
1060  * \param nr_pages  Number of pages in the KVA region.
1061  */
1062 static void
1063 xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
1064 {
1065 	intptr_t start_page;
1066 
1067 	mtx_assert(&xbb->lock, MA_OWNED);
1068 
1069 	start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
1070 	bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);
1071 
1072 }
1073 
1074 /**
1075  * Unmap the front-end pages associated with this I/O request.
1076  *
1077  * \param reqlist  The request list structure to unmap.
1078  */
1079 static void
1080 xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
1081 {
1082 	struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST];
1083 	u_int			      i;
1084 	u_int			      invcount;
1085 	int			      error;
1086 
1087 	invcount = 0;
1088 	for (i = 0; i < reqlist->nr_segments; i++) {
1089 
1090 		if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
1091 			continue;
1092 
1093 		unmap[invcount].host_addr    = xbb_get_gntaddr(reqlist, i, 0);
1094 		unmap[invcount].dev_bus_addr = 0;
1095 		unmap[invcount].handle       = reqlist->gnt_handles[i];
1096 		reqlist->gnt_handles[i]	     = GRANT_REF_INVALID;
1097 		invcount++;
1098 	}
1099 
1100 	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
1101 					  unmap, invcount);
1102 	KASSERT(error == 0, ("Grant table operation failed"));
1103 }
1104 
1105 /**
1106  * Allocate an internal transaction tracking structure from the free pool.
1107  *
1108  * \param xbb  Per-instance xbb configuration structure.
1109  *
1110  * \return  On success, a pointer to the allocated xbb_xen_reqlist structure.
1111  *          Otherwise NULL.
1112  */
1113 static inline struct xbb_xen_reqlist *
1114 xbb_get_reqlist(struct xbb_softc *xbb)
1115 {
1116 	struct xbb_xen_reqlist *reqlist;
1117 
1118 	reqlist = NULL;
1119 
1120 	mtx_assert(&xbb->lock, MA_OWNED);
1121 
1122 	if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1123 
1124 		STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
1125 		reqlist->flags = XBB_REQLIST_NONE;
1126 		reqlist->kva = NULL;
1127 		reqlist->status = BLKIF_RSP_OKAY;
1128 		reqlist->residual_512b_sectors = 0;
1129 		reqlist->num_children = 0;
1130 		reqlist->nr_segments = 0;
1131 		STAILQ_INIT(&reqlist->contig_req_list);
1132 	}
1133 
1134 	return (reqlist);
1135 }
1136 
1137 /**
1138  * Return an allocated transaction tracking structure to the free pool.
1139  *
1140  * \param xbb        Per-instance xbb configuration structure.
1141  * \param reqlist    The request list structure to free.
1142  * \param wakeup     If set, wake up the work thread if freeing this reqlist
1143  *                   during a resource shortage condition.
1144  */
1145 static inline void
1146 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1147 		    int wakeup)
1148 {
1149 
1150 	mtx_assert(&xbb->lock, MA_OWNED);
1151 
1152 	if (wakeup) {
1153 		wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
1154 		xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
1155 	}
1156 
1157 	if (reqlist->kva != NULL)
1158 		xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1159 
1160 	xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1161 
1162 	STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
1163 
1164 	if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1165 		/*
1166 		 * Shutdown is in progress.  See if we can
1167 		 * progress further now that one more request
1168 		 * has completed and been returned to the
1169 		 * free pool.
1170 		 */
1171 		xbb_shutdown(xbb);
1172 	}
1173 
1174 	if (wakeup != 0)
1175 		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1176 }
1177 
1178 /**
1179  * Request resources and do basic request setup.
1180  *
1181  * \param xbb          Per-instance xbb configuration structure.
1182  * \param reqlist      Pointer to reqlist pointer.
1183  * \param ring_req     Pointer to a block ring request.
1184  * \param ring_idx     The ring index of this request.
1185  *
1186  * \return  0 for success, non-zero for failure.
1187  */
1188 static int
1189 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1190 		  blkif_request_t *ring_req, RING_IDX ring_idx)
1191 {
1192 	struct xbb_xen_reqlist *nreqlist;
1193 	struct xbb_xen_req     *nreq;
1194 
1195 	nreqlist = NULL;
1196 	nreq     = NULL;
1197 
1198 	mtx_lock(&xbb->lock);
1199 
1200 	/*
1201 	 * We don't allow new resources to be allocated if we're in the
1202 	 * process of shutting down.
1203 	 */
1204 	if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1205 		mtx_unlock(&xbb->lock);
1206 		return (1);
1207 	}
1208 
1209 	/*
1210 	 * Allocate a reqlist if the caller doesn't have one already.
1211 	 */
1212 	if (*reqlist == NULL) {
1213 		nreqlist = xbb_get_reqlist(xbb);
1214 		if (nreqlist == NULL)
1215 			goto bailout_error;
1216 	}
1217 
1218 	/* We always allocate a request. */
1219 	nreq = xbb_get_req(xbb);
1220 	if (nreq == NULL)
1221 		goto bailout_error;
1222 
1223 	mtx_unlock(&xbb->lock);
1224 
1225 	if (*reqlist == NULL) {
1226 		*reqlist = nreqlist;
1227 		nreqlist->operation = ring_req->operation;
1228 		nreqlist->starting_sector_number = ring_req->sector_number;
1229 		STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
1230 				   links);
1231 	}
1232 
1233 	nreq->reqlist = *reqlist;
1234 	nreq->req_ring_idx = ring_idx;
1235 	nreq->id = ring_req->id;
1236 	nreq->operation = ring_req->operation;
1237 
1238 	if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
1239 		bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req));
1240 		nreq->ring_req = &nreq->ring_req_storage;
1241 	} else {
1242 		nreq->ring_req = ring_req;
1243 	}
1244 
1245 	binuptime(&nreq->ds_t0);
1246 	devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
1247 	STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
1248 	(*reqlist)->num_children++;
1249 	(*reqlist)->nr_segments += ring_req->nr_segments;
1250 
1251 	return (0);
1252 
1253 bailout_error:
1254 
1255 	/*
1256 	 * We're out of resources, so set the shortage flag.  The next time
1257 	 * a request is released, we'll try waking up the work thread to
1258 	 * see if we can allocate more resources.
1259 	 */
1260 	xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1261 	xbb->request_shortages++;
1262 
1263 	if (nreq != NULL)
1264 		xbb_release_req(xbb, nreq);
1265 
1266 	if (nreqlist != NULL)
1267 		xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
1268 
1269 	mtx_unlock(&xbb->lock);
1270 
1271 	return (1);
1272 }
1273 
1274 /**
1275  * Create and queue a response to a blkif request.
1276  *
1277  * \param xbb     Per-instance xbb configuration structure.
1278  * \param req     The request structure to which to respond.
1279  * \param status  The status code to report.  See BLKIF_RSP_*
1280  *                in sys/xen/interface/io/blkif.h.
1281  */
1282 static void
1283 xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
1284 {
1285 	blkif_response_t *resp;
1286 
1287 	/*
1288 	 * The mutex is required here, and should be held across this call
1289 	 * until after the subsequent call to xbb_push_responses().  This
1290 	 * is to guarantee that another context won't queue responses and
1291 	 * push them while we're active.
1292 	 *
1293 	 * That could lead to the other end being notified of responses
1294 	 * before the resources have been freed on this end.  The other end
1295 	 * would then be able to queue additional I/O, and we may run out
1296  	 * of resources because we haven't freed them all yet.
1297 	 */
1298 	mtx_assert(&xbb->lock, MA_OWNED);
1299 
1300 	/*
1301 	 * Place on the response ring for the relevant domain.
1302 	 * For now, only the spacing between entries is different
1303 	 * in the different ABIs, not the response entry layout.
1304 	 */
1305 	switch (xbb->abi) {
1306 	case BLKIF_PROTOCOL_NATIVE:
1307 		resp = RING_GET_RESPONSE(&xbb->rings.native,
1308 					 xbb->rings.native.rsp_prod_pvt);
1309 		break;
1310 	case BLKIF_PROTOCOL_X86_32:
1311 		resp = (blkif_response_t *)
1312 		    RING_GET_RESPONSE(&xbb->rings.x86_32,
1313 				      xbb->rings.x86_32.rsp_prod_pvt);
1314 		break;
1315 	case BLKIF_PROTOCOL_X86_64:
1316 		resp = (blkif_response_t *)
1317 		    RING_GET_RESPONSE(&xbb->rings.x86_64,
1318 				      xbb->rings.x86_64.rsp_prod_pvt);
1319 		break;
1320 	default:
1321 		panic("Unexpected blkif protocol ABI.");
1322 	}
1323 
1324 	resp->id        = req->id;
1325 	resp->operation = req->operation;
1326 	resp->status    = status;
1327 
1328 	if (status != BLKIF_RSP_OKAY)
1329 		xbb->reqs_completed_with_error++;
1330 
1331 	xbb->rings.common.rsp_prod_pvt++;
1332 
1333 	xbb->reqs_queued_for_completion++;
1334 
1335 }
1336 
1337 /**
1338  * Send queued responses to blkif requests.
1339  *
1340  * \param xbb            Per-instance xbb configuration structure.
1341  * \param run_taskqueue  Flag that is set to 1 if the taskqueue
1342  *			 should be run, 0 if it does not need to be run.
1343  * \param notify	 Flag that is set to 1 if the other end should be
1344  * 			 notified via irq, 0 if the other end should not be
1345  *			 notified.
1346  */
1347 static void
1348 xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
1349 {
1350 	int more_to_do;
1351 
1352 	/*
1353 	 * The mutex is required here.
1354 	 */
1355 	mtx_assert(&xbb->lock, MA_OWNED);
1356 
1357 	more_to_do = 0;
1358 
1359 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);
1360 
1361 	if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {
1362 
1363 		/*
1364 		 * Tail check for pending requests. Allows frontend to avoid
1365 		 * notifications if requests are already in flight (lower
1366 		 * overheads and promotes batching).
1367 		 */
1368 		RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
1369 	} else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {
1370 
1371 		more_to_do = 1;
1372 	}
1373 
1374 	xbb->reqs_completed += xbb->reqs_queued_for_completion;
1375 	xbb->reqs_queued_for_completion = 0;
1376 
1377 	*run_taskqueue = more_to_do;
1378 }
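
/*
 * Usage sketch (illustrative; see xbb_complete_reqlist() below for the real
 * sequence): xbb_queue_response() and xbb_push_responses() are used as a
 * pair under xbb->lock, with any notification sent after the lock is dropped:
 *
 *	mtx_lock(&xbb->lock);
 *	xbb_queue_response(xbb, nreq, status);
 *	xbb_push_responses(xbb, &run_taskqueue, &notify);
 *	mtx_unlock(&xbb->lock);
 *	if (notify)
 *		xen_intr_signal(xbb->xen_intr_handle);
 */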
1379 
1380 /**
1381  * Complete a request list.
1382  *
1383  * \param xbb        Per-instance xbb configuration structure.
1384  * \param reqlist    Allocated internal request list structure.
1385  */
1386 static void
1387 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1388 {
1389 	struct xbb_xen_req *nreq;
1390 	off_t		    sectors_sent;
1391 	int		    notify, run_taskqueue;
1392 
1393 	sectors_sent = 0;
1394 
1395 	if (reqlist->flags & XBB_REQLIST_MAPPED)
1396 		xbb_unmap_reqlist(reqlist);
1397 
1398 	mtx_lock(&xbb->lock);
1399 
1400 	/*
1401 	 * All I/O is done, send the response. A lock is not necessary
1402 	 * to protect the request list, because all requests have
1403 	 * completed.  Therefore this is the only context accessing this
1404 	 * reqlist right now.  However, in order to make sure that no one
1405 	 * else queues responses onto the queue or pushes them to the other
1406 	 * side while we're active, we need to hold the lock across the
1407 	 * calls to xbb_queue_response() and xbb_push_responses().
1408 	 */
1409 	STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1410 		off_t cur_sectors_sent;
1411 
1412 		/* Put this response on the ring, but don't push yet */
1413 		xbb_queue_response(xbb, nreq, reqlist->status);
1414 
1415 		/* We don't report bytes sent if there is an error. */
1416 		if (reqlist->status == BLKIF_RSP_OKAY)
1417 			cur_sectors_sent = nreq->nr_512b_sectors;
1418 		else
1419 			cur_sectors_sent = 0;
1420 
1421 		sectors_sent += cur_sectors_sent;
1422 
1423 		devstat_end_transaction(xbb->xbb_stats_in,
1424 					/*bytes*/cur_sectors_sent << 9,
1425 					reqlist->ds_tag_type,
1426 					reqlist->ds_trans_type,
1427 					/*now*/NULL,
1428 					/*then*/&nreq->ds_t0);
1429 	}
1430 
1431 	/*
1432 	 * Take out any sectors not sent.  If we wind up negative (which
1433 	 * might happen if an error is reported as well as a residual), just
1434 	 * report 0 sectors sent.
1435 	 */
1436 	sectors_sent -= reqlist->residual_512b_sectors;
1437 	if (sectors_sent < 0)
1438 		sectors_sent = 0;
1439 
1440 	devstat_end_transaction(xbb->xbb_stats,
1441 				/*bytes*/ sectors_sent << 9,
1442 				reqlist->ds_tag_type,
1443 				reqlist->ds_trans_type,
1444 				/*now*/NULL,
1445 				/*then*/&reqlist->ds_t0);
1446 
1447 	xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
1448 
1449 	xbb_push_responses(xbb, &run_taskqueue, &notify);
1450 
1451 	mtx_unlock(&xbb->lock);
1452 
1453 	if (run_taskqueue)
1454 		taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1455 
1456 	if (notify)
1457 		xen_intr_signal(xbb->xen_intr_handle);
1458 }
1459 
1460 /**
1461  * Completion handler for buffer I/O requests issued by the device
1462  * backend driver.
1463  *
1464  * \param bio  The buffer I/O request on which to perform completion
1465  *             processing.
1466  */
1467 static void
1468 xbb_bio_done(struct bio *bio)
1469 {
1470 	struct xbb_softc       *xbb;
1471 	struct xbb_xen_reqlist *reqlist;
1472 
1473 	reqlist = bio->bio_caller1;
1474 	xbb     = reqlist->xbb;
1475 
1476 	reqlist->residual_512b_sectors += bio->bio_resid >> 9;
1477 
1478 	/*
1479 	 * This is a bit imprecise.  With aggregated I/O a single
1480 	 * request list can contain multiple front-end requests and
1481 	 * multiple bios may point to a single request.  By carefully
1482 	 * walking the request list, we could map residuals and errors
1483 	 * back to the original front-end request, but the interface
1484 	 * isn't sufficiently rich for us to properly report the error.
1485 	 * So, we just treat the entire request list as having failed if an
1486 	 * error occurs on any part.  And, if an error occurs, we treat
1487 	 * the amount of data transferred as 0.
1488 	 *
1489 	 * For residuals, we report it on the overall aggregated device,
1490 	 * but not on the individual requests, since we don't currently
1491 	 * do the work to determine which front-end request to which the
1492 	 * residual applies.
1493 	 */
1494 	if (bio->bio_error) {
1495 		DPRINTF("BIO returned error %d for operation on device %s\n",
1496 			bio->bio_error, xbb->dev_name);
1497 		reqlist->status = BLKIF_RSP_ERROR;
1498 
1499 		if (bio->bio_error == ENXIO
1500 		 && xenbus_get_state(xbb->dev) == XenbusStateConnected) {
1501 
1502 			/*
1503 			 * Backend device has disappeared.  Signal the
1504 			 * front-end that we (the device proxy) want to
1505 			 * go away.
1506 			 */
1507 			xenbus_set_state(xbb->dev, XenbusStateClosing);
1508 		}
1509 	}
1510 
1511 #ifdef XBB_USE_BOUNCE_BUFFERS
1512 	if (bio->bio_cmd == BIO_READ) {
1513 		vm_offset_t kva_offset;
1514 
1515 		kva_offset = (vm_offset_t)bio->bio_data
1516 			   - (vm_offset_t)reqlist->bounce;
1517 		memcpy((uint8_t *)reqlist->kva + kva_offset,
1518 		       bio->bio_data, bio->bio_bcount);
1519 	}
1520 #endif /* XBB_USE_BOUNCE_BUFFERS */
1521 
1522 	/*
1523 	 * Decrement the pending count for the request list.  When we're
1524 	 * done with the requests, send status back for all of them.
1525 	 */
1526 	if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
1527 		xbb_complete_reqlist(xbb, reqlist);
1528 
1529 	g_destroy_bio(bio);
1530 }
1531 
1532 /**
1533  * Parse a blkif request into an internal request structure and send
1534  * it to the backend for processing.
1535  *
1536  * \param xbb       Per-instance xbb configuration structure.
1537  * \param reqlist   Allocated internal request list structure.
1538  *
1539  * \return          On success, 0.  For resource shortages, non-zero.
1540  *
1541  * This routine performs the backend common aspects of request parsing
1542  * including compiling an internal request structure, parsing the S/G
1543  * list and any secondary ring requests in which they may reside, and
1544  * the mapping of front-end I/O pages into our domain.
1545  */
1546 static int
1547 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1548 {
1549 	struct xbb_sg                *xbb_sg;
1550 	struct gnttab_map_grant_ref  *map;
1551 	struct blkif_request_segment *sg;
1552 	struct blkif_request_segment *last_block_sg;
1553 	struct xbb_xen_req	     *nreq;
1554 	u_int			      nseg;
1555 	u_int			      seg_idx;
1556 	u_int			      block_segs;
1557 	int			      nr_sects;
1558 	int			      total_sects;
1559 	int			      operation;
1560 	uint8_t			      bio_flags;
1561 	int			      error;
1562 
1563 	reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1564 	bio_flags            = 0;
1565 	total_sects	     = 0;
1566 	nr_sects	     = 0;
1567 
1568 	/*
1569 	 * First determine whether we have enough free KVA to satisfy this
1570 	 * request list.  If not, tell xbb_run_queue() so it can go to
1571 	 * sleep until we have more KVA.
1572 	 */
1573 	reqlist->kva = NULL;
1574 	if (reqlist->nr_segments != 0) {
1575 		reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1576 		if (reqlist->kva == NULL) {
1577 			/*
1578 			 * If we're out of KVA, return ENOMEM.
1579 			 */
1580 			return (ENOMEM);
1581 		}
1582 	}
1583 
1584 	binuptime(&reqlist->ds_t0);
1585 	devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1586 
1587 	switch (reqlist->operation) {
1588 	case BLKIF_OP_WRITE_BARRIER:
1589 		bio_flags       |= BIO_ORDERED;
1590 		reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1591 		/* FALLTHROUGH */
1592 	case BLKIF_OP_WRITE:
1593 		operation = BIO_WRITE;
1594 		reqlist->ds_trans_type = DEVSTAT_WRITE;
1595 		if ((xbb->flags & XBBF_READ_ONLY) != 0) {
1596 			DPRINTF("Attempt to write to read only device %s\n",
1597 				xbb->dev_name);
1598 			reqlist->status = BLKIF_RSP_ERROR;
1599 			goto send_response;
1600 		}
1601 		break;
1602 	case BLKIF_OP_READ:
1603 		operation = BIO_READ;
1604 		reqlist->ds_trans_type = DEVSTAT_READ;
1605 		break;
1606 	case BLKIF_OP_FLUSH_DISKCACHE:
1607 		/*
1608 		 * If this is true, the user has requested that we disable
1609 		 * flush support.  So we just complete the requests
1610 		 * successfully.
1611 		 */
1612 		if (xbb->disable_flush != 0) {
1613 			goto send_response;
1614 		}
1615 
1616 		/*
1617 		 * The user has requested that we only send a real flush
1618 		 * for every N flush requests.  So keep count, and either
1619 		 * complete the request immediately or queue it for the
1620 		 * backend.
1621 		 */
1622 		if (xbb->flush_interval != 0) {
1623 		 	if (++(xbb->flush_count) < xbb->flush_interval) {
1624 				goto send_response;
1625 			} else
1626 				xbb->flush_count = 0;
1627 		}
1628 
1629 		operation = BIO_FLUSH;
1630 		reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1631 		reqlist->ds_trans_type = DEVSTAT_NO_DATA;
1632 		goto do_dispatch;
1633 		/*NOTREACHED*/
1634 	default:
1635 		DPRINTF("error: unknown block io operation [%d]\n",
1636 			reqlist->operation);
1637 		reqlist->status = BLKIF_RSP_ERROR;
1638 		goto send_response;
1639 	}
1640 
1641 	reqlist->xbb  = xbb;
1642 	xbb_sg        = xbb->xbb_sgs;
1643 	map	      = xbb->maps;
1644 	seg_idx	      = 0;
1645 
1646 	STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1647 		blkif_request_t		*ring_req;
1648 		RING_IDX		 req_ring_idx;
1649 		u_int			 req_seg_idx;
1650 
1651 		ring_req	      = nreq->ring_req;
1652 		req_ring_idx	      = nreq->req_ring_idx;
1653 		nr_sects              = 0;
1654 		nseg                  = ring_req->nr_segments;
1655 		nreq->nr_pages        = nseg;
1656 		nreq->nr_512b_sectors = 0;
1657 		req_seg_idx	      = 0;
1658 		sg	              = NULL;
1659 
1660 		/* Check that number of segments is sane. */
1661 		if (__predict_false(nseg == 0)
1662 		 || __predict_false(nseg > xbb->max_request_segments)) {
1663 			DPRINTF("Bad number of segments in request (%d)\n",
1664 				nseg);
1665 			reqlist->status = BLKIF_RSP_ERROR;
1666 			goto send_response;
1667 		}
1668 
1669 		block_segs    = nseg;
1670 		sg            = ring_req->seg;
1671 		last_block_sg = sg + block_segs;
1672 
1673 		while (sg < last_block_sg) {
1674 			KASSERT(seg_idx <
1675 				XBB_MAX_SEGMENTS_PER_REQLIST,
1676 				("seg_idx %d is too large, max "
1677 				"segs %d\n", seg_idx,
1678 				XBB_MAX_SEGMENTS_PER_REQLIST));
1679 
1680 			xbb_sg->first_sect = sg->first_sect;
1681 			xbb_sg->last_sect  = sg->last_sect;
1682 			xbb_sg->nsect =
1683 			    (int8_t)(sg->last_sect -
1684 			    sg->first_sect + 1);
1685 
1686 			if ((sg->last_sect >= (PAGE_SIZE >> 9))
1687 			 || (xbb_sg->nsect <= 0)) {
1688 				reqlist->status = BLKIF_RSP_ERROR;
1689 				goto send_response;
1690 			}
1691 
1692 			nr_sects += xbb_sg->nsect;
1693 			map->host_addr = xbb_get_gntaddr(reqlist,
1694 						seg_idx, /*sector*/0);
1695 			KASSERT(map->host_addr + PAGE_SIZE <=
1696 				xbb->ring_config.gnt_addr,
1697 				("Host address %#jx len %d overlaps "
1698 				 "ring address %#jx\n",
1699 				(uintmax_t)map->host_addr, PAGE_SIZE,
1700 				(uintmax_t)xbb->ring_config.gnt_addr));
1701 
1702 			map->flags     = GNTMAP_host_map;
1703 			map->ref       = sg->gref;
1704 			map->dom       = xbb->otherend_id;
1705 			if (operation == BIO_WRITE)
1706 				map->flags |= GNTMAP_readonly;
1707 			sg++;
1708 			map++;
1709 			xbb_sg++;
1710 			seg_idx++;
1711 			req_seg_idx++;
1712 		}
1713 
1714 		/* Convert to the disk's sector size */
1715 		nreq->nr_512b_sectors = nr_sects;
1716 		nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
1717 		total_sects += nr_sects;
1718 
1719 		if ((nreq->nr_512b_sectors &
1720 		    ((xbb->sector_size >> 9) - 1)) != 0) {
1721 			device_printf(xbb->dev, "%s: I/O size (%d) is not "
1722 				      "a multiple of the backing store sector "
1723 				      "size (%d)\n", __func__,
1724 				      nreq->nr_512b_sectors << 9,
1725 				      xbb->sector_size);
1726 			reqlist->status = BLKIF_RSP_ERROR;
1727 			goto send_response;
1728 		}
1729 	}
1730 
1731 	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
1732 					  xbb->maps, reqlist->nr_segments);
1733 	if (error != 0)
1734 		panic("Grant table operation failed (%d)", error);
1735 
1736 	reqlist->flags |= XBB_REQLIST_MAPPED;
1737 
1738 	for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1739 	     seg_idx++, map++){
1740 
1741 		if (__predict_false(map->status != 0)) {
1742 			DPRINTF("invalid buffer -- could not remap "
1743 			        "it (%d)\n", map->status);
1744 			DPRINTF("Mapping(%d): Host Addr 0x%lx, flags "
1745 			        "0x%x ref 0x%x, dom %d\n", seg_idx,
1746 				map->host_addr, map->flags, map->ref,
1747 				map->dom);
1748 			reqlist->status = BLKIF_RSP_ERROR;
1749 			goto send_response;
1750 		}
1751 
1752 		reqlist->gnt_handles[seg_idx] = map->handle;
1753 	}
1754 	if (reqlist->starting_sector_number + total_sects >
1755 	    xbb->media_num_sectors) {
1756 
1757 		DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] "
1758 			"extends past end of device %s\n",
1759 			operation == BIO_READ ? "read" : "write",
1760 			reqlist->starting_sector_number,
1761 			reqlist->starting_sector_number + total_sects,
1762 			xbb->dev_name);
1763 		reqlist->status = BLKIF_RSP_ERROR;
1764 		goto send_response;
1765 	}
1766 
1767 do_dispatch:
1768 
1769 	error = xbb->dispatch_io(xbb,
1770 				 reqlist,
1771 				 operation,
1772 				 bio_flags);
1773 
1774 	if (error != 0) {
1775 		reqlist->status = BLKIF_RSP_ERROR;
1776 		goto send_response;
1777 	}
1778 
1779 	return (0);
1780 
1781 send_response:
1782 
1783 	xbb_complete_reqlist(xbb, reqlist);
1784 
1785 	return (0);
1786 }
1787 
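/**
 * Count the number of 512 byte sectors covered by a ring request's
 * segment list, stopping at the first malformed (non-positive length)
 * segment.
 *
 * \param ring_req  The front-end request to size.
 *
 * \return  The cumulative count of 512 byte sectors.
 */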
1788 static __inline int
1789 xbb_count_sects(blkif_request_t *ring_req)
1790 {
1791 	int i;
1792 	int cur_size = 0;
1793 
1794 	for (i = 0; i < ring_req->nr_segments; i++) {
1795 		int nsect;
1796 
1797 		nsect = (int8_t)(ring_req->seg[i].last_sect -
1798 			ring_req->seg[i].first_sect + 1);
1799 		if (nsect <= 0)
1800 			break;
1801 
1802 		cur_size += nsect;
1803 	}
1804 
1805 	return (cur_size);
1806 }
1807 
1808 /**
1809  * Process incoming requests from the shared communication ring in response
1810  * to a signal on the ring's event channel.
1811  *
1812  * \param context  Callback argument registered during task initialization -
1813  *                 the xbb_softc for this instance.
1814  * \param pending  The number of taskqueue_enqueue events that have
1815  *                 occurred since this handler was last run.
1816  */
1817 static void
1818 xbb_run_queue(void *context, int pending)
1819 {
1820 	struct xbb_softc       *xbb;
1821 	blkif_back_rings_t     *rings;
1822 	RING_IDX		rp;
1823 	uint64_t		cur_sector;
1824 	int			cur_operation;
1825 	struct xbb_xen_reqlist *reqlist;
1826 
1827 
1828 	xbb   = (struct xbb_softc *)context;
1829 	rings = &xbb->rings;
1830 
1831 	/*
1832 	 * Work gather and dispatch loop.  Note that we have a bias here
1833 	 * towards gathering I/O sent by blockfront.  We first gather up
1834 	 * everything in the ring, as long as we have resources.  Then we
1835 	 * dispatch one request, and then attempt to gather up any
1836 	 * additional requests that have come in while we were dispatching
1837 	 * the request.
1838 	 *
1839 	 * This allows us to get a clearer picture (via devstat) of how
1840 	 * many requests blockfront is queueing to us at any given time.
1841 	 */
1842 	for (;;) {
1843 		int retval;
1844 
1845 		/*
1846 		 * Initialize reqlist to the last element in the pending
1847 		 * queue, if there is one.  This allows us to add more
1848 		 * requests to that request list, if we have room.
1849 		 */
1850 		reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1851 				      xbb_xen_reqlist, links);
1852 		if (reqlist != NULL) {
1853 			cur_sector = reqlist->next_contig_sector;
1854 			cur_operation = reqlist->operation;
1855 		} else {
1856 			cur_operation = 0;
1857 			cur_sector    = 0;
1858 		}
1859 
1860 		/*
1861 		 * Cache req_prod to avoid accessing a cache line shared
1862 		 * with the frontend.
1863 		 */
1864 		rp = rings->common.sring->req_prod;
1865 
1866 		/* Ensure we see queued requests up to 'rp'. */
1867 		rmb();
1868 
1869 		/**
1870 		 * Run so long as there is work to consume and the generation
1871 		 * of a response will not overflow the ring.
1872 		 *
1873 		 * @note There's a 1 to 1 relationship between requests and
1874 		 *       responses, so an overflow should never occur.  This
1875 		 *       test is to protect our domain from digesting bogus
1876 		 *       data.  Shouldn't we log this?
1877 		 */
1878 		while (rings->common.req_cons != rp
1879 		    && RING_REQUEST_CONS_OVERFLOW(&rings->common,
1880 						  rings->common.req_cons) == 0){
1881 			blkif_request_t	        ring_req_storage;
1882 			blkif_request_t	       *ring_req;
1883 			int			cur_size;
1884 
1885 			switch (xbb->abi) {
1886 			case BLKIF_PROTOCOL_NATIVE:
1887 				ring_req = RING_GET_REQUEST(&xbb->rings.native,
1888 				    rings->common.req_cons);
1889 				break;
1890 			case BLKIF_PROTOCOL_X86_32:
1891 			{
1892 				struct blkif_x86_32_request *ring_req32;
1893 
1894 				ring_req32 = RING_GET_REQUEST(
1895 				    &xbb->rings.x86_32, rings->common.req_cons);
1896 				blkif_get_x86_32_req(&ring_req_storage,
1897 						     ring_req32);
1898 				ring_req = &ring_req_storage;
1899 				break;
1900 			}
1901 			case BLKIF_PROTOCOL_X86_64:
1902 			{
1903 				struct blkif_x86_64_request *ring_req64;
1904 
1905 				ring_req64 =RING_GET_REQUEST(&xbb->rings.x86_64,
1906 				    rings->common.req_cons);
1907 				blkif_get_x86_64_req(&ring_req_storage,
1908 						     ring_req64);
1909 				ring_req = &ring_req_storage;
1910 				break;
1911 			}
1912 			default:
1913 				panic("Unexpected blkif protocol ABI.");
1914 				/* NOTREACHED */
1915 			}
1916 
1917 			/*
1918 			 * Check for situations that would require closing
1919 			 * off this I/O for further coalescing:
1920 			 *  - Coalescing is turned off.
1921 			 *  - Current I/O is out of sequence with the previous
1922 			 *    I/O.
1923 			 *  - Coalesced I/O would be too large.
1924 			 */
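			/*
			 * For example (illustrative only), two back-to-back
			 * writes to contiguous sectors from the frontend are
			 * merged into a single reqlist so long as their
			 * combined segment count stays at or below
			 * xbb->max_reqlist_segments.
			 */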
1925 			if ((reqlist != NULL)
1926 			 && ((xbb->no_coalesce_reqs != 0)
1927 			  || ((xbb->no_coalesce_reqs == 0)
1928 			   && ((ring_req->sector_number != cur_sector)
1929 			    || (ring_req->operation != cur_operation)
1930 			    || ((ring_req->nr_segments + reqlist->nr_segments) >
1931 			         xbb->max_reqlist_segments))))) {
1932 				reqlist = NULL;
1933 			}
1934 
1935 			/*
1936 			 * Grab and check for all resources in one shot.
1937 			 * If we can't get all of the resources we need,
1938 			 * the shortage is noted and the thread will get
1939 			 * woken up when more resources are available.
1940 			 */
1941 			retval = xbb_get_resources(xbb, &reqlist, ring_req,
1942 						   xbb->rings.common.req_cons);
1943 
1944 			if (retval != 0) {
1945 				/*
1946 				 * Resource shortage has been recorded.
1947 				 * We'll be scheduled to run once a request
1948 				 * object frees up due to a completion.
1949 				 */
1950 				break;
1951 			}
1952 
1953 			/*
1954 			 * Signify that	we can overwrite this request with
1955 			 * a response by incrementing our consumer index.
1956 			 * The response won't be generated until after
1957 			 * we've already consumed all necessary data out
1958 			 * of the version of the request in the ring buffer
1959 			 * (for native mode).  We must update the consumer
1960 			 * index before issuing back-end I/O so there is
1961 			 * no possibility that it will complete and a
1962 			 * response be generated before we make room in
1963 			 * the queue for that response.
1964 			 */
1965 			xbb->rings.common.req_cons++;
1966 			xbb->reqs_received++;
1967 
1968 			cur_size = xbb_count_sects(ring_req);
1969 			cur_sector = ring_req->sector_number + cur_size;
1970 			reqlist->next_contig_sector = cur_sector;
1971 			cur_operation = ring_req->operation;
1972 		}
1973 
1974 		/* Check for I/O to dispatch */
1975 		reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1976 		if (reqlist == NULL) {
1977 			/*
1978 			 * We're out of work to do, put the task queue to
1979 			 * sleep.
1980 			 */
1981 			break;
1982 		}
1983 
1984 		/*
1985 		 * Grab the first request off the queue and attempt
1986 		 * to dispatch it.
1987 		 */
1988 		STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
1989 
1990 		retval = xbb_dispatch_io(xbb, reqlist);
1991 		if (retval != 0) {
1992 			/*
1993 			 * xbb_dispatch_io() returns non-zero only when
1994 			 * there is a resource shortage.  If that's the
1995 			 * case, re-queue this request on the head of the
1996 			 * queue, and go to sleep until we have more
1997 			 * resources.
1998 			 */
1999 			STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
2000 					   reqlist, links);
2001 			break;
2002 		} else {
2003 			/*
2004 			 * If we still have anything on the queue after
2005 			 * removing the head entry, that is because we
2006 			 * met one of the criteria to create a new
2007 			 * request list (outlined above), and we'll call
2008 			 * that a forced dispatch for statistical purposes.
2009 			 *
2010 			 * Otherwise, if there is only one element on the
2011 			 * queue, we coalesced everything available on
2012 			 * the ring and we'll call that a normal dispatch.
2013 			 */
2014 			reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
2015 
2016 			if (reqlist != NULL)
2017 				xbb->forced_dispatch++;
2018 			else
2019 				xbb->normal_dispatch++;
2020 
2021 			xbb->total_dispatch++;
2022 		}
2023 	}
2024 }
2025 
2026 /**
2027  * Interrupt handler bound to the shared ring's event channel.
2028  *
2029  * \param arg  Callback argument registered during event channel
2030  *             binding - the xbb_softc for this instance.
2031  */
2032 static int
2033 xbb_filter(void *arg)
2034 {
2035 	struct xbb_softc *xbb;
2036 
2037 	/* Defer to taskqueue thread. */
2038 	xbb = (struct xbb_softc *)arg;
2039 	taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
2040 
2041 	return (FILTER_HANDLED);
2042 }
2043 
2044 SDT_PROVIDER_DEFINE(xbb);
2045 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
2046 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
2047 		  "uint64_t");
2048 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
2049 		  "uint64_t", "uint64_t");
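/*
 * These statically defined tracing (SDT) probes fire when a flush, read,
 * or write is dispatched to a character device backend and can be
 * inspected with DTrace, e.g. (illustrative only):
 *
 *   dtrace -n 'xbb:::write { printf("unit %d off %d len %d",
 *       arg0, arg1, arg2); }'
 */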
2050 
2051 /*----------------------------- Backend Handlers -----------------------------*/
2052 /**
2053  * Backend handler for character device access.
2054  *
2055  * \param xbb        Per-instance xbb configuration structure.
2056  * \param reqlist    Allocated internal request list structure.
2057  * \param operation  BIO_* I/O operation code.
2058  * \param bio_flags  Additional bio_flag data to pass to any generated
2059  *                   bios (e.g. BIO_ORDERED).
2060  *
2061  * \return  0 for success, errno codes for failure.
2062  */
2063 static int
2064 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2065 		 int operation, int bio_flags)
2066 {
2067 	struct xbb_dev_data *dev_data;
2068 	struct bio          *bios[XBB_MAX_SEGMENTS_PER_REQLIST];
2069 	off_t                bio_offset;
2070 	struct bio          *bio;
2071 	struct xbb_sg       *xbb_sg;
2072 	u_int	             nbio;
2073 	u_int                bio_idx;
2074 	u_int		     nseg;
2075 	u_int                seg_idx;
2076 	int                  error;
2077 
2078 	dev_data   = &xbb->backend.dev;
2079 	bio_offset = (off_t)reqlist->starting_sector_number
2080 		   << xbb->sector_size_shift;
2081 	error      = 0;
2082 	nbio       = 0;
2083 	bio_idx    = 0;
2084 
2085 	if (operation == BIO_FLUSH) {
2086 		bio = g_new_bio();
2087 		if (__predict_false(bio == NULL)) {
2088 			DPRINTF("Unable to allocate bio for BIO_FLUSH\n");
2089 			error = ENOMEM;
2090 			return (error);
2091 		}
2092 
2093 		bio->bio_cmd	 = BIO_FLUSH;
2094 		bio->bio_flags	|= BIO_ORDERED;
2095 		bio->bio_dev	 = dev_data->cdev;
2096 		bio->bio_offset	 = 0;
2097 		bio->bio_data	 = 0;
2098 		bio->bio_done	 = xbb_bio_done;
2099 		bio->bio_caller1 = reqlist;
2100 		bio->bio_pblkno	 = 0;
2101 
2102 		reqlist->pendcnt = 1;
2103 
2104 		SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush,
2105 			   device_get_unit(xbb->dev));
2106 
2107 		(*dev_data->csw->d_strategy)(bio);
2108 
2109 		return (0);
2110 	}
2111 
2112 	xbb_sg = xbb->xbb_sgs;
2113 	bio    = NULL;
2114 	nseg = reqlist->nr_segments;
2115 
2116 	for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
2117 
2118 		/*
2119 		 * KVA will not be contiguous, so any additional
2120 		 * I/O will need to be represented in a new bio.
2121 		 */
2122 		if ((bio != NULL)
2123 		 && (xbb_sg->first_sect != 0)) {
2124 			if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2125 				printf("%s: Discontiguous I/O request "
2126 				       "from domain %d ends on "
2127 				       "non-sector boundary\n",
2128 				       __func__, xbb->otherend_id);
2129 				error = EINVAL;
2130 				goto fail_free_bios;
2131 			}
2132 			bio = NULL;
2133 		}
2134 
2135 		if (bio == NULL) {
2136 			/*
2137 			 * Make sure that the start of this bio is
2138 			 * aligned to a device sector.
2139 			 */
2140 			if ((bio_offset & (xbb->sector_size - 1)) != 0){
2141 				printf("%s: Misaligned I/O request "
2142 				       "from domain %d\n", __func__,
2143 				       xbb->otherend_id);
2144 				error = EINVAL;
2145 				goto fail_free_bios;
2146 			}
2147 
2148 			bio = bios[nbio++] = g_new_bio();
2149 			if (__predict_false(bio == NULL)) {
2150 				error = ENOMEM;
2151 				goto fail_free_bios;
2152 			}
2153 			bio->bio_cmd     = operation;
2154 			bio->bio_flags  |= bio_flags;
2155 			bio->bio_dev     = dev_data->cdev;
2156 			bio->bio_offset  = bio_offset;
2157 			bio->bio_data    = xbb_reqlist_ioaddr(reqlist, seg_idx,
2158 						xbb_sg->first_sect);
2159 			bio->bio_done    = xbb_bio_done;
2160 			bio->bio_caller1 = reqlist;
2161 			bio->bio_pblkno  = bio_offset >> xbb->sector_size_shift;
2162 		}
2163 
2164 		bio->bio_length += xbb_sg->nsect << 9;
2165 		bio->bio_bcount  = bio->bio_length;
2166 		bio_offset      += xbb_sg->nsect << 9;
2167 
2168 		if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) {
2169 
2170 			if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2171 				printf("%s: Discontiguous I/O request "
2172 				       "from domain %d ends on "
2173 				       "non-sector boundary\n",
2174 				       __func__, xbb->otherend_id);
2175 				error = EINVAL;
2176 				goto fail_free_bios;
2177 			}
2178 			/*
2179 			 * KVA will not be contiguous, so any additional
2180 			 * I/O will need to be represented in a new bio.
2181 			 */
2182 			bio = NULL;
2183 		}
2184 	}
2185 
2186 	reqlist->pendcnt = nbio;
2187 
2188 	for (bio_idx = 0; bio_idx < nbio; bio_idx++)
2189 	{
2190 #ifdef XBB_USE_BOUNCE_BUFFERS
2191 		vm_offset_t kva_offset;
2192 
2193 		kva_offset = (vm_offset_t)bios[bio_idx]->bio_data
2194 			   - (vm_offset_t)reqlist->bounce;
2195 		if (operation == BIO_WRITE) {
2196 			memcpy(bios[bio_idx]->bio_data,
2197 			       (uint8_t *)reqlist->kva + kva_offset,
2198 			       bios[bio_idx]->bio_bcount);
2199 		}
2200 #endif
2201 		if (operation == BIO_READ) {
2202 			SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read,
2203 				   device_get_unit(xbb->dev),
2204 				   bios[bio_idx]->bio_offset,
2205 				   bios[bio_idx]->bio_length);
2206 		} else if (operation == BIO_WRITE) {
2207 			SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write,
2208 				   device_get_unit(xbb->dev),
2209 				   bios[bio_idx]->bio_offset,
2210 				   bios[bio_idx]->bio_length);
2211 		}
2212 		(*dev_data->csw->d_strategy)(bios[bio_idx]);
2213 	}
2214 
2215 	return (error);
2216 
2217 fail_free_bios:
2218 	for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++)
2219 		g_destroy_bio(bios[bio_idx]);
2220 
2221 	return (error);
2222 }
2223 
2224 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int");
2225 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t",
2226 		  "uint64_t");
2227 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int",
2228 		  "uint64_t", "uint64_t");
2229 
2230 /**
2231  * Backend handler for file access.
2232  *
2233  * \param xbb        Per-instance xbb configuration structure.
2234  * \param reqlist    Allocated internal request list.
2235  * \param operation  BIO_* I/O operation code.
2236  * \param flags      Additional bio_flag data to pass to any generated bios
2237  *                   (e.g. BIO_ORDERED).
2238  *
2239  * \return  0 for success, errno codes for failure.
2240  */
2241 static int
2242 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2243 		  int operation, int flags)
2244 {
2245 	struct xbb_file_data *file_data;
2246 	u_int                 seg_idx;
2247 	u_int		      nseg;
2248 	off_t		      sectors_sent;
2249 	struct uio            xuio;
2250 	struct xbb_sg        *xbb_sg;
2251 	struct iovec         *xiovec;
2252 #ifdef XBB_USE_BOUNCE_BUFFERS
2253 	void                **p_vaddr;
2254 	int                   saved_uio_iovcnt;
2255 #endif /* XBB_USE_BOUNCE_BUFFERS */
2256 	int                   error;
2257 
2258 	file_data = &xbb->backend.file;
2259 	sectors_sent = 0;
2260 	error = 0;
2261 	bzero(&xuio, sizeof(xuio));
2262 
2263 	switch (operation) {
2264 	case BIO_READ:
2265 		xuio.uio_rw = UIO_READ;
2266 		break;
2267 	case BIO_WRITE:
2268 		xuio.uio_rw = UIO_WRITE;
2269 		break;
2270 	case BIO_FLUSH: {
2271 		struct mount *mountpoint;
2272 
2273 		SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush,
2274 			   device_get_unit(xbb->dev));
2275 
2276 		(void) vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2277 
2278 		vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2279 		error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread);
2280 		VOP_UNLOCK(xbb->vn, 0);
2281 
2282 		vn_finished_write(mountpoint);
2283 
2284 		goto bailout_send_response;
2285 		/* NOTREACHED */
2286 	}
2287 	default:
2288 		panic("invalid operation %d", operation);
2289 		/* NOTREACHED */
2290 	}
2291 	xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number
2292 			<< xbb->sector_size_shift;
2293 	xuio.uio_segflg = UIO_SYSSPACE;
2294 	xuio.uio_iov = file_data->xiovecs;
2295 	xuio.uio_iovcnt = 0;
2296 	xbb_sg = xbb->xbb_sgs;
2297 	nseg = reqlist->nr_segments;
2298 
2299 	for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
2300 
2301 		/*
2302 		 * If the first sector is not 0, the KVA will
2303 		 * not be contiguous and we'll need to go on
2304 		 * to another segment.
2305 		 */
2306 		if (xbb_sg->first_sect != 0)
2307 			xiovec = NULL;
2308 
2309 		if (xiovec == NULL) {
2310 			xiovec = &file_data->xiovecs[xuio.uio_iovcnt];
2311 			xiovec->iov_base = xbb_reqlist_ioaddr(reqlist,
2312 			    seg_idx, xbb_sg->first_sect);
2313 #ifdef XBB_USE_BOUNCE_BUFFERS
2314 			/*
2315 			 * Store the address of the incoming
2316 			 * buffer at this particular offset
2317 			 * as well, so we can do the copy
2318 			 * later without having to do more
2319 			 * work to recalculate this address.
2320 		 	 */
2321 			p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt];
2322 			*p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx,
2323 			    xbb_sg->first_sect);
2324 #endif /* XBB_USE_BOUNCE_BUFFERS */
2325 			xiovec->iov_len = 0;
2326 			xuio.uio_iovcnt++;
2327 		}
2328 
2329 		xiovec->iov_len += xbb_sg->nsect << 9;
2330 
2331 		xuio.uio_resid += xbb_sg->nsect << 9;
2332 
2333 		/*
2334 		 * If the last sector is not the full page
2335 		 * size count, the next segment will not be
2336 		 * contiguous in KVA and we need a new iovec.
2337 		 */
2338 		if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9)
2339 			xiovec = NULL;
2340 	}
2341 
2342 	xuio.uio_td = curthread;
2343 
2344 #ifdef XBB_USE_BOUNCE_BUFFERS
2345 	saved_uio_iovcnt = xuio.uio_iovcnt;
2346 
2347 	if (operation == BIO_WRITE) {
2348 		/* Copy the write data to the local buffer. */
2349 		for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
2350 		     xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt;
2351 		     seg_idx++, xiovec++, p_vaddr++) {
2352 
2353 			memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len);
2354 		}
2355 	} else {
2356 		/*
2357 		 * We only need to save off the iovecs in the case of a
2358 		 * read, because the copy for the read happens after the
2359 		 * VOP_READ().  (The uio will get modified in that call
2360 		 * sequence.)
2361 		 */
2362 		memcpy(file_data->saved_xiovecs, xuio.uio_iov,
2363 		       xuio.uio_iovcnt * sizeof(xuio.uio_iov[0]));
2364 	}
2365 #endif /* XBB_USE_BOUNCE_BUFFERS */
2366 
2367 	switch (operation) {
2368 	case BIO_READ:
2369 
2370 		SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read,
2371 			   device_get_unit(xbb->dev), xuio.uio_offset,
2372 			   xuio.uio_resid);
2373 
2374 		vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2375 
2376 		/*
2377 		 * UFS pays attention to IO_DIRECT for reads.  If the
2378 		 * DIRECTIO option is configured into the kernel, it calls
2379 		 * ffs_rawread().  But that only works for single-segment
2380 		 * uios with user space addresses.  In our case, with a
2381 		 * kernel uio, it still reads into the buffer cache, but it
2382 		 * will just try to release the buffer from the cache later
2383 		 * on in ffs_read().
2384 		 *
2385 		 * ZFS does not pay attention to IO_DIRECT for reads.
2386 		 *
2387 		 * UFS does not pay attention to IO_SYNC for reads.
2388 		 *
2389 		 * ZFS pays attention to IO_SYNC (which translates into the
2390 		 * Solaris define FRSYNC for zfs_read()) for reads.  It
2391 		 * attempts to sync the file before reading.
2392 		 *
2393 		 * So, to attempt to provide some barrier semantics in the
2394 		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
2395 		 */
2396 		error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2397 				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);
2398 
2399 		VOP_UNLOCK(xbb->vn, 0);
2400 		break;
2401 	case BIO_WRITE: {
2402 		struct mount *mountpoint;
2403 
2404 		SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write,
2405 			   device_get_unit(xbb->dev), xuio.uio_offset,
2406 			   xuio.uio_resid);
2407 
2408 		(void)vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2409 
2410 		vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2411 
2412 		/*
2413 		 * UFS pays attention to IO_DIRECT for writes.  The write
2414 		 * is done asynchronously.  (Normally the write would just
2415 		 * get put into the cache.)
2416 		 *
2417 		 * UFS pays attention to IO_SYNC for writes.  It will
2418 		 * attempt to write the buffer out synchronously if that
2419 		 * flag is set.
2420 		 *
2421 		 * ZFS does not pay attention to IO_DIRECT for writes.
2422 		 *
2423 		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
2424 		 * for writes.  It will flush the transaction from the
2425 		 * cache before returning.
2426 		 *
2427 		 * So if we've got the BIO_ORDERED flag set, we want
2428 		 * IO_SYNC in either the UFS or ZFS case.
2429 		 */
2430 		error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2431 				  IO_SYNC : 0, file_data->cred);
2432 		VOP_UNLOCK(xbb->vn, 0);
2433 
2434 		vn_finished_write(mountpoint);
2435 
2436 		break;
2437 	}
2438 	default:
2439 		panic("invalid operation %d", operation);
2440 		/* NOTREACHED */
2441 	}
2442 
2443 #ifdef XBB_USE_BOUNCE_BUFFERS
2444 	/* We only need to copy here for read operations */
2445 	if (operation == BIO_READ) {
2446 
2447 		for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr,
2448 		     xiovec = file_data->saved_xiovecs;
2449 		     seg_idx < saved_uio_iovcnt; seg_idx++,
2450 		     xiovec++, p_vaddr++) {
2451 
2452 			/*
2453 			 * Note that we have to use the copy of the
2454 			 * io vector we made above.  uiomove() modifies
2455 			 * the uio and its referenced vector as uiomove
2456 			 * performs the copy, so we can't rely on any
2457 			 * state from the original uio.
2458 			 */
2459 			memcpy(*p_vaddr, xiovec->iov_base, xiovec->iov_len);
2460 		}
2461 	}
2462 #endif /* XBB_USE_BOUNCE_BUFFERS */
2463 
2464 bailout_send_response:
2465 
2466 	if (error != 0)
2467 		reqlist->status = BLKIF_RSP_ERROR;
2468 
2469 	xbb_complete_reqlist(xbb, reqlist);
2470 
2471 	return (0);
2472 }
2473 
2474 /*--------------------------- Backend Configuration --------------------------*/
2475 /**
2476  * Close and cleanup any backend device/file specific state for this
2477  * block back instance.
2478  *
2479  * \param xbb  Per-instance xbb configuration structure.
2480  */
2481 static void
2482 xbb_close_backend(struct xbb_softc *xbb)
2483 {
2484 	DROP_GIANT();
2485 	DPRINTF("closing dev=%s\n", xbb->dev_name);
2486 	if (xbb->vn) {
2487 		int flags = FREAD;
2488 
2489 		if ((xbb->flags & XBBF_READ_ONLY) == 0)
2490 			flags |= FWRITE;
2491 
2492 		switch (xbb->device_type) {
2493 		case XBB_TYPE_DISK:
2494 			if (xbb->backend.dev.csw) {
2495 				dev_relthread(xbb->backend.dev.cdev,
2496 					      xbb->backend.dev.dev_ref);
2497 				xbb->backend.dev.csw  = NULL;
2498 				xbb->backend.dev.cdev = NULL;
2499 			}
2500 			break;
2501 		case XBB_TYPE_FILE:
2502 			break;
2503 		case XBB_TYPE_NONE:
2504 		default:
2505 			panic("Unexpected backend type.");
2506 			break;
2507 		}
2508 
2509 		(void)vn_close(xbb->vn, flags, NOCRED, curthread);
2510 		xbb->vn = NULL;
2511 
2512 		switch (xbb->device_type) {
2513 		case XBB_TYPE_DISK:
2514 			break;
2515 		case XBB_TYPE_FILE:
2516 			if (xbb->backend.file.cred != NULL) {
2517 				crfree(xbb->backend.file.cred);
2518 				xbb->backend.file.cred = NULL;
2519 			}
2520 			break;
2521 		case XBB_TYPE_NONE:
2522 		default:
2523 			panic("Unexpected backend type.");
2524 			break;
2525 		}
2526 	}
2527 	PICKUP_GIANT();
2528 }
2529 
2530 /**
2531  * Open a character device to be used for backend I/O.
2532  *
2533  * \param xbb  Per-instance xbb configuration structure.
2534  *
2535  * \return  0 for success, errno codes for failure.
2536  */
2537 static int
2538 xbb_open_dev(struct xbb_softc *xbb)
2539 {
2540 	struct vattr   vattr;
2541 	struct cdev   *dev;
2542 	struct cdevsw *devsw;
2543 	int	       error;
2544 
2545 	xbb->device_type = XBB_TYPE_DISK;
2546 	xbb->dispatch_io = xbb_dispatch_dev;
2547 	xbb->backend.dev.cdev = xbb->vn->v_rdev;
2548 	xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev,
2549 					     &xbb->backend.dev.dev_ref);
2550 	if (xbb->backend.dev.csw == NULL)
2551 		panic("Unable to retrieve device switch");
2552 
2553 	error = VOP_GETATTR(xbb->vn, &vattr, NOCRED);
2554 	if (error) {
2555 		xenbus_dev_fatal(xbb->dev, error, "error getting "
2556 				 "vnode attributes for device %s",
2557 				 xbb->dev_name);
2558 		return (error);
2559 	}
2560 
2561 
2562 	dev = xbb->vn->v_rdev;
2563 	devsw = dev->si_devsw;
2564 	if (!devsw->d_ioctl) {
2565 		xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for "
2566 				 "device %s!", xbb->dev_name);
2567 		return (ENODEV);
2568 	}
2569 
2570 	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
2571 			       (caddr_t)&xbb->sector_size, FREAD,
2572 			       curthread);
2573 	if (error) {
2574 		xenbus_dev_fatal(xbb->dev, error,
2575 				 "error calling ioctl DIOCGSECTORSIZE "
2576 				 "for device %s", xbb->dev_name);
2577 		return (error);
2578 	}
2579 
2580 	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
2581 			       (caddr_t)&xbb->media_size, FREAD,
2582 			       curthread);
2583 	if (error) {
2584 		xenbus_dev_fatal(xbb->dev, error,
2585 				 "error calling ioctl DIOCGMEDIASIZE "
2586 				 "for device %s", xbb->dev_name);
2587 		return (error);
2588 	}
2589 
2590 	return (0);
2591 }
2592 
2593 /**
2594  * Open a file to be used for backend I/O.
2595  *
2596  * \param xbb  Per-instance xbb configuration structure.
2597  *
2598  * \return  0 for success, errno codes for failure.
2599  */
2600 static int
2601 xbb_open_file(struct xbb_softc *xbb)
2602 {
2603 	struct xbb_file_data *file_data;
2604 	struct vattr          vattr;
2605 	int                   error;
2606 
2607 	file_data = &xbb->backend.file;
2608 	xbb->device_type = XBB_TYPE_FILE;
2609 	xbb->dispatch_io = xbb_dispatch_file;
2610 	error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred);
2611 	if (error != 0) {
2612 		xenbus_dev_fatal(xbb->dev, error,
2613 				 "error calling VOP_GETATTR()"
2614 				 "for file %s", xbb->dev_name);
2615 		return (error);
2616 	}
2617 
2618 	/*
2619 	 * Verify that we have the ability to upgrade to exclusive
2620 	 * access on this file so we can trap errors at open instead
2621 	 * of reporting them during first access.
2622 	 */
2623 	if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) {
2624 		vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY);
2625 		if (xbb->vn->v_iflag & VI_DOOMED) {
2626 			error = EBADF;
2627 			xenbus_dev_fatal(xbb->dev, error,
2628 					 "error locking file %s",
2629 					 xbb->dev_name);
2630 
2631 			return (error);
2632 		}
2633 	}
2634 
2635 	file_data->cred = crhold(curthread->td_ucred);
2636 	xbb->media_size = vattr.va_size;
2637 
2638 	/*
2639 	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
2640 	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
2641 	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
2642 	 * may not work with other OSes as well.  So just export a sector
2643 	 * size of 512 bytes, which should work with any OS or
2644 	 * application.  Since our backing is a file, any block size will
2645 	 * work fine for the backing store.
2646 	 */
2647 #if 0
2648 	xbb->sector_size = vattr.va_blocksize;
2649 #endif
2650 	xbb->sector_size = 512;
2651 
2652 	/*
2653 	 * Sanity check.  The media size has to be at least one
2654 	 * sector long.
2655 	 */
2656 	if (xbb->media_size < xbb->sector_size) {
2657 		error = EINVAL;
2658 		xenbus_dev_fatal(xbb->dev, error,
2659 				 "file %s size %ju < block size %u",
2660 				 xbb->dev_name,
2661 				 (uintmax_t)xbb->media_size,
2662 				 xbb->sector_size);
2663 	}
2664 	return (error);
2665 }
2666 
2667 /**
2668  * Open the backend provider for this connection.
2669  *
2670  * \param xbb  Per-instance xbb configuration structure.
2671  *
2672  * \return  0 for success, errno codes for failure.
2673  */
2674 static int
2675 xbb_open_backend(struct xbb_softc *xbb)
2676 {
2677 	struct nameidata nd;
2678 	int		 flags;
2679 	int		 error;
2680 
2681 	flags = FREAD;
2682 	error = 0;
2683 
2684 	DPRINTF("opening dev=%s\n", xbb->dev_name);
2685 
2686 	if (rootvnode == NULL) {
2687 		xenbus_dev_fatal(xbb->dev, ENOENT,
2688 				 "Root file system not mounted");
2689 		return (ENOENT);
2690 	}
2691 
2692 	if ((xbb->flags & XBBF_READ_ONLY) == 0)
2693 		flags |= FWRITE;
2694 
2695 	pwd_ensure_dirs();
2696 
2697  again:
2698 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread);
2699 	error = vn_open(&nd, &flags, 0, NULL);
2700 	if (error) {
2701 		/*
2702 		 * This is the only reasonable guess we can make as far as
2703 		 * This is the only reasonable guess we can make about the
2704 		 * path if the user doesn't give us a fully qualified one.
2705 		 * full path.
2706 		 */
2707 		if (xbb->dev_name[0] != '/') {
2708 			char *dev_path = "/dev/";
2709 			char *dev_name;
2710 
2711 			/* Try adding device path at beginning of name */
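			/*
			 * (e.g. an illustrative dev_name of "ada0p3" would be
			 * retried as "/dev/ada0p3")
			 */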
2712 			dev_name = malloc(strlen(xbb->dev_name)
2713 					+ strlen(dev_path) + 1,
2714 					  M_XENBLOCKBACK, M_NOWAIT);
2715 			if (dev_name) {
2716 				sprintf(dev_name, "%s%s", dev_path,
2717 					xbb->dev_name);
2718 				free(xbb->dev_name, M_XENBLOCKBACK);
2719 				xbb->dev_name = dev_name;
2720 				goto again;
2721 			}
2722 		}
2723 		xenbus_dev_fatal(xbb->dev, error, "error opening device %s",
2724 				 xbb->dev_name);
2725 		return (error);
2726 	}
2727 
2728 	NDFREE(&nd, NDF_ONLY_PNBUF);
2729 
2730 	xbb->vn = nd.ni_vp;
2731 
2732 	/* We only support disks and files. */
2733 	if (vn_isdisk(xbb->vn, &error)) {
2734 		error = xbb_open_dev(xbb);
2735 	} else if (xbb->vn->v_type == VREG) {
2736 		error = xbb_open_file(xbb);
2737 	} else {
2738 		error = EINVAL;
2739 		xenbus_dev_fatal(xbb->dev, error, "%s is not a disk "
2740 				 "or file", xbb->dev_name);
2741 	}
2742 	VOP_UNLOCK(xbb->vn, 0);
2743 
2744 	if (error != 0) {
2745 		xbb_close_backend(xbb);
2746 		return (error);
2747 	}
2748 
2749 	xbb->sector_size_shift = fls(xbb->sector_size) - 1;
2750 	xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift;
2751 
2752 	DPRINTF("opened %s=%s sector_size=%u media_size=%" PRId64 "\n",
2753 		(xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file",
2754 		xbb->dev_name, xbb->sector_size, xbb->media_size);
2755 
2756 	return (0);
2757 }
2758 
2759 /*------------------------ Inter-Domain Communication ------------------------*/
2760 /**
2761  * Free dynamically allocated KVA or pseudo-physical address allocations.
2762  *
2763  * \param xbb  Per-instance xbb configuration structure.
2764  */
2765 static void
2766 xbb_free_communication_mem(struct xbb_softc *xbb)
2767 {
2768 	if (xbb->kva != 0) {
2769 		if (xbb->pseudo_phys_res != NULL) {
2770 			xenmem_free(xbb->dev, xbb->pseudo_phys_res_id,
2771 			    xbb->pseudo_phys_res);
2772 			xbb->pseudo_phys_res = NULL;
2773 		}
2774 	}
2775 	xbb->kva = 0;
2776 	xbb->gnt_base_addr = 0;
2777 	if (xbb->kva_free != NULL) {
2778 		free(xbb->kva_free, M_XENBLOCKBACK);
2779 		xbb->kva_free = NULL;
2780 	}
2781 }
2782 
2783 /**
2784  * Cleanup all inter-domain communication mechanisms.
2785  *
2786  * \param xbb  Per-instance xbb configuration structure.
2787  */
2788 static int
2789 xbb_disconnect(struct xbb_softc *xbb)
2790 {
2791 	struct gnttab_unmap_grant_ref  ops[XBB_MAX_RING_PAGES];
2792 	struct gnttab_unmap_grant_ref *op;
2793 	u_int			       ring_idx;
2794 	int			       error;
2795 
2796 	DPRINTF("\n");
2797 
2798 	if ((xbb->flags & XBBF_RING_CONNECTED) == 0)
2799 		return (0);
2800 
2801 	xen_intr_unbind(&xbb->xen_intr_handle);
2802 
2803 	mtx_unlock(&xbb->lock);
2804 	taskqueue_drain(xbb->io_taskqueue, &xbb->io_task);
2805 	mtx_lock(&xbb->lock);
2806 
2807 	/*
2808 	 * No new interrupts can generate work, but we must wait
2809 	 * for all currently active requests to drain.
2810 	 */
2811 	if (xbb->active_request_count != 0)
2812 		return (EAGAIN);
2813 
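	/*
	 * Build one unmap operation per shared ring page so the grant
	 * mappings established in xbb_connect_ring() are torn down.
	 */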
2814 	for (ring_idx = 0, op = ops;
2815 	     ring_idx < xbb->ring_config.ring_pages;
2816 	     ring_idx++, op++) {
2817 
2818 		op->host_addr    = xbb->ring_config.gnt_addr
2819 			         + (ring_idx * PAGE_SIZE);
2820 		op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx];
2821 		op->handle	 = xbb->ring_config.handle[ring_idx];
2822 	}
2823 
2824 	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ops,
2825 					  xbb->ring_config.ring_pages);
2826 	if (error != 0)
2827 		panic("Grant table op failed (%d)", error);
2828 
2829 	xbb_free_communication_mem(xbb);
2830 
2831 	if (xbb->requests != NULL) {
2832 		free(xbb->requests, M_XENBLOCKBACK);
2833 		xbb->requests = NULL;
2834 	}
2835 
2836 	if (xbb->request_lists != NULL) {
2837 		struct xbb_xen_reqlist *reqlist;
2838 		int i;
2839 
2840 		/* There is one request list for every allocated request. */
2841 		for (i = 0, reqlist = xbb->request_lists;
2842 		     i < xbb->max_requests; i++, reqlist++){
2843 #ifdef XBB_USE_BOUNCE_BUFFERS
2844 			if (reqlist->bounce != NULL) {
2845 				free(reqlist->bounce, M_XENBLOCKBACK);
2846 				reqlist->bounce = NULL;
2847 			}
2848 #endif
2849 			if (reqlist->gnt_handles != NULL) {
2850 				free(reqlist->gnt_handles, M_XENBLOCKBACK);
2851 				reqlist->gnt_handles = NULL;
2852 			}
2853 		}
2854 		free(xbb->request_lists, M_XENBLOCKBACK);
2855 		xbb->request_lists = NULL;
2856 	}
2857 
2858 	xbb->flags &= ~XBBF_RING_CONNECTED;
2859 	return (0);
2860 }
2861 
2862 /**
2863  * Map shared memory ring into domain local address space, initialize
2864  * ring control structures, and bind an interrupt to the event channel
2865  * used to notify us of ring changes.
2866  *
2867  * \param xbb  Per-instance xbb configuration structure.
2868  */
2869 static int
2870 xbb_connect_ring(struct xbb_softc *xbb)
2871 {
2872 	struct gnttab_map_grant_ref  gnts[XBB_MAX_RING_PAGES];
2873 	struct gnttab_map_grant_ref *gnt;
2874 	u_int			     ring_idx;
2875 	int			     error;
2876 
2877 	if ((xbb->flags & XBBF_RING_CONNECTED) != 0)
2878 		return (0);
2879 
2880 	/*
2881 	 * Kva for our ring is at the tail of the region of kva allocated
2882 	 * by xbb_alloc_communication_mem().
2883 	 */
2884 	xbb->ring_config.va = xbb->kva
2885 			    + (xbb->kva_size
2886 			     - (xbb->ring_config.ring_pages * PAGE_SIZE));
2887 	xbb->ring_config.gnt_addr = xbb->gnt_base_addr
2888 				  + (xbb->kva_size
2889 				   - (xbb->ring_config.ring_pages * PAGE_SIZE));
2890 
2891 	for (ring_idx = 0, gnt = gnts;
2892 	     ring_idx < xbb->ring_config.ring_pages;
2893 	     ring_idx++, gnt++) {
2894 
2895 		gnt->host_addr = xbb->ring_config.gnt_addr
2896 			       + (ring_idx * PAGE_SIZE);
2897 		gnt->flags     = GNTMAP_host_map;
2898 		gnt->ref       = xbb->ring_config.ring_ref[ring_idx];
2899 		gnt->dom       = xbb->otherend_id;
2900 	}
2901 
2902 	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, gnts,
2903 					  xbb->ring_config.ring_pages);
2904 	if (error)
2905 		panic("blkback: Ring page grant table op failed (%d)", error);
2906 
2907 	for (ring_idx = 0, gnt = gnts;
2908 	     ring_idx < xbb->ring_config.ring_pages;
2909 	     ring_idx++, gnt++) {
2910 		if (gnt->status != 0) {
2911 			xbb->ring_config.va = 0;
2912 			xenbus_dev_fatal(xbb->dev, EACCES,
2913 					 "Ring shared page mapping failed. "
2914 					 "Status %d.", gnt->status);
2915 			return (EACCES);
2916 		}
2917 		xbb->ring_config.handle[ring_idx]   = gnt->handle;
2918 		xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr;
2919 	}
2920 
2921 	/* Initialize the ring based on ABI. */
2922 	switch (xbb->abi) {
2923 	case BLKIF_PROTOCOL_NATIVE:
2924 	{
2925 		blkif_sring_t *sring;
2926 		sring = (blkif_sring_t *)xbb->ring_config.va;
2927 		BACK_RING_INIT(&xbb->rings.native, sring,
2928 			       xbb->ring_config.ring_pages * PAGE_SIZE);
2929 		break;
2930 	}
2931 	case BLKIF_PROTOCOL_X86_32:
2932 	{
2933 		blkif_x86_32_sring_t *sring_x86_32;
2934 		sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va;
2935 		BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32,
2936 			       xbb->ring_config.ring_pages * PAGE_SIZE);
2937 		break;
2938 	}
2939 	case BLKIF_PROTOCOL_X86_64:
2940 	{
2941 		blkif_x86_64_sring_t *sring_x86_64;
2942 		sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va;
2943 		BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64,
2944 			       xbb->ring_config.ring_pages * PAGE_SIZE);
2945 		break;
2946 	}
2947 	default:
2948 		panic("Unexpected blkif protocol ABI.");
2949 	}
2950 
2951 	xbb->flags |= XBBF_RING_CONNECTED;
2952 
2953 	error = xen_intr_bind_remote_port(xbb->dev,
2954 					  xbb->otherend_id,
2955 					  xbb->ring_config.evtchn,
2956 					  xbb_filter,
2957 					  /*ithread_handler*/NULL,
2958 					  /*arg*/xbb,
2959 					  INTR_TYPE_BIO | INTR_MPSAFE,
2960 					  &xbb->xen_intr_handle);
2961 	if (error) {
2962 		(void)xbb_disconnect(xbb);
2963 		xenbus_dev_fatal(xbb->dev, error, "binding event channel");
2964 		return (error);
2965 	}
2966 
2967 	DPRINTF("rings connected!\n");
2968 
2969 	return 0;
2970 }
2971 
2972 /* Needed to make bit_alloc() macro work */
2973 #define	calloc(count, size) malloc((count)*(size), M_XENBLOCKBACK,	\
2974 				   M_NOWAIT|M_ZERO);
2975 
2976 /**
2977  * Size KVA and pseudo-physical address allocations based on negotiated
2978  * values for the size and number of I/O requests, and the size of our
2979  * communication ring.
2980  *
2981  * \param xbb  Per-instance xbb configuration structure.
2982  *
2983  * These address spaces are used to dynamically map pages in the
2984  * front-end's domain into our own.
2985  */
2986 static int
2987 xbb_alloc_communication_mem(struct xbb_softc *xbb)
2988 {
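	/*
	 * Reqlist KVA covers max_requests * max_request_segments pages,
	 * with the shared ring pages appended at the tail.  Purely as an
	 * illustration, the legacy defaults of 32 requests and
	 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments per request with a single
	 * ring page would reserve (32 * BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
	 * pages of address space.
	 */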
2989 	xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
2990 	xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
2991 	xbb->kva_size = xbb->reqlist_kva_size +
2992 			(xbb->ring_config.ring_pages * PAGE_SIZE);
2993 
2994 	xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages);
2995 	if (xbb->kva_free == NULL)
2996 		return (ENOMEM);
2997 
2998 	DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n",
2999 		device_get_nameunit(xbb->dev), xbb->kva_size,
3000 		xbb->reqlist_kva_size);
3001 	/*
3002 	 * Reserve a range of pseudo physical memory that we can map
3003 	 * into kva.  These pages will only be backed by machine
3004 	 * pages ("real memory") during the lifetime of front-end requests
3005 	 * via grant table operations.
3006 	 */
3007 	xbb->pseudo_phys_res_id = 0;
3008 	xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id,
3009 	    xbb->kva_size);
3010 	if (xbb->pseudo_phys_res == NULL) {
3011 		xbb->kva = 0;
3012 		return (ENOMEM);
3013 	}
3014 	xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
3015 	xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
3016 
3017 	DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n",
3018 		device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
3019 		(uintmax_t)xbb->gnt_base_addr);
3020 	return (0);
3021 }
3022 
3023 /**
3024  * Collect front-end information from the XenStore.
3025  *
3026  * \param xbb  Per-instance xbb configuration structure.
3027  */
3028 static int
3029 xbb_collect_frontend_info(struct xbb_softc *xbb)
3030 {
3031 	char	    protocol_abi[64];
3032 	const char *otherend_path;
3033 	int	    error;
3034 	u_int	    ring_idx;
3035 	u_int	    ring_page_order;
3036 	size_t	    ring_size;
3037 
3038 	otherend_path = xenbus_get_otherend_path(xbb->dev);
3039 
3040 	/*
3041 	 * Protocol defaults valid even if all negotiation fails.
3042 	 */
3043 	xbb->ring_config.ring_pages = 1;
3044 	xbb->max_request_segments   = BLKIF_MAX_SEGMENTS_PER_REQUEST;
3045 	xbb->max_request_size	    = xbb->max_request_segments * PAGE_SIZE;
3046 
3047 	/*
3048 	 * Mandatory data (used in all versions of the protocol) first.
3049 	 */
3050 	error = xs_scanf(XST_NIL, otherend_path,
3051 			 "event-channel", NULL, "%" PRIu32,
3052 			 &xbb->ring_config.evtchn);
3053 	if (error != 0) {
3054 		xenbus_dev_fatal(xbb->dev, error,
3055 				 "Unable to retrieve event-channel information "
3056 				 "from frontend %s.  Unable to connect.",
3057 				 xenbus_get_otherend_path(xbb->dev));
3058 		return (error);
3059 	}
3060 
3061 	/*
3062 	 * These fields are initialized to legacy protocol defaults
3063 	 * so we only need to fail if reading the updated value succeeds
3064 	 * and the new value is outside of its allowed range.
3065 	 *
3066 	 * \note xs_gather() returns on the first encountered error, so
3067 	 *       we must use independent calls in order to guarantee
3068 	 *       we don't miss information in a sparsely populated front-end
3069 	 *       tree.
3070 	 *
3071 	 * \note xs_scanf() does not update variables for unmatched
3072 	 *       fields.
3073 	 */
3074 	ring_page_order = 0;
3075 	xbb->max_requests = 32;
3076 
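	/*
	 * The negotiated ring size is a power of two number of pages.  As
	 * an illustration, a frontend publishing a ring-page-order of 2
	 * gets 1 << 2 == 4 shared pages and a max_requests value derived
	 * from BLKIF_MAX_RING_REQUESTS() over that larger ring.
	 */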
3077 	(void)xs_scanf(XST_NIL, otherend_path,
3078 		       "ring-page-order", NULL, "%u",
3079 		       &ring_page_order);
3080 	xbb->ring_config.ring_pages = 1 << ring_page_order;
3081 	ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
3082 	xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
3083 
3084 	if (xbb->ring_config.ring_pages	> XBB_MAX_RING_PAGES) {
3085 		xenbus_dev_fatal(xbb->dev, EINVAL,
3086 				 "Front-end specified ring-pages of %u "
3087 				 "exceeds backend limit of %u.  "
3088 				 "Unable to connect.",
3089 				 xbb->ring_config.ring_pages,
3090 				 XBB_MAX_RING_PAGES);
3091 		return (EINVAL);
3092 	}
3093 
3094 	if (xbb->ring_config.ring_pages	== 1) {
3095 		error = xs_gather(XST_NIL, otherend_path,
3096 				  "ring-ref", "%" PRIu32,
3097 				  &xbb->ring_config.ring_ref[0],
3098 				  NULL);
3099 		if (error != 0) {
3100 			xenbus_dev_fatal(xbb->dev, error,
3101 					 "Unable to retrieve ring information "
3102 					 "from frontend %s.  Unable to "
3103 					 "connect.",
3104 					 xenbus_get_otherend_path(xbb->dev));
3105 			return (error);
3106 		}
3107 	} else {
3108 		/* Multi-page ring format. */
3109 		for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages;
3110 		     ring_idx++) {
3111 			char ring_ref_name[] = "ring_refXX";
3112 
3113 			snprintf(ring_ref_name, sizeof(ring_ref_name),
3114 				 "ring-ref%u", ring_idx);
3115 			error = xs_scanf(XST_NIL, otherend_path,
3116 					 ring_ref_name, NULL, "%" PRIu32,
3117 					 &xbb->ring_config.ring_ref[ring_idx]);
3118 			if (error != 0) {
3119 				xenbus_dev_fatal(xbb->dev, error,
3120 						 "Failed to retrieve grant "
3121 						 "reference for page %u of "
3122 						 "shared ring.  Unable "
3123 						 "to connect.", ring_idx);
3124 				return (error);
3125 			}
3126 		}
3127 	}
3128 
3129 	error = xs_gather(XST_NIL, otherend_path,
3130 			  "protocol", "%63s", protocol_abi,
3131 			  NULL);
3132 	if (error != 0
3133 	 || !strcmp(protocol_abi, XEN_IO_PROTO_ABI_NATIVE)) {
3134 		/*
3135 		 * Assume native if the frontend has not
3136 		 * published ABI data or if what it has published
3137 		 * matches our own ABI.
3138 		 */
3139 		xbb->abi = BLKIF_PROTOCOL_NATIVE;
3140 	} else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) {
3141 
3142 		xbb->abi = BLKIF_PROTOCOL_X86_32;
3143 	} else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) {
3144 
3145 		xbb->abi = BLKIF_PROTOCOL_X86_64;
3146 	} else {
3147 
3148 		xenbus_dev_fatal(xbb->dev, EINVAL,
3149 				 "Unknown protocol ABI (%s) published by "
3150 				 "frontend.  Unable to connect.", protocol_abi);
3151 		return (EINVAL);
3152 	}
3153 	return (0);
3154 }
3155 
3156 /**
3157  * Allocate per-request data structures given request size and number
3158  * information negotiated with the front-end.
3159  *
3160  * \param xbb  Per-instance xbb configuration structure.
3161  */
3162 static int
3163 xbb_alloc_requests(struct xbb_softc *xbb)
3164 {
3165 	struct xbb_xen_req *req;
3166 	struct xbb_xen_req *last_req;
3167 
3168 	/*
3169 	 * Allocate request bookkeeping data structures.
3170 	 */
3171 	xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests),
3172 			       M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3173 	if (xbb->requests == NULL) {
3174 		xenbus_dev_fatal(xbb->dev, ENOMEM,
3175 				  "Unable to allocate request structures");
3176 		return (ENOMEM);
3177 	}
3178 
3179 	req      = xbb->requests;
3180 	last_req = &xbb->requests[xbb->max_requests - 1];
3181 	STAILQ_INIT(&xbb->request_free_stailq);
3182 	while (req <= last_req) {
3183 		STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links);
3184 		req++;
3185 	}
3186 	return (0);
3187 }
3188 
3189 static int
3190 xbb_alloc_request_lists(struct xbb_softc *xbb)
3191 {
3192 	struct xbb_xen_reqlist *reqlist;
3193 	int			i;
3194 
3195 	/*
3196 	 * If no requests can be merged, we need 1 request list per
3197 	 * in flight request.
3198 	 */
3199 	xbb->request_lists = malloc(xbb->max_requests *
3200 		sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3201 	if (xbb->request_lists == NULL) {
3202 		xenbus_dev_fatal(xbb->dev, ENOMEM,
3203 				  "Unable to allocate request list structures");
3204 		return (ENOMEM);
3205 	}
3206 
3207 	STAILQ_INIT(&xbb->reqlist_free_stailq);
3208 	STAILQ_INIT(&xbb->reqlist_pending_stailq);
3209 	for (i = 0; i < xbb->max_requests; i++) {
3210 		int seg;
3211 
3212 		reqlist      = &xbb->request_lists[i];
3213 
3214 		reqlist->xbb = xbb;
3215 
3216 #ifdef XBB_USE_BOUNCE_BUFFERS
3217 		reqlist->bounce = malloc(xbb->max_reqlist_size,
3218 					 M_XENBLOCKBACK, M_NOWAIT);
3219 		if (reqlist->bounce == NULL) {
3220 			xenbus_dev_fatal(xbb->dev, ENOMEM,
3221 					 "Unable to allocate request "
3222 					 "bounce buffers");
3223 			return (ENOMEM);
3224 		}
3225 #endif /* XBB_USE_BOUNCE_BUFFERS */
3226 
3227 		reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3228 					      sizeof(*reqlist->gnt_handles),
3229 					      M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3230 		if (reqlist->gnt_handles == NULL) {
3231 			xenbus_dev_fatal(xbb->dev, ENOMEM,
3232 					  "Unable to allocate request "
3233 					  "grant references");
3234 			return (ENOMEM);
3235 		}
3236 
3237 		for (seg = 0; seg < xbb->max_reqlist_segments; seg++)
3238 			reqlist->gnt_handles[seg] = GRANT_REF_INVALID;
3239 
3240 		STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
3241 	}
3242 	return (0);
3243 }
3244 
3245 /**
3246  * Supply information about the physical device to the frontend
3247  * via XenBus.
3248  *
3249  * \param xbb  Per-instance xbb configuration structure.
3250  */
3251 static int
3252 xbb_publish_backend_info(struct xbb_softc *xbb)
3253 {
3254 	struct xs_transaction xst;
3255 	const char	     *our_path;
3256 	const char	     *leaf;
3257 	int		      error;
3258 
3259 	our_path = xenbus_get_node(xbb->dev);
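	/*
	 * Publish the backend parameters inside a XenStore transaction so
	 * the frontend observes them atomically.  xs_transaction_end()
	 * returning EAGAIN indicates the transaction raced with another
	 * writer, in which case the whole set of writes is simply retried.
	 */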
3260 	while (1) {
3261 		error = xs_transaction_start(&xst);
3262 		if (error != 0) {
3263 			xenbus_dev_fatal(xbb->dev, error,
3264 					 "Error publishing backend info "
3265 					 "(start transaction)");
3266 			return (error);
3267 		}
3268 
3269 		leaf = "sectors";
3270 		error = xs_printf(xst, our_path, leaf,
3271 				  "%"PRIu64, xbb->media_num_sectors);
3272 		if (error != 0)
3273 			break;
3274 
3275 		/* XXX Support all VBD attributes here. */
3276 		leaf = "info";
3277 		error = xs_printf(xst, our_path, leaf, "%u",
3278 				  xbb->flags & XBBF_READ_ONLY
3279 				? VDISK_READONLY : 0);
3280 		if (error != 0)
3281 			break;
3282 
3283 		leaf = "sector-size";
3284 		error = xs_printf(xst, our_path, leaf, "%u",
3285 				  xbb->sector_size);
3286 		if (error != 0)
3287 			break;
3288 
3289 		error = xs_transaction_end(xst, 0);
3290 		if (error == 0) {
3291 			return (0);
3292 		} else if (error != EAGAIN) {
3293 			xenbus_dev_fatal(xbb->dev, error, "ending transaction");
3294 			return (error);
3295 		}
3296 	}
3297 
3298 	xenbus_dev_fatal(xbb->dev, error, "writing %s/%s",
3299 			our_path, leaf);
3300 	xs_transaction_end(xst, 1);
3301 	return (error);
3302 }
3303 
3304 /**
3305  * Connect to our blkfront peer now that it has completed publishing
3306  * its configuration into the XenStore.
3307  *
3308  * \param xbb  Per-instance xbb configuration structure.
3309  */
3310 static void
3311 xbb_connect(struct xbb_softc *xbb)
3312 {
3313 	int error;
3314 
3315 	if (xenbus_get_state(xbb->dev) == XenbusStateConnected)
3316 		return;
3317 
3318 	if (xbb_collect_frontend_info(xbb) != 0)
3319 		return;
3320 
3321 	xbb->flags &= ~XBBF_SHUTDOWN;
3322 
3323 	/*
3324 	 * We limit the maximum number of reqlist segments to the maximum
3325 	 * number of segments in the ring, or our absolute maximum,
3326 	 * whichever is smaller.
3327 	 */
3328 	xbb->max_reqlist_segments = MIN(xbb->max_request_segments *
3329 		xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST);
3330 
3331 	/*
3332 	 * The maximum size is simply a function of the number of segments
3333 	 * we can handle.
3334 	 */
3335 	xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE;
3336 
3337 	/* Allocate resources whose size depends on front-end configuration. */
3338 	error = xbb_alloc_communication_mem(xbb);
3339 	if (error != 0) {
3340 		xenbus_dev_fatal(xbb->dev, error,
3341 				 "Unable to allocate communication memory");
3342 		return;
3343 	}
3344 
3345 	error = xbb_alloc_requests(xbb);
3346 	if (error != 0) {
3347 		/* Specific errors are reported by xbb_alloc_requests(). */
3348 		return;
3349 	}
3350 
3351 	error = xbb_alloc_request_lists(xbb);
3352 	if (error != 0) {
3353 		/* Specific errors are reported by xbb_alloc_request_lists(). */
3354 		return;
3355 	}
3356 
3357 	/*
3358 	 * Connect communication channel.
3359 	 */
3360 	error = xbb_connect_ring(xbb);
3361 	if (error != 0) {
3362 		/* Specific errors are reported by xbb_connect_ring(). */
3363 		return;
3364 	}
3365 
3366 	if (xbb_publish_backend_info(xbb) != 0) {
3367 		/*
3368 		 * If we can't publish our data, we cannot participate
3369 		 * in this connection, and waiting for a front-end state
3370 		 * change will not help the situation.
3371 		 */
3372 		(void)xbb_disconnect(xbb);
3373 		return;
3374 	}
3375 
3376 	/* Ready for I/O. */
3377 	xenbus_set_state(xbb->dev, XenbusStateConnected);
3378 }
3379 
3380 /*-------------------------- Device Teardown Support -------------------------*/
3381 /**
3382  * Perform device shutdown functions.
3383  *
3384  * \param xbb  Per-instance xbb configuration structure.
3385  *
3386  * Mark this instance as shutting down, wait for any active I/O on the
3387  * backend device/file to drain, disconnect from the front-end, and notify
3388  * any waiters (e.g. a thread invoking our detach method) that detach can
3389  * now proceed.
3390  */
3391 static int
3392 xbb_shutdown(struct xbb_softc *xbb)
3393 {
3394 	XenbusState frontState;
3395 	int	    error;
3396 
3397 	DPRINTF("\n");
3398 
3399 	/*
3400 	 * Due to the need to drop our mutex during some
3401 	 * xenbus operations, it is possible for two threads
3402 	 * to attempt to close out shutdown processing at
3403 	 * the same time.  Tell the caller that hits this
3404 	 * race to try back later.
3405 	 */
3406 	if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0)
3407 		return (EAGAIN);
3408 
3409 	xbb->flags |= XBBF_IN_SHUTDOWN;
3410 	mtx_unlock(&xbb->lock);
3411 
3412 	if (xenbus_get_state(xbb->dev) < XenbusStateClosing)
3413 		xenbus_set_state(xbb->dev, XenbusStateClosing);
3414 
3415 	frontState = xenbus_get_otherend_state(xbb->dev);
3416 	mtx_lock(&xbb->lock);
3417 	xbb->flags &= ~XBBF_IN_SHUTDOWN;
3418 
3419 	/* The front can submit I/O until entering the closed state. */
3420 	if (frontState < XenbusStateClosed)
3421 		return (EAGAIN);
3422 
3423 	DPRINTF("\n");
3424 
3425 	/* Indicate shutdown is in progress. */
3426 	xbb->flags |= XBBF_SHUTDOWN;
3427 
3428 	/* Disconnect from the front-end. */
3429 	error = xbb_disconnect(xbb);
3430 	if (error != 0) {
3431 		/*
3432 		 * Requests still outstanding.  We'll be called again
3433 		 * once they complete.
3434 		 */
3435 		KASSERT(error == EAGAIN,
3436 			("%s: Unexpected xbb_disconnect() failure %d",
3437 			 __func__, error));
3438 
3439 		return (error);
3440 	}
3441 
3442 	DPRINTF("\n");
3443 
3444 	/* Indicate to xbb_detach() that it is safe to proceed. */
3445 	wakeup(xbb);
3446 
3447 	return (0);
3448 }
3449 
3450 /**
3451  * Report an attach time error to the console and Xen, and cleanup
3452  * this instance by forcing immediate detach processing.
3453  *
3454  * \param xbb  Per-instance xbb configuration structure.
3455  * \param err  Errno describing the error.
3456  * \param fmt  Printf style format and arguments
3457  */
3458 static void
3459 xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...)
3460 {
3461 	va_list ap;
3462 	va_list ap_hotplug;
3463 
3464 	va_start(ap, fmt);
3465 	va_copy(ap_hotplug, ap);
3466 	xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev),
3467 		  "hotplug-error", fmt, ap_hotplug);
3468 	va_end(ap_hotplug);
3469 	xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3470 		  "hotplug-status", "error");
3471 
3472 	xenbus_dev_vfatal(xbb->dev, err, fmt, ap);
3473 	va_end(ap);
3474 
3475 	xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3476 		  "online", "0");
3477 	xbb_detach(xbb->dev);
3478 }
3479 
3480 /*---------------------------- NewBus Entrypoints ----------------------------*/
3481 /**
3482  * Inspect a XenBus device and claim it if is of the appropriate type.
3483  *
3484  * \param dev  NewBus device object representing a candidate XenBus device.
3485  *
3486  * \return  0 for success, errno codes for failure.
3487  */
3488 static int
3489 xbb_probe(device_t dev)
3490 {
3491 
3492 	if (!strcmp(xenbus_get_type(dev), "vbd")) {
3493 		device_set_desc(dev, "Backend Virtual Block Device");
3494 		device_quiet(dev);
3495 		return (0);
3496 	}
3497 
3498 	return (ENXIO);
3499 }
3500 
3501 /**
3502  * Setup sysctl variables to control various Block Back parameters.
3503  *
3504  * \param xbb  Xen Block Back softc.
3505  *
3506  */
3507 static void
3508 xbb_setup_sysctl(struct xbb_softc *xbb)
3509 {
3510 	struct sysctl_ctx_list *sysctl_ctx = NULL;
3511 	struct sysctl_oid      *sysctl_tree = NULL;
3512 
3513 	sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
3514 	if (sysctl_ctx == NULL)
3515 		return;
3516 
3517 	sysctl_tree = device_get_sysctl_tree(xbb->dev);
3518 	if (sysctl_tree == NULL)
3519 		return;
3520 
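	/*
	 * All of the nodes below hang off this device's sysctl tree (for
	 * instance something like dev.xbbd.<unit>.no_coalesce_reqs -- the
	 * exact prefix depends on the driver name), so each backend
	 * instance can be tuned and inspected independently.
	 */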
3521 	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3522 		       "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0,
3523 		       "fake the flush command");
3524 
3525 	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3526 		       "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0,
3527 		       "send a real flush for N flush requests");
3528 
3529 	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3530 		       "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0,
3531 		       "Don't coalesce contiguous requests");
3532 
3533 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3534 			 "reqs_received", CTLFLAG_RW, &xbb->reqs_received,
3535 			 "how many I/O requests we have received");
3536 
3537 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3538 			 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed,
3539 			 "how many I/O requests have been completed");
3540 
3541 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3542 			 "reqs_queued_for_completion", CTLFLAG_RW,
3543 			 &xbb->reqs_queued_for_completion,
3544 			 "how many I/O requests queued but not yet pushed");
3545 
3546 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3547 			 "reqs_completed_with_error", CTLFLAG_RW,
3548 			 &xbb->reqs_completed_with_error,
3549 			 "how many I/O requests completed with error status");
3550 
3551 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3552 			 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
3553 			 "how many I/O dispatches were forced");
3554 
3555 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3556 			 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch,
3557 			 "how many I/O dispatches were normal");
3558 
3559 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3560 			 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch,
3561 			 "total number of I/O dispatches");
3562 
3563 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3564 			 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages,
3565 			 "how many times we have run out of KVA");
3566 
3567 	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3568 			 "request_shortages", CTLFLAG_RW,
3569 			 &xbb->request_shortages,
3570 			 "how many times we have run out of requests");
3571 
3572 	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3573 		        "max_requests", CTLFLAG_RD, &xbb->max_requests, 0,
3574 		        "maximum outstanding requests (negotiated)");
3575 
3576 	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3577 		        "max_request_segments", CTLFLAG_RD,
3578 		        &xbb->max_request_segments, 0,
3579 		        "maximum number of pages per requests (negotiated)");
3580 		        "maximum number of pages per request (negotiated)");
3581 	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3582 		        "max_request_size", CTLFLAG_RD,
3583 		        &xbb->max_request_size, 0,
3584 		        "maximum size in bytes of a request (negotiated)");
3585 
3586 	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
3587 		        "ring_pages", CTLFLAG_RD,
3588 		        &xbb->ring_config.ring_pages, 0,
3589 		        "communication channel pages (negotiated)");
3590 }
3591 
3592 /**
3593  * Attach to a XenBus device that has been claimed by our probe routine.
3594  *
3595  * \param dev  NewBus device object representing this Xen Block Back instance.
3596  *
3597  * \return  0 for success, errno codes for failure.
3598  */
3599 static int
3600 xbb_attach(device_t dev)
3601 {
3602 	struct xbb_softc	*xbb;
3603 	int			 error;
3604 	u_int			 max_ring_page_order;
3605 
3606 	DPRINTF("Attaching to %s\n", xenbus_get_node(dev));
3607 
3608 	/*
3609 	 * Basic initialization.
3610 	 * After this block it is safe to call xbb_detach()
3611 	 * to clean up any allocated data for this instance.
3612 	 */
3613 	xbb = device_get_softc(dev);
3614 	xbb->dev = dev;
3615 	xbb->otherend_id = xenbus_get_otherend_id(dev);
3616 	TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb);
3617 	mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF);
3618 
3619 	/*
3620 	 * Publish protocol capabilities for consumption by the
3621 	 * front-end.
3622 	 */
3623 	error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3624 			  "feature-barrier", "1");
3625 	if (error) {
3626 		xbb_attach_failed(xbb, error, "writing %s/feature-barrier",
3627 				  xenbus_get_node(xbb->dev));
3628 		return (error);
3629 	}
3630 
3631 	error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3632 			  "feature-flush-cache", "1");
3633 	if (error) {
3634 		xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache",
3635 				  xenbus_get_node(xbb->dev));
3636 		return (error);
3637 	}
3638 
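	/*
	 * The "max-ring-page-order" node advertises log2 of the largest ring
	 * (in pages) we will accept; flsl(XBB_MAX_RING_PAGES) - 1 computes
	 * that log2 for our power-of-two compile-time limit.
	 */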
3639 	max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1;
3640 	error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3641 			  "max-ring-page-order", "%u", max_ring_page_order);
3642 	if (error) {
3643 		xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order",
3644 				  xenbus_get_node(xbb->dev));
3645 		return (error);
3646 	}
3647 
3648 	/* Collect physical device information. */
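	/*
	 * "device-type" (typically "disk" or "cdrom") lives in the
	 * front-end's XenStore directory and is optional, so a failure to
	 * read it is tolerated below.
	 */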
3649 	error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev),
3650 			  "device-type", NULL, &xbb->dev_type,
3651 			  NULL);
3652 	if (error != 0)
3653 		xbb->dev_type = NULL;
3654 
3655 	error = xs_gather(XST_NIL, xenbus_get_node(dev),
3656 			  "mode", NULL, &xbb->dev_mode,
3657 			  "params", NULL, &xbb->dev_name,
3658 			  NULL);
3659 	if (error != 0) {
3660 		xbb_attach_failed(xbb, error, "reading backend fields at %s",
3661 				  xenbus_get_node(dev));
3662 		return (ENXIO);
3663 	}
3664 
3665 	/* Parse fopen style mode flags. */
3666 	if (strchr(xbb->dev_mode, 'w') == NULL)
3667 		xbb->flags |= XBBF_READ_ONLY;
3668 
3669 	/*
3670 	 * Verify the physical device is present and can support
3671 	 * the desired I/O mode.
3672 	 */
3673 	DROP_GIANT();
3674 	error = xbb_open_backend(xbb);
3675 	PICKUP_GIANT();
3676 	if (error != 0) {
3677 		xbb_attach_failed(xbb, error, "Unable to open %s",
3678 				  xbb->dev_name);
3679 		return (ENXIO);
3680 	}
3681 
3682 	/* Use devstat(9) for recording statistics. */
3683 	xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev),
3684 					   xbb->sector_size,
3685 					   DEVSTAT_ALL_SUPPORTED,
3686 					   DEVSTAT_TYPE_DIRECT
3687 					 | DEVSTAT_TYPE_IF_OTHER,
3688 					   DEVSTAT_PRIORITY_OTHER);
3689 
3690 	xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
3691 					      xbb->sector_size,
3692 					      DEVSTAT_ALL_SUPPORTED,
3693 					      DEVSTAT_TYPE_DIRECT
3694 					    | DEVSTAT_TYPE_IF_OTHER,
3695 					      DEVSTAT_PRIORITY_OTHER);
3696 	/*
3697 	 * Set up sysctl variables.
3698 	 */
3699 	xbb_setup_sysctl(xbb);
3700 
3701 	/*
3702 	 * Create a taskqueue for doing work that must occur from a
3703 	 * thread context.
3704 	 */
3705 	xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev),
3706 						  M_NOWAIT,
3707 						  taskqueue_thread_enqueue,
3708 						  /*context*/&xbb->io_taskqueue);
3709 	if (xbb->io_taskqueue == NULL) {
3710 		xbb_attach_failed(xbb, ENOMEM, "Unable to create taskqueue");
3711 		return (ENOMEM);
3712 	}
3713 
3714 	taskqueue_start_threads(&xbb->io_taskqueue,
3715 				/*num threads*/1,
3716 				/*priority*/PWAIT,
3717 				/*thread name*/
3718 				"%s taskq", device_get_nameunit(dev));
3719 
3720 	/* Update hot-plug status to satisfy xend. */
3721 	error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3722 			  "hotplug-status", "connected");
3723 	if (error) {
3724 		xbb_attach_failed(xbb, error, "writing %s/hotplug-status",
3725 				  xenbus_get_node(xbb->dev));
3726 		return (error);
3727 	}
3728 
3729 	/* Tell the front end that we are ready to connect. */
3730 	xenbus_set_state(dev, XenbusStateInitWait);
3731 
3732 	return (0);
3733 }
3734 
3735 /**
3736  * Detach from a block back device instance.
3737  *
3738  * \param dev  NewBus device object representing this Xen Block Back instance.
3739  *
3740  * \return  0 for success, errno codes for failure.
3741  *
3742  * \note A block back device may be detached at any time in its life-cycle,
3743  *       including part way through the attach process.  For this reason,
3744  *       initialization order and the initialization state checks in this
3745  *       routine must be carefully coupled so that attach time failures
3746  *       are gracefully handled.
3747  */
3748 static int
3749 xbb_detach(device_t dev)
3750 {
3751 	struct xbb_softc *xbb;
3752 
3753 	DPRINTF("\n");
3754 
3755 	xbb = device_get_softc(dev);
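	/*
	 * xbb_shutdown() returns EAGAIN while the instance still has work
	 * outstanding; sleep on the softc until a shutdown pass completes
	 * before freeing the remaining resources.
	 */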
3756 	mtx_lock(&xbb->lock);
3757 	while (xbb_shutdown(xbb) == EAGAIN) {
3758 		msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0,
3759 		       "xbb_shutdown", 0);
3760 	}
3761 	mtx_unlock(&xbb->lock);
3762 
3763 	DPRINTF("\n");
3764 
3765 	if (xbb->io_taskqueue != NULL)
3766 		taskqueue_free(xbb->io_taskqueue);
3767 
3768 	if (xbb->xbb_stats != NULL)
3769 		devstat_remove_entry(xbb->xbb_stats);
3770 
3771 	if (xbb->xbb_stats_in != NULL)
3772 		devstat_remove_entry(xbb->xbb_stats_in);
3773 
3774 	xbb_close_backend(xbb);
3775 
3776 	if (xbb->dev_mode != NULL) {
3777 		free(xbb->dev_mode, M_XENSTORE);
3778 		xbb->dev_mode = NULL;
3779 	}
3780 
3781 	if (xbb->dev_type != NULL) {
3782 		free(xbb->dev_type, M_XENSTORE);
3783 		xbb->dev_type = NULL;
3784 	}
3785 
3786 	if (xbb->dev_name != NULL) {
3787 		free(xbb->dev_name, M_XENSTORE);
3788 		xbb->dev_name = NULL;
3789 	}
3790 
3791 	mtx_destroy(&xbb->lock);
3792 	return (0);
3793 }
3794 
3795 /**
3796  * Prepare this block back device for suspension of this VM.
3797  *
3798  * \param dev  NewBus device object representing this Xen Block Back instance.
3799  *
3800  * \return  0 for success, errno codes for failure.
3801  */
3802 static int
3803 xbb_suspend(device_t dev)
3804 {
3805 #ifdef NOT_YET
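	/*
	 * Placeholder only: this block is compiled out, and the fields it
	 * references (xb_io_lock, connected) follow the front-end driver's
	 * naming rather than struct xbb_softc.
	 */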
3806 	struct xbb_softc *sc = device_get_softc(dev);
3807 
3808 	/* Prevent new requests being issued until we fix things up. */
3809 	mtx_lock(&sc->xb_io_lock);
3810 	sc->connected = BLKIF_STATE_SUSPENDED;
3811 	mtx_unlock(&sc->xb_io_lock);
3812 #endif
3813 
3814 	return (0);
3815 }
3816 
3817 /**
3818  * Perform any processing required to recover from a suspended state.
3819  *
3820  * \param dev  NewBus device object representing this Xen Block Back instance.
3821  *
3822  * \return  0 for success, errno codes for failure.
3823  */
3824 static int
3825 xbb_resume(device_t dev)
3826 {
3827 	return (0);
3828 }
3829 
3830 /**
3831  * Handle state changes expressed via the XenStore by our front-end peer.
3832  *
3833  * \param dev             NewBus device object representing this Xen
3834  *                        Block Back instance.
3835  * \param frontend_state  The new state of the front-end.
3838  */
3839 static void
3840 xbb_frontend_changed(device_t dev, XenbusState frontend_state)
3841 {
3842 	struct xbb_softc *xbb = device_get_softc(dev);
3843 
3844 	DPRINTF("frontend_state=%s, xbb_state=%s\n",
3845 	        xenbus_strstate(frontend_state),
3846 		xenbus_strstate(xenbus_get_state(xbb->dev)));
3847 
3848 	switch (frontend_state) {
3849 	case XenbusStateInitialising:
3850 		break;
3851 	case XenbusStateInitialised:
3852 	case XenbusStateConnected:
3853 		xbb_connect(xbb);
3854 		break;
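		/*
		 * Shut down on either close notification, but only advertise
		 * XenbusStateClosed once the front-end has fully closed.
		 */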
3855 	case XenbusStateClosing:
3856 	case XenbusStateClosed:
3857 		mtx_lock(&xbb->lock);
3858 		xbb_shutdown(xbb);
3859 		mtx_unlock(&xbb->lock);
3860 		if (frontend_state == XenbusStateClosed)
3861 			xenbus_set_state(xbb->dev, XenbusStateClosed);
3862 		break;
3863 	default:
3864 		xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend",
3865 				 frontend_state);
3866 		break;
3867 	}
3868 }
3869 
3870 /*---------------------------- NewBus Registration ---------------------------*/
3871 static device_method_t xbb_methods[] = {
3872 	/* Device interface */
3873 	DEVMETHOD(device_probe,		xbb_probe),
3874 	DEVMETHOD(device_attach,	xbb_attach),
3875 	DEVMETHOD(device_detach,	xbb_detach),
3876 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
3877 	DEVMETHOD(device_suspend,	xbb_suspend),
3878 	DEVMETHOD(device_resume,	xbb_resume),
3879 
3880 	/* Xenbus interface */
3881 	DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed),
3882 
3883 	{ 0, 0 }
3884 };
3885 
3886 static driver_t xbb_driver = {
3887 	"xbbd",
3888 	xbb_methods,
3889 	sizeof(struct xbb_softc),
3890 };
3891 devclass_t xbb_devclass;
3892 
3893 DRIVER_MODULE(xbbd, xenbusb_back, xbb_driver, xbb_devclass, 0, 0);
3894