/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
2989e0f4d2SKip Macy * 3089e0f4d2SKip Macy * $FreeBSD$ 3189e0f4d2SKip Macy */ 3289e0f4d2SKip Macy 3333eebb6aSJustin T. Gibbs #ifndef __XEN_BLKFRONT_BLOCK_H__ 3433eebb6aSJustin T. Gibbs #define __XEN_BLKFRONT_BLOCK_H__ 35ff662b5cSJustin T. Gibbs #include <xen/blkif.h> 36ff662b5cSJustin T. Gibbs 37ff662b5cSJustin T. Gibbs /** 38443cc4d4SJustin T. Gibbs * Given a number of blkif segments, compute the maximum I/O size supported. 39443cc4d4SJustin T. Gibbs * 40443cc4d4SJustin T. Gibbs * \note This calculation assumes that all but the first and last segments 41443cc4d4SJustin T. Gibbs * of the I/O are fully utilized. 42443cc4d4SJustin T. Gibbs * 43443cc4d4SJustin T. Gibbs * \note We reserve a segement from the maximum supported by the transport to 44443cc4d4SJustin T. Gibbs * guarantee we can handle an unaligned transfer without the need to 45443cc4d4SJustin T. Gibbs * use a bounce buffer. 46443cc4d4SJustin T. Gibbs */ 4733eebb6aSJustin T. Gibbs #define XBD_SEGS_TO_SIZE(segs) \ 48443cc4d4SJustin T. Gibbs (((segs) - 1) * PAGE_SIZE) 49443cc4d4SJustin T. Gibbs 50443cc4d4SJustin T. Gibbs /** 51443cc4d4SJustin T. Gibbs * Compute the maximum number of blkif segments requried to represent 52443cc4d4SJustin T. Gibbs * an I/O of the given size. 53443cc4d4SJustin T. Gibbs * 54443cc4d4SJustin T. Gibbs * \note This calculation assumes that all but the first and last segments 55443cc4d4SJustin T. Gibbs * of the I/O are fully utilized. 56443cc4d4SJustin T. Gibbs * 57443cc4d4SJustin T. Gibbs * \note We reserve a segement to guarantee we can handle an unaligned 58443cc4d4SJustin T. Gibbs * transfer without the need to use a bounce buffer. 59443cc4d4SJustin T. Gibbs */ 6033eebb6aSJustin T. Gibbs #define XBD_SIZE_TO_SEGS(size) \ 61443cc4d4SJustin T. Gibbs ((size / PAGE_SIZE) + 1) 62443cc4d4SJustin T. Gibbs 63443cc4d4SJustin T. 
Gibbs /** 64112cacaeSRoger Pau Monné * The maximum number of shared memory ring pages we will allow in a 65112cacaeSRoger Pau Monné * negotiated block-front/back communication channel. Allow enough 66112cacaeSRoger Pau Monné * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd. 67112cacaeSRoger Pau Monné */ 68112cacaeSRoger Pau Monné #define XBD_MAX_RING_PAGES 32 69112cacaeSRoger Pau Monné 70112cacaeSRoger Pau Monné /** 71*ad935ed2SColin Percival * The maximum number of outstanding requests we will allow in a negotiated 72*ad935ed2SColin Percival * block-front/back communication channel. 73ff662b5cSJustin T. Gibbs */ 74112cacaeSRoger Pau Monné #define XBD_MAX_REQUESTS \ 75112cacaeSRoger Pau Monné __CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES) 76ff662b5cSJustin T. Gibbs 77ff662b5cSJustin T. Gibbs /** 78ff662b5cSJustin T. Gibbs * The maximum mapped region size per request we will allow in a negotiated 79ff662b5cSJustin T. Gibbs * block-front/back communication channel. 80ff662b5cSJustin T. Gibbs */ 8133eebb6aSJustin T. Gibbs #define XBD_MAX_REQUEST_SIZE \ 8233eebb6aSJustin T. Gibbs MIN(MAXPHYS, XBD_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST)) 83ff662b5cSJustin T. Gibbs 84e2c1fe90SJustin T. Gibbs typedef enum { 85e2c1fe90SJustin T. Gibbs XBDCF_Q_MASK = 0xFF, 869985113bSJustin T. Gibbs /* This command has contributed to xbd_qfrozen_cnt. */ 87e2c1fe90SJustin T. Gibbs XBDCF_FROZEN = 1<<8, 889985113bSJustin T. Gibbs /* Freeze the command queue on dispatch (i.e. single step command). */ 899985113bSJustin T. Gibbs XBDCF_Q_FREEZE = 1<<9, 909985113bSJustin T. Gibbs /* Bus DMA returned EINPROGRESS for this command. */ 91127a9483SJustin T. Gibbs XBDCF_ASYNC_MAPPING = 1<<10, 92e2c1fe90SJustin T. Gibbs XBDCF_INITIALIZER = XBDCF_Q_MASK 93e2c1fe90SJustin T. Gibbs } xbdc_flag_t; 94e2c1fe90SJustin T. Gibbs 9533eebb6aSJustin T. Gibbs struct xbd_command; 9633eebb6aSJustin T. Gibbs typedef void xbd_cbcf_t(struct xbd_command *); 9789e0f4d2SKip Macy 9833eebb6aSJustin T. 
Gibbs struct xbd_command { 9933eebb6aSJustin T. Gibbs TAILQ_ENTRY(xbd_command) cm_link; 10033eebb6aSJustin T. Gibbs struct xbd_softc *cm_sc; 101e2c1fe90SJustin T. Gibbs xbdc_flag_t cm_flags; 10233eebb6aSJustin T. Gibbs bus_dmamap_t cm_map; 10333eebb6aSJustin T. Gibbs uint64_t cm_id; 10433eebb6aSJustin T. Gibbs grant_ref_t *cm_sg_refs; 10533eebb6aSJustin T. Gibbs struct bio *cm_bp; 10633eebb6aSJustin T. Gibbs grant_ref_t cm_gref_head; 10733eebb6aSJustin T. Gibbs void *cm_data; 10833eebb6aSJustin T. Gibbs size_t cm_datalen; 10933eebb6aSJustin T. Gibbs u_int cm_nseg; 11033eebb6aSJustin T. Gibbs int cm_operation; 11133eebb6aSJustin T. Gibbs blkif_sector_t cm_sector_number; 11233eebb6aSJustin T. Gibbs int cm_status; 11333eebb6aSJustin T. Gibbs xbd_cbcf_t *cm_complete; 11489e0f4d2SKip Macy }; 11589e0f4d2SKip Macy 116e2c1fe90SJustin T. Gibbs typedef enum { 117e2c1fe90SJustin T. Gibbs XBD_Q_FREE, 118e2c1fe90SJustin T. Gibbs XBD_Q_READY, 119e2c1fe90SJustin T. Gibbs XBD_Q_BUSY, 120e2c1fe90SJustin T. Gibbs XBD_Q_COMPLETE, 121e2c1fe90SJustin T. Gibbs XBD_Q_BIO, 122e2c1fe90SJustin T. Gibbs XBD_Q_COUNT, 123e2c1fe90SJustin T. Gibbs XBD_Q_NONE = XBDCF_Q_MASK 124e2c1fe90SJustin T. Gibbs } xbd_q_index_t; 12589e0f4d2SKip Macy 126e2c1fe90SJustin T. Gibbs typedef struct xbd_cm_q { 127e2c1fe90SJustin T. Gibbs TAILQ_HEAD(, xbd_command) q_tailq; 128e4808c4bSKip Macy uint32_t q_length; 129e4808c4bSKip Macy uint32_t q_max; 130e2c1fe90SJustin T. Gibbs } xbd_cm_q_t; 131e4808c4bSKip Macy 132e2c1fe90SJustin T. Gibbs typedef enum { 133e2c1fe90SJustin T. Gibbs XBD_STATE_DISCONNECTED, 134e2c1fe90SJustin T. Gibbs XBD_STATE_CONNECTED, 135e2c1fe90SJustin T. Gibbs XBD_STATE_SUSPENDED 136e2c1fe90SJustin T. Gibbs } xbd_state_t; 137e2c1fe90SJustin T. Gibbs 138e2c1fe90SJustin T. Gibbs typedef enum { 139127a9483SJustin T. Gibbs XBDF_NONE = 0, 140e2c1fe90SJustin T. Gibbs XBDF_OPEN = 1 << 0, /* drive is open (can't shut down) */ 141e2c1fe90SJustin T. 
Gibbs XBDF_BARRIER = 1 << 1, /* backend supports barriers */ 1429985113bSJustin T. Gibbs XBDF_FLUSH = 1 << 2, /* backend supports flush */ 1439985113bSJustin T. Gibbs XBDF_READY = 1 << 3, /* Is ready */ 1449985113bSJustin T. Gibbs XBDF_CM_SHORTAGE = 1 << 4, /* Free cm resource shortage active. */ 1459985113bSJustin T. Gibbs XBDF_GNT_SHORTAGE = 1 << 5, /* Grant ref resource shortage active */ 1469985113bSJustin T. Gibbs XBDF_WAIT_IDLE = 1 << 6 /* 1479985113bSJustin T. Gibbs * No new work until oustanding work 1489985113bSJustin T. Gibbs * completes. 1499985113bSJustin T. Gibbs */ 150e2c1fe90SJustin T. Gibbs } xbd_flag_t; 151e4808c4bSKip Macy 152e4808c4bSKip Macy /* 153e4808c4bSKip Macy * We have one of these per vbd, whether ide, scsi or 'other'. 154e4808c4bSKip Macy */ 15533eebb6aSJustin T. Gibbs struct xbd_softc { 15633eebb6aSJustin T. Gibbs device_t xbd_dev; 15733eebb6aSJustin T. Gibbs struct disk *xbd_disk; /* disk params */ 15833eebb6aSJustin T. Gibbs struct bio_queue_head xbd_bioq; /* sort queue */ 15933eebb6aSJustin T. Gibbs int xbd_unit; 160e2c1fe90SJustin T. Gibbs xbd_flag_t xbd_flags; 161127a9483SJustin T. Gibbs int xbd_qfrozen_cnt; 16233eebb6aSJustin T. Gibbs int xbd_vdevice; 163e2c1fe90SJustin T. Gibbs xbd_state_t xbd_state; 16433eebb6aSJustin T. Gibbs u_int xbd_ring_pages; 16533eebb6aSJustin T. Gibbs uint32_t xbd_max_requests; 16633eebb6aSJustin T. Gibbs uint32_t xbd_max_request_segments; 16733eebb6aSJustin T. Gibbs uint32_t xbd_max_request_size; 16833eebb6aSJustin T. Gibbs grant_ref_t xbd_ring_ref[XBD_MAX_RING_PAGES]; 16933eebb6aSJustin T. Gibbs blkif_front_ring_t xbd_ring; 17076acc41fSJustin T. Gibbs xen_intr_handle_t xen_intr_handle; 17133eebb6aSJustin T. Gibbs struct gnttab_free_callback xbd_callback; 172e2c1fe90SJustin T. Gibbs xbd_cm_q_t xbd_cm_q[XBD_Q_COUNT]; 17333eebb6aSJustin T. Gibbs bus_dma_tag_t xbd_io_dmat; 174e4808c4bSKip Macy 17589e0f4d2SKip Macy /** 17689e0f4d2SKip Macy * The number of people holding this device open. 
We won't allow a 17789e0f4d2SKip Macy * hot-unplug unless this is 0. 17889e0f4d2SKip Macy */ 17933eebb6aSJustin T. Gibbs int xbd_users; 18033eebb6aSJustin T. Gibbs struct mtx xbd_io_lock; 181ff662b5cSJustin T. Gibbs 18233eebb6aSJustin T. Gibbs struct xbd_command *xbd_shadow; 18389e0f4d2SKip Macy }; 184e4808c4bSKip Macy 18533eebb6aSJustin T. Gibbs int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device, 186ff662b5cSJustin T. Gibbs uint16_t vdisk_info, unsigned long sector_size); 187e4808c4bSKip Macy 188e2c1fe90SJustin T. Gibbs static inline void 189e2c1fe90SJustin T. Gibbs xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index) 190e2c1fe90SJustin T. Gibbs { 191e2c1fe90SJustin T. Gibbs struct xbd_cm_q *cmq; 192e4808c4bSKip Macy 193e2c1fe90SJustin T. Gibbs cmq = &sc->xbd_cm_q[index]; 194e2c1fe90SJustin T. Gibbs cmq->q_length++; 195e2c1fe90SJustin T. Gibbs if (cmq->q_length > cmq->q_max) 196e2c1fe90SJustin T. Gibbs cmq->q_max = cmq->q_length; 197e2c1fe90SJustin T. Gibbs } 198e4808c4bSKip Macy 199e2c1fe90SJustin T. Gibbs static inline void 200e2c1fe90SJustin T. Gibbs xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index) 201e2c1fe90SJustin T. Gibbs { 202e2c1fe90SJustin T. Gibbs sc->xbd_cm_q[index].q_length--; 203e2c1fe90SJustin T. Gibbs } 204e4808c4bSKip Macy 2059985113bSJustin T. Gibbs static inline uint32_t 2069985113bSJustin T. Gibbs xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index) 2079985113bSJustin T. Gibbs { 2089985113bSJustin T. Gibbs return (sc->xbd_cm_q[index].q_length); 2099985113bSJustin T. Gibbs } 2109985113bSJustin T. Gibbs 211e2c1fe90SJustin T. Gibbs static inline void 212e2c1fe90SJustin T. Gibbs xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index) 213e2c1fe90SJustin T. Gibbs { 214e2c1fe90SJustin T. Gibbs struct xbd_cm_q *cmq; 215e4808c4bSKip Macy 216e2c1fe90SJustin T. Gibbs cmq = &sc->xbd_cm_q[index]; 217e2c1fe90SJustin T. Gibbs TAILQ_INIT(&cmq->q_tailq); 218e2c1fe90SJustin T. 
Gibbs cmq->q_length = 0; 219e2c1fe90SJustin T. Gibbs cmq->q_max = 0; 220e2c1fe90SJustin T. Gibbs } 221e2c1fe90SJustin T. Gibbs 222e2c1fe90SJustin T. Gibbs static inline void 223e2c1fe90SJustin T. Gibbs xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index) 224e2c1fe90SJustin T. Gibbs { 225e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 226e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 227e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE) 228e2c1fe90SJustin T. Gibbs panic("%s: command %p is already on queue %d.", 229e2c1fe90SJustin T. Gibbs __func__, cm, cm->cm_flags & XBDCF_Q_MASK); 230e2c1fe90SJustin T. Gibbs TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 231e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 232e2c1fe90SJustin T. Gibbs cm->cm_flags |= index; 233e2c1fe90SJustin T. Gibbs xbd_added_qentry(cm->cm_sc, index); 234e2c1fe90SJustin T. Gibbs } 235e2c1fe90SJustin T. Gibbs 236e2c1fe90SJustin T. Gibbs static inline void 237e2c1fe90SJustin T. Gibbs xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index) 238e2c1fe90SJustin T. Gibbs { 239e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 240e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 241e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE) 242e2c1fe90SJustin T. Gibbs panic("%s: command %p is already on queue %d.", 243e2c1fe90SJustin T. Gibbs __func__, cm, cm->cm_flags & XBDCF_Q_MASK); 244e2c1fe90SJustin T. Gibbs TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 245e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 246e2c1fe90SJustin T. Gibbs cm->cm_flags |= index; 247e2c1fe90SJustin T. Gibbs xbd_added_qentry(cm->cm_sc, index); 248e2c1fe90SJustin T. Gibbs } 249e2c1fe90SJustin T. Gibbs 250e2c1fe90SJustin T. Gibbs static inline struct xbd_command * 251e2c1fe90SJustin T. 
Gibbs xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index) 252e2c1fe90SJustin T. Gibbs { 253e2c1fe90SJustin T. Gibbs struct xbd_command *cm; 254e2c1fe90SJustin T. Gibbs 255e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 256e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 257e2c1fe90SJustin T. Gibbs 258e2c1fe90SJustin T. Gibbs if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) { 259e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != index) { 260e2c1fe90SJustin T. Gibbs panic("%s: command %p is on queue %d, " 261e2c1fe90SJustin T. Gibbs "not specified queue %d", 262e2c1fe90SJustin T. Gibbs __func__, cm, 263e2c1fe90SJustin T. Gibbs cm->cm_flags & XBDCF_Q_MASK, 264e2c1fe90SJustin T. Gibbs index); 265e2c1fe90SJustin T. Gibbs } 266e2c1fe90SJustin T. Gibbs TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link); 267e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 268e2c1fe90SJustin T. Gibbs cm->cm_flags |= XBD_Q_NONE; 269e2c1fe90SJustin T. Gibbs xbd_removed_qentry(cm->cm_sc, index); 270e2c1fe90SJustin T. Gibbs } 271e2c1fe90SJustin T. Gibbs return (cm); 272e2c1fe90SJustin T. Gibbs } 273e2c1fe90SJustin T. Gibbs 274e2c1fe90SJustin T. Gibbs static inline void 275e2c1fe90SJustin T. Gibbs xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index) 276e2c1fe90SJustin T. Gibbs { 277e2c1fe90SJustin T. Gibbs xbd_q_index_t index; 278e2c1fe90SJustin T. Gibbs 279e2c1fe90SJustin T. Gibbs index = cm->cm_flags & XBDCF_Q_MASK; 280e2c1fe90SJustin T. Gibbs 281e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 282e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 283e2c1fe90SJustin T. Gibbs 284e2c1fe90SJustin T. Gibbs if (index != expected_index) { 285e2c1fe90SJustin T. Gibbs panic("%s: command %p is on queue %d, not specified queue %d", 286e2c1fe90SJustin T. Gibbs __func__, cm, index, expected_index); 287e2c1fe90SJustin T. Gibbs } 288e2c1fe90SJustin T. 
Gibbs TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 289e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 290e2c1fe90SJustin T. Gibbs cm->cm_flags |= XBD_Q_NONE; 291e2c1fe90SJustin T. Gibbs xbd_removed_qentry(cm->cm_sc, index); 292e2c1fe90SJustin T. Gibbs } 293e4808c4bSKip Macy 2949985113bSJustin T. Gibbs static inline void 29533eebb6aSJustin T. Gibbs xbd_initq_bio(struct xbd_softc *sc) 296e4808c4bSKip Macy { 29733eebb6aSJustin T. Gibbs bioq_init(&sc->xbd_bioq); 298e4808c4bSKip Macy } 299e4808c4bSKip Macy 3009985113bSJustin T. Gibbs static inline void 30133eebb6aSJustin T. Gibbs xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp) 302e4808c4bSKip Macy { 30333eebb6aSJustin T. Gibbs bioq_insert_tail(&sc->xbd_bioq, bp); 304e2c1fe90SJustin T. Gibbs xbd_added_qentry(sc, XBD_Q_BIO); 305e4808c4bSKip Macy } 306e4808c4bSKip Macy 3079985113bSJustin T. Gibbs static inline void 30833eebb6aSJustin T. Gibbs xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp) 309e4808c4bSKip Macy { 31033eebb6aSJustin T. Gibbs bioq_insert_head(&sc->xbd_bioq, bp); 311e2c1fe90SJustin T. Gibbs xbd_added_qentry(sc, XBD_Q_BIO); 312e4808c4bSKip Macy } 313e4808c4bSKip Macy 3149985113bSJustin T. Gibbs static inline struct bio * 31533eebb6aSJustin T. Gibbs xbd_dequeue_bio(struct xbd_softc *sc) 316e4808c4bSKip Macy { 317e4808c4bSKip Macy struct bio *bp; 318e4808c4bSKip Macy 31933eebb6aSJustin T. Gibbs if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) { 32033eebb6aSJustin T. Gibbs bioq_remove(&sc->xbd_bioq, bp); 321e2c1fe90SJustin T. Gibbs xbd_removed_qentry(sc, XBD_Q_BIO); 322e4808c4bSKip Macy } 323e4808c4bSKip Macy return (bp); 324e4808c4bSKip Macy } 32589e0f4d2SKip Macy 326e2c1fe90SJustin T. Gibbs static inline void 327e2c1fe90SJustin T. Gibbs xbd_initqs(struct xbd_softc *sc) 328e2c1fe90SJustin T. Gibbs { 329e2c1fe90SJustin T. Gibbs u_int index; 330e2c1fe90SJustin T. Gibbs 331e2c1fe90SJustin T. Gibbs for (index = 0; index < XBD_Q_COUNT; index++) 332e2c1fe90SJustin T. 
Gibbs xbd_initq_cm(sc, index); 333e2c1fe90SJustin T. Gibbs 334e2c1fe90SJustin T. Gibbs xbd_initq_bio(sc); 335e2c1fe90SJustin T. Gibbs } 336e2c1fe90SJustin T. Gibbs 33733eebb6aSJustin T. Gibbs #endif /* __XEN_BLKFRONT_BLOCK_H__ */ 338