/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
2989e0f4d2SKip Macy * 3089e0f4d2SKip Macy * $FreeBSD$ 3189e0f4d2SKip Macy */ 3289e0f4d2SKip Macy 3333eebb6aSJustin T. Gibbs #ifndef __XEN_BLKFRONT_BLOCK_H__ 3433eebb6aSJustin T. Gibbs #define __XEN_BLKFRONT_BLOCK_H__ 35ff662b5cSJustin T. Gibbs #include <xen/blkif.h> 36ff662b5cSJustin T. Gibbs 37ff662b5cSJustin T. Gibbs /** 38443cc4d4SJustin T. Gibbs * Given a number of blkif segments, compute the maximum I/O size supported. 39443cc4d4SJustin T. Gibbs * 40443cc4d4SJustin T. Gibbs * \note This calculation assumes that all but the first and last segments 41443cc4d4SJustin T. Gibbs * of the I/O are fully utilized. 42443cc4d4SJustin T. Gibbs * 43443cc4d4SJustin T. Gibbs * \note We reserve a segement from the maximum supported by the transport to 44443cc4d4SJustin T. Gibbs * guarantee we can handle an unaligned transfer without the need to 45443cc4d4SJustin T. Gibbs * use a bounce buffer. 46443cc4d4SJustin T. Gibbs */ 4733eebb6aSJustin T. Gibbs #define XBD_SEGS_TO_SIZE(segs) \ 48443cc4d4SJustin T. Gibbs (((segs) - 1) * PAGE_SIZE) 49443cc4d4SJustin T. Gibbs 50443cc4d4SJustin T. Gibbs /** 51443cc4d4SJustin T. Gibbs * Compute the maximum number of blkif segments requried to represent 52443cc4d4SJustin T. Gibbs * an I/O of the given size. 53443cc4d4SJustin T. Gibbs * 54443cc4d4SJustin T. Gibbs * \note This calculation assumes that all but the first and last segments 55443cc4d4SJustin T. Gibbs * of the I/O are fully utilized. 56443cc4d4SJustin T. Gibbs * 57443cc4d4SJustin T. Gibbs * \note We reserve a segement to guarantee we can handle an unaligned 58443cc4d4SJustin T. Gibbs * transfer without the need to use a bounce buffer. 59443cc4d4SJustin T. Gibbs */ 6033eebb6aSJustin T. Gibbs #define XBD_SIZE_TO_SEGS(size) \ 61443cc4d4SJustin T. Gibbs ((size / PAGE_SIZE) + 1) 62443cc4d4SJustin T. Gibbs 63443cc4d4SJustin T. Gibbs /** 64ff662b5cSJustin T. Gibbs * The maximum number of outstanding requests blocks (request headers plus 65ff662b5cSJustin T. 
Gibbs * additional segment blocks) we will allow in a negotiated block-front/back 66ff662b5cSJustin T. Gibbs * communication channel. 67ff662b5cSJustin T. Gibbs */ 6833eebb6aSJustin T. Gibbs #define XBD_MAX_REQUESTS 256 69ff662b5cSJustin T. Gibbs 70ff662b5cSJustin T. Gibbs /** 71ff662b5cSJustin T. Gibbs * The maximum mapped region size per request we will allow in a negotiated 72ff662b5cSJustin T. Gibbs * block-front/back communication channel. 73ff662b5cSJustin T. Gibbs */ 7433eebb6aSJustin T. Gibbs #define XBD_MAX_REQUEST_SIZE \ 7533eebb6aSJustin T. Gibbs MIN(MAXPHYS, XBD_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST)) 76ff662b5cSJustin T. Gibbs 77ff662b5cSJustin T. Gibbs /** 78ff662b5cSJustin T. Gibbs * The maximum number of segments (within a request header and accompanying 79ff662b5cSJustin T. Gibbs * segment blocks) per request we will allow in a negotiated block-front/back 80ff662b5cSJustin T. Gibbs * communication channel. 81ff662b5cSJustin T. Gibbs */ 8233eebb6aSJustin T. Gibbs #define XBD_MAX_SEGMENTS_PER_REQUEST \ 83ff662b5cSJustin T. Gibbs (MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \ 8433eebb6aSJustin T. Gibbs XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE))) 85ff662b5cSJustin T. Gibbs 86ff662b5cSJustin T. Gibbs /** 87ff662b5cSJustin T. Gibbs * The maximum number of shared memory ring pages we will allow in a 88ff662b5cSJustin T. Gibbs * negotiated block-front/back communication channel. Allow enough 8933eebb6aSJustin T. Gibbs * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd. 90ff662b5cSJustin T. Gibbs */ 9133eebb6aSJustin T. Gibbs #define XBD_MAX_RING_PAGES \ 9233eebb6aSJustin T. Gibbs BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBD_MAX_SEGMENTS_PER_REQUEST) \ 9333eebb6aSJustin T. Gibbs * XBD_MAX_REQUESTS) 9489e0f4d2SKip Macy 95*e2c1fe90SJustin T. Gibbs typedef enum { 96*e2c1fe90SJustin T. Gibbs XBDCF_Q_MASK = 0xFF, 97*e2c1fe90SJustin T. Gibbs XBDCF_FROZEN = 1<<8, 98*e2c1fe90SJustin T. Gibbs XBDCF_POLLED = 1<<9, 99*e2c1fe90SJustin T. 
Gibbs XBDCF_INITIALIZER = XBDCF_Q_MASK 100*e2c1fe90SJustin T. Gibbs } xbdc_flag_t; 101*e2c1fe90SJustin T. Gibbs 10233eebb6aSJustin T. Gibbs struct xbd_command; 10333eebb6aSJustin T. Gibbs typedef void xbd_cbcf_t(struct xbd_command *); 10489e0f4d2SKip Macy 10533eebb6aSJustin T. Gibbs struct xbd_command { 10633eebb6aSJustin T. Gibbs TAILQ_ENTRY(xbd_command) cm_link; 10733eebb6aSJustin T. Gibbs struct xbd_softc *cm_sc; 108*e2c1fe90SJustin T. Gibbs xbdc_flag_t cm_flags; 10933eebb6aSJustin T. Gibbs bus_dmamap_t cm_map; 11033eebb6aSJustin T. Gibbs uint64_t cm_id; 11133eebb6aSJustin T. Gibbs grant_ref_t *cm_sg_refs; 11233eebb6aSJustin T. Gibbs struct bio *cm_bp; 11333eebb6aSJustin T. Gibbs grant_ref_t cm_gref_head; 11433eebb6aSJustin T. Gibbs void *cm_data; 11533eebb6aSJustin T. Gibbs size_t cm_datalen; 11633eebb6aSJustin T. Gibbs u_int cm_nseg; 11733eebb6aSJustin T. Gibbs int cm_operation; 11833eebb6aSJustin T. Gibbs blkif_sector_t cm_sector_number; 11933eebb6aSJustin T. Gibbs int cm_status; 12033eebb6aSJustin T. Gibbs xbd_cbcf_t *cm_complete; 12189e0f4d2SKip Macy }; 12289e0f4d2SKip Macy 123*e2c1fe90SJustin T. Gibbs typedef enum { 124*e2c1fe90SJustin T. Gibbs XBD_Q_FREE, 125*e2c1fe90SJustin T. Gibbs XBD_Q_READY, 126*e2c1fe90SJustin T. Gibbs XBD_Q_BUSY, 127*e2c1fe90SJustin T. Gibbs XBD_Q_COMPLETE, 128*e2c1fe90SJustin T. Gibbs XBD_Q_BIO, 129*e2c1fe90SJustin T. Gibbs XBD_Q_COUNT, 130*e2c1fe90SJustin T. Gibbs XBD_Q_NONE = XBDCF_Q_MASK 131*e2c1fe90SJustin T. Gibbs } xbd_q_index_t; 13289e0f4d2SKip Macy 133*e2c1fe90SJustin T. Gibbs typedef struct xbd_cm_q { 134*e2c1fe90SJustin T. Gibbs TAILQ_HEAD(, xbd_command) q_tailq; 135e4808c4bSKip Macy uint32_t q_length; 136e4808c4bSKip Macy uint32_t q_max; 137*e2c1fe90SJustin T. Gibbs } xbd_cm_q_t; 138e4808c4bSKip Macy 139*e2c1fe90SJustin T. Gibbs typedef enum { 140*e2c1fe90SJustin T. Gibbs XBD_STATE_DISCONNECTED, 141*e2c1fe90SJustin T. Gibbs XBD_STATE_CONNECTED, 142*e2c1fe90SJustin T. Gibbs XBD_STATE_SUSPENDED 143*e2c1fe90SJustin T. 
Gibbs } xbd_state_t; 144*e2c1fe90SJustin T. Gibbs 145*e2c1fe90SJustin T. Gibbs typedef enum { 146*e2c1fe90SJustin T. Gibbs XBDF_OPEN = 1 << 0, /* drive is open (can't shut down) */ 147*e2c1fe90SJustin T. Gibbs XBDF_BARRIER = 1 << 1, /* backend supports barriers */ 148*e2c1fe90SJustin T. Gibbs XBDF_READY = 1 << 2, /* Is ready */ 149*e2c1fe90SJustin T. Gibbs XBDF_FROZEN = 1 << 3 /* Waiting for resources */ 150*e2c1fe90SJustin T. Gibbs } xbd_flag_t; 151e4808c4bSKip Macy 152e4808c4bSKip Macy /* 153e4808c4bSKip Macy * We have one of these per vbd, whether ide, scsi or 'other'. 154e4808c4bSKip Macy */ 15533eebb6aSJustin T. Gibbs struct xbd_softc { 15633eebb6aSJustin T. Gibbs device_t xbd_dev; 15733eebb6aSJustin T. Gibbs struct disk *xbd_disk; /* disk params */ 15833eebb6aSJustin T. Gibbs struct bio_queue_head xbd_bioq; /* sort queue */ 15933eebb6aSJustin T. Gibbs int xbd_unit; 160*e2c1fe90SJustin T. Gibbs xbd_flag_t xbd_flags; 16133eebb6aSJustin T. Gibbs int xbd_vdevice; 162*e2c1fe90SJustin T. Gibbs xbd_state_t xbd_state; 16333eebb6aSJustin T. Gibbs u_int xbd_ring_pages; 16433eebb6aSJustin T. Gibbs uint32_t xbd_max_requests; 16533eebb6aSJustin T. Gibbs uint32_t xbd_max_request_segments; 16633eebb6aSJustin T. Gibbs uint32_t xbd_max_request_blocks; 16733eebb6aSJustin T. Gibbs uint32_t xbd_max_request_size; 16833eebb6aSJustin T. Gibbs grant_ref_t xbd_ring_ref[XBD_MAX_RING_PAGES]; 16933eebb6aSJustin T. Gibbs blkif_front_ring_t xbd_ring; 17033eebb6aSJustin T. Gibbs unsigned int xbd_irq; 17133eebb6aSJustin T. Gibbs struct gnttab_free_callback xbd_callback; 172*e2c1fe90SJustin T. Gibbs xbd_cm_q_t xbd_cm_q[XBD_Q_COUNT]; 17333eebb6aSJustin T. Gibbs bus_dma_tag_t xbd_io_dmat; 174e4808c4bSKip Macy 17589e0f4d2SKip Macy /** 17689e0f4d2SKip Macy * The number of people holding this device open. We won't allow a 17789e0f4d2SKip Macy * hot-unplug unless this is 0. 17889e0f4d2SKip Macy */ 17933eebb6aSJustin T. Gibbs int xbd_users; 18033eebb6aSJustin T. 
Gibbs struct mtx xbd_io_lock; 181ff662b5cSJustin T. Gibbs 18233eebb6aSJustin T. Gibbs struct xbd_command *xbd_shadow; 18389e0f4d2SKip Macy }; 184e4808c4bSKip Macy 18533eebb6aSJustin T. Gibbs int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device, 186ff662b5cSJustin T. Gibbs uint16_t vdisk_info, unsigned long sector_size); 187e4808c4bSKip Macy 188*e2c1fe90SJustin T. Gibbs static inline void 189*e2c1fe90SJustin T. Gibbs xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index) 190*e2c1fe90SJustin T. Gibbs { 191*e2c1fe90SJustin T. Gibbs struct xbd_cm_q *cmq; 192e4808c4bSKip Macy 193*e2c1fe90SJustin T. Gibbs cmq = &sc->xbd_cm_q[index]; 194*e2c1fe90SJustin T. Gibbs cmq->q_length++; 195*e2c1fe90SJustin T. Gibbs if (cmq->q_length > cmq->q_max) 196*e2c1fe90SJustin T. Gibbs cmq->q_max = cmq->q_length; 197*e2c1fe90SJustin T. Gibbs } 198e4808c4bSKip Macy 199*e2c1fe90SJustin T. Gibbs static inline void 200*e2c1fe90SJustin T. Gibbs xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index) 201*e2c1fe90SJustin T. Gibbs { 202*e2c1fe90SJustin T. Gibbs sc->xbd_cm_q[index].q_length--; 203*e2c1fe90SJustin T. Gibbs } 204e4808c4bSKip Macy 205*e2c1fe90SJustin T. Gibbs static inline void 206*e2c1fe90SJustin T. Gibbs xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index) 207*e2c1fe90SJustin T. Gibbs { 208*e2c1fe90SJustin T. Gibbs struct xbd_cm_q *cmq; 209e4808c4bSKip Macy 210*e2c1fe90SJustin T. Gibbs cmq = &sc->xbd_cm_q[index]; 211*e2c1fe90SJustin T. Gibbs TAILQ_INIT(&cmq->q_tailq); 212*e2c1fe90SJustin T. Gibbs cmq->q_length = 0; 213*e2c1fe90SJustin T. Gibbs cmq->q_max = 0; 214*e2c1fe90SJustin T. Gibbs } 215*e2c1fe90SJustin T. Gibbs 216*e2c1fe90SJustin T. Gibbs static inline void 217*e2c1fe90SJustin T. Gibbs xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index) 218*e2c1fe90SJustin T. Gibbs { 219*e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 220*e2c1fe90SJustin T. 
Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 221*e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE) 222*e2c1fe90SJustin T. Gibbs panic("%s: command %p is already on queue %d.", 223*e2c1fe90SJustin T. Gibbs __func__, cm, cm->cm_flags & XBDCF_Q_MASK); 224*e2c1fe90SJustin T. Gibbs TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 225*e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 226*e2c1fe90SJustin T. Gibbs cm->cm_flags |= index; 227*e2c1fe90SJustin T. Gibbs xbd_added_qentry(cm->cm_sc, index); 228*e2c1fe90SJustin T. Gibbs } 229*e2c1fe90SJustin T. Gibbs 230*e2c1fe90SJustin T. Gibbs static inline void 231*e2c1fe90SJustin T. Gibbs xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index) 232*e2c1fe90SJustin T. Gibbs { 233*e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 234*e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 235*e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE) 236*e2c1fe90SJustin T. Gibbs panic("%s: command %p is already on queue %d.", 237*e2c1fe90SJustin T. Gibbs __func__, cm, cm->cm_flags & XBDCF_Q_MASK); 238*e2c1fe90SJustin T. Gibbs TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 239*e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 240*e2c1fe90SJustin T. Gibbs cm->cm_flags |= index; 241*e2c1fe90SJustin T. Gibbs xbd_added_qentry(cm->cm_sc, index); 242*e2c1fe90SJustin T. Gibbs } 243*e2c1fe90SJustin T. Gibbs 244*e2c1fe90SJustin T. Gibbs static inline struct xbd_command * 245*e2c1fe90SJustin T. Gibbs xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index) 246*e2c1fe90SJustin T. Gibbs { 247*e2c1fe90SJustin T. Gibbs struct xbd_command *cm; 248*e2c1fe90SJustin T. Gibbs 249*e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 250*e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 251*e2c1fe90SJustin T. Gibbs 252*e2c1fe90SJustin T. 
Gibbs if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) { 253*e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != index) { 254*e2c1fe90SJustin T. Gibbs panic("%s: command %p is on queue %d, " 255*e2c1fe90SJustin T. Gibbs "not specified queue %d", 256*e2c1fe90SJustin T. Gibbs __func__, cm, 257*e2c1fe90SJustin T. Gibbs cm->cm_flags & XBDCF_Q_MASK, 258*e2c1fe90SJustin T. Gibbs index); 259*e2c1fe90SJustin T. Gibbs } 260*e2c1fe90SJustin T. Gibbs TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link); 261*e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 262*e2c1fe90SJustin T. Gibbs cm->cm_flags |= XBD_Q_NONE; 263*e2c1fe90SJustin T. Gibbs xbd_removed_qentry(cm->cm_sc, index); 264*e2c1fe90SJustin T. Gibbs } 265*e2c1fe90SJustin T. Gibbs return (cm); 266*e2c1fe90SJustin T. Gibbs } 267*e2c1fe90SJustin T. Gibbs 268*e2c1fe90SJustin T. Gibbs static inline void 269*e2c1fe90SJustin T. Gibbs xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index) 270*e2c1fe90SJustin T. Gibbs { 271*e2c1fe90SJustin T. Gibbs xbd_q_index_t index; 272*e2c1fe90SJustin T. Gibbs 273*e2c1fe90SJustin T. Gibbs index = cm->cm_flags & XBDCF_Q_MASK; 274*e2c1fe90SJustin T. Gibbs 275*e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 276*e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 277*e2c1fe90SJustin T. Gibbs 278*e2c1fe90SJustin T. Gibbs if (index != expected_index) { 279*e2c1fe90SJustin T. Gibbs panic("%s: command %p is on queue %d, not specified queue %d", 280*e2c1fe90SJustin T. Gibbs __func__, cm, index, expected_index); 281*e2c1fe90SJustin T. Gibbs } 282*e2c1fe90SJustin T. Gibbs TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 283*e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 284*e2c1fe90SJustin T. Gibbs cm->cm_flags |= XBD_Q_NONE; 285*e2c1fe90SJustin T. Gibbs xbd_removed_qentry(cm->cm_sc, index); 286*e2c1fe90SJustin T. 
Gibbs } 287e4808c4bSKip Macy 288e4808c4bSKip Macy static __inline void 28933eebb6aSJustin T. Gibbs xbd_initq_bio(struct xbd_softc *sc) 290e4808c4bSKip Macy { 29133eebb6aSJustin T. Gibbs bioq_init(&sc->xbd_bioq); 292e4808c4bSKip Macy } 293e4808c4bSKip Macy 294e4808c4bSKip Macy static __inline void 29533eebb6aSJustin T. Gibbs xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp) 296e4808c4bSKip Macy { 29733eebb6aSJustin T. Gibbs bioq_insert_tail(&sc->xbd_bioq, bp); 298*e2c1fe90SJustin T. Gibbs xbd_added_qentry(sc, XBD_Q_BIO); 299e4808c4bSKip Macy } 300e4808c4bSKip Macy 301e4808c4bSKip Macy static __inline void 30233eebb6aSJustin T. Gibbs xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp) 303e4808c4bSKip Macy { 30433eebb6aSJustin T. Gibbs bioq_insert_head(&sc->xbd_bioq, bp); 305*e2c1fe90SJustin T. Gibbs xbd_added_qentry(sc, XBD_Q_BIO); 306e4808c4bSKip Macy } 307e4808c4bSKip Macy 308e4808c4bSKip Macy static __inline struct bio * 30933eebb6aSJustin T. Gibbs xbd_dequeue_bio(struct xbd_softc *sc) 310e4808c4bSKip Macy { 311e4808c4bSKip Macy struct bio *bp; 312e4808c4bSKip Macy 31333eebb6aSJustin T. Gibbs if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) { 31433eebb6aSJustin T. Gibbs bioq_remove(&sc->xbd_bioq, bp); 315*e2c1fe90SJustin T. Gibbs xbd_removed_qentry(sc, XBD_Q_BIO); 316e4808c4bSKip Macy } 317e4808c4bSKip Macy return (bp); 318e4808c4bSKip Macy } 31989e0f4d2SKip Macy 320*e2c1fe90SJustin T. Gibbs static inline void 321*e2c1fe90SJustin T. Gibbs xbd_initqs(struct xbd_softc *sc) 322*e2c1fe90SJustin T. Gibbs { 323*e2c1fe90SJustin T. Gibbs u_int index; 324*e2c1fe90SJustin T. Gibbs 325*e2c1fe90SJustin T. Gibbs for (index = 0; index < XBD_Q_COUNT; index++) 326*e2c1fe90SJustin T. Gibbs xbd_initq_cm(sc, index); 327*e2c1fe90SJustin T. Gibbs 328*e2c1fe90SJustin T. Gibbs xbd_initq_bio(sc); 329*e2c1fe90SJustin T. Gibbs } 330*e2c1fe90SJustin T. Gibbs 33133eebb6aSJustin T. Gibbs #endif /* __XEN_BLKFRONT_BLOCK_H__ */ 332