/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
2989e0f4d2SKip Macy * 3089e0f4d2SKip Macy * $FreeBSD$ 3189e0f4d2SKip Macy */ 3289e0f4d2SKip Macy 3333eebb6aSJustin T. Gibbs #ifndef __XEN_BLKFRONT_BLOCK_H__ 3433eebb6aSJustin T. Gibbs #define __XEN_BLKFRONT_BLOCK_H__ 35ff662b5cSJustin T. Gibbs #include <xen/blkif.h> 36ff662b5cSJustin T. Gibbs 37ff662b5cSJustin T. Gibbs /** 38443cc4d4SJustin T. Gibbs * Given a number of blkif segments, compute the maximum I/O size supported. 39443cc4d4SJustin T. Gibbs * 40443cc4d4SJustin T. Gibbs * \note This calculation assumes that all but the first and last segments 41443cc4d4SJustin T. Gibbs * of the I/O are fully utilized. 42443cc4d4SJustin T. Gibbs * 43443cc4d4SJustin T. Gibbs * \note We reserve a segement from the maximum supported by the transport to 44443cc4d4SJustin T. Gibbs * guarantee we can handle an unaligned transfer without the need to 45443cc4d4SJustin T. Gibbs * use a bounce buffer. 46443cc4d4SJustin T. Gibbs */ 4733eebb6aSJustin T. Gibbs #define XBD_SEGS_TO_SIZE(segs) \ 48443cc4d4SJustin T. Gibbs (((segs) - 1) * PAGE_SIZE) 49443cc4d4SJustin T. Gibbs 50443cc4d4SJustin T. Gibbs /** 51443cc4d4SJustin T. Gibbs * Compute the maximum number of blkif segments requried to represent 52443cc4d4SJustin T. Gibbs * an I/O of the given size. 53443cc4d4SJustin T. Gibbs * 54443cc4d4SJustin T. Gibbs * \note This calculation assumes that all but the first and last segments 55443cc4d4SJustin T. Gibbs * of the I/O are fully utilized. 56443cc4d4SJustin T. Gibbs * 57443cc4d4SJustin T. Gibbs * \note We reserve a segement to guarantee we can handle an unaligned 58443cc4d4SJustin T. Gibbs * transfer without the need to use a bounce buffer. 59443cc4d4SJustin T. Gibbs */ 6033eebb6aSJustin T. Gibbs #define XBD_SIZE_TO_SEGS(size) \ 61443cc4d4SJustin T. Gibbs ((size / PAGE_SIZE) + 1) 62443cc4d4SJustin T. Gibbs 63443cc4d4SJustin T. Gibbs /** 64ff662b5cSJustin T. Gibbs * The maximum number of outstanding requests blocks (request headers plus 65ff662b5cSJustin T. 
Gibbs * additional segment blocks) we will allow in a negotiated block-front/back 66ff662b5cSJustin T. Gibbs * communication channel. 67ff662b5cSJustin T. Gibbs */ 6833eebb6aSJustin T. Gibbs #define XBD_MAX_REQUESTS 256 69ff662b5cSJustin T. Gibbs 70ff662b5cSJustin T. Gibbs /** 71ff662b5cSJustin T. Gibbs * The maximum mapped region size per request we will allow in a negotiated 72ff662b5cSJustin T. Gibbs * block-front/back communication channel. 73ff662b5cSJustin T. Gibbs */ 7433eebb6aSJustin T. Gibbs #define XBD_MAX_REQUEST_SIZE \ 7533eebb6aSJustin T. Gibbs MIN(MAXPHYS, XBD_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST)) 76ff662b5cSJustin T. Gibbs 77ff662b5cSJustin T. Gibbs /** 78ff662b5cSJustin T. Gibbs * The maximum number of segments (within a request header and accompanying 79ff662b5cSJustin T. Gibbs * segment blocks) per request we will allow in a negotiated block-front/back 80ff662b5cSJustin T. Gibbs * communication channel. 81ff662b5cSJustin T. Gibbs */ 8233eebb6aSJustin T. Gibbs #define XBD_MAX_SEGMENTS_PER_REQUEST \ 83ff662b5cSJustin T. Gibbs (MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \ 8433eebb6aSJustin T. Gibbs XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE))) 85ff662b5cSJustin T. Gibbs 86ff662b5cSJustin T. Gibbs /** 87ff662b5cSJustin T. Gibbs * The maximum number of shared memory ring pages we will allow in a 88ff662b5cSJustin T. Gibbs * negotiated block-front/back communication channel. Allow enough 8933eebb6aSJustin T. Gibbs * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd. 90ff662b5cSJustin T. Gibbs */ 9133eebb6aSJustin T. Gibbs #define XBD_MAX_RING_PAGES \ 9233eebb6aSJustin T. Gibbs BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBD_MAX_SEGMENTS_PER_REQUEST) \ 9333eebb6aSJustin T. Gibbs * XBD_MAX_REQUESTS) 9489e0f4d2SKip Macy 95e2c1fe90SJustin T. Gibbs typedef enum { 96e2c1fe90SJustin T. Gibbs XBDCF_Q_MASK = 0xFF, 97e2c1fe90SJustin T. Gibbs XBDCF_FROZEN = 1<<8, 98e2c1fe90SJustin T. Gibbs XBDCF_POLLED = 1<<9, 99*127a9483SJustin T. 
Gibbs XBDCF_ASYNC_MAPPING = 1<<10, 100e2c1fe90SJustin T. Gibbs XBDCF_INITIALIZER = XBDCF_Q_MASK 101e2c1fe90SJustin T. Gibbs } xbdc_flag_t; 102e2c1fe90SJustin T. Gibbs 10333eebb6aSJustin T. Gibbs struct xbd_command; 10433eebb6aSJustin T. Gibbs typedef void xbd_cbcf_t(struct xbd_command *); 10589e0f4d2SKip Macy 10633eebb6aSJustin T. Gibbs struct xbd_command { 10733eebb6aSJustin T. Gibbs TAILQ_ENTRY(xbd_command) cm_link; 10833eebb6aSJustin T. Gibbs struct xbd_softc *cm_sc; 109e2c1fe90SJustin T. Gibbs xbdc_flag_t cm_flags; 11033eebb6aSJustin T. Gibbs bus_dmamap_t cm_map; 11133eebb6aSJustin T. Gibbs uint64_t cm_id; 11233eebb6aSJustin T. Gibbs grant_ref_t *cm_sg_refs; 11333eebb6aSJustin T. Gibbs struct bio *cm_bp; 11433eebb6aSJustin T. Gibbs grant_ref_t cm_gref_head; 11533eebb6aSJustin T. Gibbs void *cm_data; 11633eebb6aSJustin T. Gibbs size_t cm_datalen; 11733eebb6aSJustin T. Gibbs u_int cm_nseg; 11833eebb6aSJustin T. Gibbs int cm_operation; 11933eebb6aSJustin T. Gibbs blkif_sector_t cm_sector_number; 12033eebb6aSJustin T. Gibbs int cm_status; 12133eebb6aSJustin T. Gibbs xbd_cbcf_t *cm_complete; 12289e0f4d2SKip Macy }; 12389e0f4d2SKip Macy 124e2c1fe90SJustin T. Gibbs typedef enum { 125e2c1fe90SJustin T. Gibbs XBD_Q_FREE, 126e2c1fe90SJustin T. Gibbs XBD_Q_READY, 127e2c1fe90SJustin T. Gibbs XBD_Q_BUSY, 128e2c1fe90SJustin T. Gibbs XBD_Q_COMPLETE, 129e2c1fe90SJustin T. Gibbs XBD_Q_BIO, 130e2c1fe90SJustin T. Gibbs XBD_Q_COUNT, 131e2c1fe90SJustin T. Gibbs XBD_Q_NONE = XBDCF_Q_MASK 132e2c1fe90SJustin T. Gibbs } xbd_q_index_t; 13389e0f4d2SKip Macy 134e2c1fe90SJustin T. Gibbs typedef struct xbd_cm_q { 135e2c1fe90SJustin T. Gibbs TAILQ_HEAD(, xbd_command) q_tailq; 136e4808c4bSKip Macy uint32_t q_length; 137e4808c4bSKip Macy uint32_t q_max; 138e2c1fe90SJustin T. Gibbs } xbd_cm_q_t; 139e4808c4bSKip Macy 140e2c1fe90SJustin T. Gibbs typedef enum { 141e2c1fe90SJustin T. Gibbs XBD_STATE_DISCONNECTED, 142e2c1fe90SJustin T. Gibbs XBD_STATE_CONNECTED, 143e2c1fe90SJustin T. 
Gibbs XBD_STATE_SUSPENDED 144e2c1fe90SJustin T. Gibbs } xbd_state_t; 145e2c1fe90SJustin T. Gibbs 146e2c1fe90SJustin T. Gibbs typedef enum { 147*127a9483SJustin T. Gibbs XBDF_NONE = 0, 148e2c1fe90SJustin T. Gibbs XBDF_OPEN = 1 << 0, /* drive is open (can't shut down) */ 149e2c1fe90SJustin T. Gibbs XBDF_BARRIER = 1 << 1, /* backend supports barriers */ 150e2c1fe90SJustin T. Gibbs XBDF_READY = 1 << 2, /* Is ready */ 151*127a9483SJustin T. Gibbs XBDF_CM_SHORTAGE = 1 << 3, /* Free cm resource shortage active. */ 152*127a9483SJustin T. Gibbs XBDF_GNT_SHORTAGE = 1 << 4 /* Grant ref resource shortage active */ 153e2c1fe90SJustin T. Gibbs } xbd_flag_t; 154e4808c4bSKip Macy 155e4808c4bSKip Macy /* 156e4808c4bSKip Macy * We have one of these per vbd, whether ide, scsi or 'other'. 157e4808c4bSKip Macy */ 15833eebb6aSJustin T. Gibbs struct xbd_softc { 15933eebb6aSJustin T. Gibbs device_t xbd_dev; 16033eebb6aSJustin T. Gibbs struct disk *xbd_disk; /* disk params */ 16133eebb6aSJustin T. Gibbs struct bio_queue_head xbd_bioq; /* sort queue */ 16233eebb6aSJustin T. Gibbs int xbd_unit; 163e2c1fe90SJustin T. Gibbs xbd_flag_t xbd_flags; 164*127a9483SJustin T. Gibbs int xbd_qfrozen_cnt; 16533eebb6aSJustin T. Gibbs int xbd_vdevice; 166e2c1fe90SJustin T. Gibbs xbd_state_t xbd_state; 16733eebb6aSJustin T. Gibbs u_int xbd_ring_pages; 16833eebb6aSJustin T. Gibbs uint32_t xbd_max_requests; 16933eebb6aSJustin T. Gibbs uint32_t xbd_max_request_segments; 17033eebb6aSJustin T. Gibbs uint32_t xbd_max_request_blocks; 17133eebb6aSJustin T. Gibbs uint32_t xbd_max_request_size; 17233eebb6aSJustin T. Gibbs grant_ref_t xbd_ring_ref[XBD_MAX_RING_PAGES]; 17333eebb6aSJustin T. Gibbs blkif_front_ring_t xbd_ring; 17433eebb6aSJustin T. Gibbs unsigned int xbd_irq; 17533eebb6aSJustin T. Gibbs struct gnttab_free_callback xbd_callback; 176e2c1fe90SJustin T. Gibbs xbd_cm_q_t xbd_cm_q[XBD_Q_COUNT]; 17733eebb6aSJustin T. 
Gibbs bus_dma_tag_t xbd_io_dmat; 178e4808c4bSKip Macy 17989e0f4d2SKip Macy /** 18089e0f4d2SKip Macy * The number of people holding this device open. We won't allow a 18189e0f4d2SKip Macy * hot-unplug unless this is 0. 18289e0f4d2SKip Macy */ 18333eebb6aSJustin T. Gibbs int xbd_users; 18433eebb6aSJustin T. Gibbs struct mtx xbd_io_lock; 185ff662b5cSJustin T. Gibbs 18633eebb6aSJustin T. Gibbs struct xbd_command *xbd_shadow; 18789e0f4d2SKip Macy }; 188e4808c4bSKip Macy 18933eebb6aSJustin T. Gibbs int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device, 190ff662b5cSJustin T. Gibbs uint16_t vdisk_info, unsigned long sector_size); 191e4808c4bSKip Macy 192e2c1fe90SJustin T. Gibbs static inline void 193e2c1fe90SJustin T. Gibbs xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index) 194e2c1fe90SJustin T. Gibbs { 195e2c1fe90SJustin T. Gibbs struct xbd_cm_q *cmq; 196e4808c4bSKip Macy 197e2c1fe90SJustin T. Gibbs cmq = &sc->xbd_cm_q[index]; 198e2c1fe90SJustin T. Gibbs cmq->q_length++; 199e2c1fe90SJustin T. Gibbs if (cmq->q_length > cmq->q_max) 200e2c1fe90SJustin T. Gibbs cmq->q_max = cmq->q_length; 201e2c1fe90SJustin T. Gibbs } 202e4808c4bSKip Macy 203e2c1fe90SJustin T. Gibbs static inline void 204e2c1fe90SJustin T. Gibbs xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index) 205e2c1fe90SJustin T. Gibbs { 206e2c1fe90SJustin T. Gibbs sc->xbd_cm_q[index].q_length--; 207e2c1fe90SJustin T. Gibbs } 208e4808c4bSKip Macy 209e2c1fe90SJustin T. Gibbs static inline void 210e2c1fe90SJustin T. Gibbs xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index) 211e2c1fe90SJustin T. Gibbs { 212e2c1fe90SJustin T. Gibbs struct xbd_cm_q *cmq; 213e4808c4bSKip Macy 214e2c1fe90SJustin T. Gibbs cmq = &sc->xbd_cm_q[index]; 215e2c1fe90SJustin T. Gibbs TAILQ_INIT(&cmq->q_tailq); 216e2c1fe90SJustin T. Gibbs cmq->q_length = 0; 217e2c1fe90SJustin T. Gibbs cmq->q_max = 0; 218e2c1fe90SJustin T. Gibbs } 219e2c1fe90SJustin T. Gibbs 220e2c1fe90SJustin T. 
Gibbs static inline void 221e2c1fe90SJustin T. Gibbs xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index) 222e2c1fe90SJustin T. Gibbs { 223e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 224e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 225e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE) 226e2c1fe90SJustin T. Gibbs panic("%s: command %p is already on queue %d.", 227e2c1fe90SJustin T. Gibbs __func__, cm, cm->cm_flags & XBDCF_Q_MASK); 228e2c1fe90SJustin T. Gibbs TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 229e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 230e2c1fe90SJustin T. Gibbs cm->cm_flags |= index; 231e2c1fe90SJustin T. Gibbs xbd_added_qentry(cm->cm_sc, index); 232e2c1fe90SJustin T. Gibbs } 233e2c1fe90SJustin T. Gibbs 234e2c1fe90SJustin T. Gibbs static inline void 235e2c1fe90SJustin T. Gibbs xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index) 236e2c1fe90SJustin T. Gibbs { 237e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 238e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 239e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE) 240e2c1fe90SJustin T. Gibbs panic("%s: command %p is already on queue %d.", 241e2c1fe90SJustin T. Gibbs __func__, cm, cm->cm_flags & XBDCF_Q_MASK); 242e2c1fe90SJustin T. Gibbs TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 243e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 244e2c1fe90SJustin T. Gibbs cm->cm_flags |= index; 245e2c1fe90SJustin T. Gibbs xbd_added_qentry(cm->cm_sc, index); 246e2c1fe90SJustin T. Gibbs } 247e2c1fe90SJustin T. Gibbs 248e2c1fe90SJustin T. Gibbs static inline struct xbd_command * 249e2c1fe90SJustin T. Gibbs xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index) 250e2c1fe90SJustin T. Gibbs { 251e2c1fe90SJustin T. Gibbs struct xbd_command *cm; 252e2c1fe90SJustin T. Gibbs 253e2c1fe90SJustin T. 
Gibbs KASSERT(index != XBD_Q_BIO, 254e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 255e2c1fe90SJustin T. Gibbs 256e2c1fe90SJustin T. Gibbs if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) { 257e2c1fe90SJustin T. Gibbs if ((cm->cm_flags & XBDCF_Q_MASK) != index) { 258e2c1fe90SJustin T. Gibbs panic("%s: command %p is on queue %d, " 259e2c1fe90SJustin T. Gibbs "not specified queue %d", 260e2c1fe90SJustin T. Gibbs __func__, cm, 261e2c1fe90SJustin T. Gibbs cm->cm_flags & XBDCF_Q_MASK, 262e2c1fe90SJustin T. Gibbs index); 263e2c1fe90SJustin T. Gibbs } 264e2c1fe90SJustin T. Gibbs TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link); 265e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 266e2c1fe90SJustin T. Gibbs cm->cm_flags |= XBD_Q_NONE; 267e2c1fe90SJustin T. Gibbs xbd_removed_qentry(cm->cm_sc, index); 268e2c1fe90SJustin T. Gibbs } 269e2c1fe90SJustin T. Gibbs return (cm); 270e2c1fe90SJustin T. Gibbs } 271e2c1fe90SJustin T. Gibbs 272e2c1fe90SJustin T. Gibbs static inline void 273e2c1fe90SJustin T. Gibbs xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index) 274e2c1fe90SJustin T. Gibbs { 275e2c1fe90SJustin T. Gibbs xbd_q_index_t index; 276e2c1fe90SJustin T. Gibbs 277e2c1fe90SJustin T. Gibbs index = cm->cm_flags & XBDCF_Q_MASK; 278e2c1fe90SJustin T. Gibbs 279e2c1fe90SJustin T. Gibbs KASSERT(index != XBD_Q_BIO, 280e2c1fe90SJustin T. Gibbs ("%s: Commands cannot access the bio queue.", __func__)); 281e2c1fe90SJustin T. Gibbs 282e2c1fe90SJustin T. Gibbs if (index != expected_index) { 283e2c1fe90SJustin T. Gibbs panic("%s: command %p is on queue %d, not specified queue %d", 284e2c1fe90SJustin T. Gibbs __func__, cm, index, expected_index); 285e2c1fe90SJustin T. Gibbs } 286e2c1fe90SJustin T. Gibbs TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link); 287e2c1fe90SJustin T. Gibbs cm->cm_flags &= ~XBDCF_Q_MASK; 288e2c1fe90SJustin T. Gibbs cm->cm_flags |= XBD_Q_NONE; 289e2c1fe90SJustin T. 
Gibbs xbd_removed_qentry(cm->cm_sc, index); 290e2c1fe90SJustin T. Gibbs } 291e4808c4bSKip Macy 292e4808c4bSKip Macy static __inline void 29333eebb6aSJustin T. Gibbs xbd_initq_bio(struct xbd_softc *sc) 294e4808c4bSKip Macy { 29533eebb6aSJustin T. Gibbs bioq_init(&sc->xbd_bioq); 296e4808c4bSKip Macy } 297e4808c4bSKip Macy 298e4808c4bSKip Macy static __inline void 29933eebb6aSJustin T. Gibbs xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp) 300e4808c4bSKip Macy { 30133eebb6aSJustin T. Gibbs bioq_insert_tail(&sc->xbd_bioq, bp); 302e2c1fe90SJustin T. Gibbs xbd_added_qentry(sc, XBD_Q_BIO); 303e4808c4bSKip Macy } 304e4808c4bSKip Macy 305e4808c4bSKip Macy static __inline void 30633eebb6aSJustin T. Gibbs xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp) 307e4808c4bSKip Macy { 30833eebb6aSJustin T. Gibbs bioq_insert_head(&sc->xbd_bioq, bp); 309e2c1fe90SJustin T. Gibbs xbd_added_qentry(sc, XBD_Q_BIO); 310e4808c4bSKip Macy } 311e4808c4bSKip Macy 312e4808c4bSKip Macy static __inline struct bio * 31333eebb6aSJustin T. Gibbs xbd_dequeue_bio(struct xbd_softc *sc) 314e4808c4bSKip Macy { 315e4808c4bSKip Macy struct bio *bp; 316e4808c4bSKip Macy 31733eebb6aSJustin T. Gibbs if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) { 31833eebb6aSJustin T. Gibbs bioq_remove(&sc->xbd_bioq, bp); 319e2c1fe90SJustin T. Gibbs xbd_removed_qentry(sc, XBD_Q_BIO); 320e4808c4bSKip Macy } 321e4808c4bSKip Macy return (bp); 322e4808c4bSKip Macy } 32389e0f4d2SKip Macy 324e2c1fe90SJustin T. Gibbs static inline void 325e2c1fe90SJustin T. Gibbs xbd_initqs(struct xbd_softc *sc) 326e2c1fe90SJustin T. Gibbs { 327e2c1fe90SJustin T. Gibbs u_int index; 328e2c1fe90SJustin T. Gibbs 329e2c1fe90SJustin T. Gibbs for (index = 0; index < XBD_Q_COUNT; index++) 330e2c1fe90SJustin T. Gibbs xbd_initq_cm(sc, index); 331e2c1fe90SJustin T. Gibbs 332e2c1fe90SJustin T. Gibbs xbd_initq_bio(sc); 333e2c1fe90SJustin T. Gibbs } 334e2c1fe90SJustin T. Gibbs 33533eebb6aSJustin T. Gibbs #endif /* __XEN_BLKFRONT_BLOCK_H__ */ 336