/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__
#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define XBD_SEGS_TO_SIZE(segs)                                          \
        (((segs) - 1) * PAGE_SIZE)

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define XBD_SIZE_TO_SEGS(size)                                          \
        ((size / PAGE_SIZE) + 1)
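
/*
 * Worked example (illustrative only, assuming a 4KB PAGE_SIZE): a channel
 * that negotiates 12 segments per request supports
 * XBD_SEGS_TO_SIZE(12) = 11 * 4096 = 45056 bytes per I/O, and
 * XBD_SIZE_TO_SEGS(45056) = 11 + 1 = 12 maps that size back to the full
 * segment count, with the reserved segment covering an unaligned first or
 * last page.
 */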

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define XBD_MAX_RING_PAGES              32

/**
 * The maximum number of outstanding requests we will allow in a negotiated
 * block-front/back communication channel.
 */
#define XBD_MAX_REQUESTS                                                \
        __CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)
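
/*
 * Illustrative sizing (assumptions: 4KB pages and a 112-byte 64-bit blkif
 * ring entry): 32 ring pages provide 128KB of shared ring space, and
 * __CONST_RING_SIZE() yields the number of request/response slots that fit
 * in that space, rounded down to a power of two, which would work out to
 * roughly 1024 concurrently outstanding requests.
 */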

/**
 * The maximum number of blkif segments which can be provided per indirect
 * page in an indirect request.
 */
#define XBD_MAX_SEGMENTS_PER_PAGE                                       \
        (PAGE_SIZE / sizeof(struct blkif_request_segment))

/**
 * The maximum number of blkif segments which can be provided in an indirect
 * request.
 */
#define XBD_MAX_INDIRECT_SEGMENTS                                       \
        (BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * XBD_MAX_SEGMENTS_PER_PAGE)

/**
 * Compute the number of indirect segment pages required for an I/O with the
 * specified number of indirect segments.
 */
#define XBD_INDIRECT_SEGS_TO_PAGES(segs)                                \
        ((segs + XBD_MAX_SEGMENTS_PER_PAGE - 1) / XBD_MAX_SEGMENTS_PER_PAGE)
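
/*
 * Worked example (illustrative only, assuming a 4KB PAGE_SIZE, an 8-byte
 * struct blkif_request_segment and BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST
 * of 8): each indirect page then describes 4096 / 8 = 512 segments, an
 * indirect request can carry up to 8 * 512 = 4096 segments, and an I/O
 * needing 600 segments requires XBD_INDIRECT_SEGS_TO_PAGES(600) =
 * (600 + 511) / 512 = 2 indirect pages.
 */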

typedef enum {
        XBDCF_Q_MASK        = 0xFF,
        /* This command has contributed to xbd_qfrozen_cnt. */
        XBDCF_FROZEN        = 1<<8,
        /* Freeze the command queue on dispatch (i.e. single step command). */
        XBDCF_Q_FREEZE      = 1<<9,
        /* Bus DMA returned EINPROGRESS for this command. */
        XBDCF_ASYNC_MAPPING = 1<<10,
        XBDCF_INITIALIZER   = XBDCF_Q_MASK
} xbdc_flag_t;

struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
        TAILQ_ENTRY(xbd_command) cm_link;
        struct xbd_softc        *cm_sc;
        xbdc_flag_t              cm_flags;
        bus_dmamap_t             cm_map;
        uint64_t                 cm_id;
        grant_ref_t             *cm_sg_refs;
        struct bio              *cm_bp;
        grant_ref_t              cm_gref_head;
        void                    *cm_data;
        size_t                   cm_datalen;
        u_int                    cm_nseg;
        int                      cm_operation;
        blkif_sector_t           cm_sector_number;
        int                      cm_status;
        xbd_cbcf_t              *cm_complete;
        void                    *cm_indirectionpages;
        grant_ref_t              cm_indirectionrefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
};

typedef enum {
        XBD_Q_FREE,
        XBD_Q_READY,
        XBD_Q_BUSY,
        XBD_Q_COMPLETE,
        XBD_Q_BIO,
        XBD_Q_COUNT,
        XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;
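
/*
 * Illustrative note: the low byte of a command's cm_flags (XBDCF_Q_MASK)
 * doubles as the xbd_q_index_t of the queue the command currently sits on,
 * e.g.:
 *
 *      cm_flags == XBDCF_INITIALIZER            command on no queue (XBD_Q_NONE)
 *      cm_flags == (XBDCF_FROZEN | XBD_Q_BUSY)  frozen command on the busy queue
 *
 * The queue helpers below rewrite that byte as a command moves between
 * queues and panic on double-queueing.
 */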

typedef struct xbd_cm_q {
        TAILQ_HEAD(, xbd_command) q_tailq;
        uint32_t                  q_length;
        uint32_t                  q_max;
} xbd_cm_q_t;

typedef enum {
        XBD_STATE_DISCONNECTED,
        XBD_STATE_CONNECTED,
        XBD_STATE_SUSPENDED
} xbd_state_t;

typedef enum {
        XBDF_NONE         = 0,
        XBDF_OPEN         = 1 << 0, /* drive is open (can't shut down) */
        XBDF_BARRIER      = 1 << 1, /* backend supports barriers */
        XBDF_FLUSH        = 1 << 2, /* backend supports flush */
        XBDF_READY        = 1 << 3, /* Is ready */
        XBDF_CM_SHORTAGE  = 1 << 4, /* Free cm resource shortage active. */
        XBDF_GNT_SHORTAGE = 1 << 5, /* Grant ref resource shortage active */
        XBDF_WAIT_IDLE    = 1 << 6, /*
                                     * No new work until outstanding work
                                     * completes.
                                     */
        XBDF_DISCARD      = 1 << 7, /* backend supports discard */
        XBDF_PERSISTENT   = 1 << 8  /* backend supports persistent grants */
} xbd_flag_t;

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
        device_t                         xbd_dev;
        struct disk                     *xbd_disk;      /* disk params */
        struct bio_queue_head            xbd_bioq;      /* sort queue */
        int                              xbd_unit;
        xbd_flag_t                       xbd_flags;
        int                              xbd_qfrozen_cnt;
        int                              xbd_vdevice;
        xbd_state_t                      xbd_state;
        u_int                            xbd_ring_pages;
        uint32_t                         xbd_max_requests;
        uint32_t                         xbd_max_request_segments;
        uint32_t                         xbd_max_request_size;
        uint32_t                         xbd_max_request_indirectpages;
        grant_ref_t                      xbd_ring_ref[XBD_MAX_RING_PAGES];
        blkif_front_ring_t               xbd_ring;
        xen_intr_handle_t                xen_intr_handle;
        struct gnttab_free_callback      xbd_callback;
        xbd_cm_q_t                       xbd_cm_q[XBD_Q_COUNT];
        bus_dma_tag_t                    xbd_io_dmat;

        /**
         * The number of people holding this device open.  We won't allow a
         * hot-unplug unless this is 0.
         */
        int                              xbd_users;
        struct mtx                       xbd_io_lock;

        struct xbd_command              *xbd_shadow;
};

int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
                        uint16_t vdisk_info, unsigned long sector_size,
                        unsigned long phys_sector_size);

static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_cm_q *cmq;

        cmq = &sc->xbd_cm_q[index];
        cmq->q_length++;
        if (cmq->q_length > cmq->q_max)
                cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
        sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
        return (sc->xbd_cm_q[index].q_length);
}

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_cm_q *cmq;

        cmq = &sc->xbd_cm_q[index];
        TAILQ_INIT(&cmq->q_tailq);
        cmq->q_length = 0;
        cmq->q_max = 0;
}

static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));
        if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
                panic("%s: command %p is already on queue %d.",
                    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
        TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= index;
        xbd_added_qentry(cm->cm_sc, index);
}

static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));
        if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
                panic("%s: command %p is already on queue %d.",
                    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
        TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= index;
        xbd_added_qentry(cm->cm_sc, index);
}

static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_command *cm;

        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));

        if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
                if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
                        panic("%s: command %p is on queue %d, "
                            "not specified queue %d",
                            __func__, cm,
                            cm->cm_flags & XBDCF_Q_MASK,
                            index);
                }
                TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
                cm->cm_flags &= ~XBDCF_Q_MASK;
                cm->cm_flags |= XBD_Q_NONE;
                xbd_removed_qentry(cm->cm_sc, index);
        }
        return (cm);
}

static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
        xbd_q_index_t index;

        index = cm->cm_flags & XBDCF_Q_MASK;

        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));

        if (index != expected_index) {
                panic("%s: command %p is on queue %d, not specified queue %d",
                    __func__, cm, index, expected_index);
        }
        TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= XBD_Q_NONE;
        xbd_removed_qentry(cm->cm_sc, index);
}
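
/*
 * Sketch of a typical command lifecycle through these queues (illustrative
 * only; the exact flow is up to the driver that includes this header):
 *
 *      cm = xbd_dequeue_cm(sc, XBD_Q_FREE);     take an idle command
 *      xbd_enqueue_cm(cm, XBD_Q_READY);         stage it for dispatch
 *      ...
 *      cm = xbd_dequeue_cm(sc, XBD_Q_READY);    pull it at ring-dispatch time
 *      xbd_enqueue_cm(cm, XBD_Q_BUSY);          track it while on the ring
 *      ...
 *      xbd_remove_cm(cm, XBD_Q_BUSY);           on completion
 *      xbd_enqueue_cm(cm, XBD_Q_COMPLETE);      hand off to the completion path
 */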

static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
        bioq_init(&sc->xbd_bioq);
}

static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
        bioq_insert_tail(&sc->xbd_bioq, bp);
        xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
        bioq_insert_head(&sc->xbd_bioq, bp);
        xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
        struct bio *bp;

        if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
                bioq_remove(&sc->xbd_bioq, bp);
                xbd_removed_qentry(sc, XBD_Q_BIO);
        }
        return (bp);
}

static inline void
xbd_initqs(struct xbd_softc *sc)
{
        u_int index;

        for (index = 0; index < XBD_Q_COUNT; index++)
                xbd_initq_cm(sc, index);

        xbd_initq_bio(sc);
}

#endif /* __XEN_BLKFRONT_BLOCK_H__ */