/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__
#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define XBD_SEGS_TO_SIZE(segs)                                          \
        (((segs) - 1) * PAGE_SIZE)
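
/*
 * Worked example (illustrative only; assumes 4 KiB pages): a request
 * limited to 11 segments can describe at most
 * XBD_SEGS_TO_SIZE(11) = (11 - 1) * 4096 = 40960 bytes, one segment
 * being held back to absorb a misaligned first/last page.
 */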

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define XBD_SIZE_TO_SEGS(size)                                          \
        ((size / PAGE_SIZE) + 1)

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define XBD_MAX_RING_PAGES              32

/**
 * The maximum number of outstanding requests we will allow in a negotiated
 * block-front/back communication channel.
 */
#define XBD_MAX_REQUESTS                                                \
        __CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)

/**
 * The maximum number of blkif segments which can be provided per indirect
 * page in an indirect request.
 */
#define XBD_MAX_SEGMENTS_PER_PAGE                                       \
        (PAGE_SIZE / sizeof(struct blkif_request_segment))

/**
 * The maximum number of blkif segments which can be provided in an indirect
 * request.
 */
#define XBD_MAX_INDIRECT_SEGMENTS                                       \
        (BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * XBD_MAX_SEGMENTS_PER_PAGE)

/**
 * Compute the number of indirect segment pages required for an I/O with the
 * specified number of indirect segments.
 */
#define XBD_INDIRECT_SEGS_TO_PAGES(segs)                                \
        ((segs + XBD_MAX_SEGMENTS_PER_PAGE - 1) / XBD_MAX_SEGMENTS_PER_PAGE)
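
/*
 * Worked examples (illustrative only; they assume 4 KiB pages and an
 * 8 byte struct blkif_request_segment):
 *
 *      XBD_SIZE_TO_SEGS(65536)         = 65536 / 4096 + 1  = 17 segments
 *      XBD_MAX_SEGMENTS_PER_PAGE       = 4096 / 8           = 512 segments
 *      XBD_INDIRECT_SEGS_TO_PAGES(600) = (600 + 511) / 512  = 2 indirect pages
 */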

typedef enum {
        XBDCF_Q_MASK            = 0xFF,
        /* This command has contributed to xbd_qfrozen_cnt. */
        XBDCF_FROZEN            = 1<<8,
        /* Freeze the command queue on dispatch (i.e. single step command). */
        XBDCF_Q_FREEZE          = 1<<9,
        /* Bus DMA returned EINPROGRESS for this command. */
        XBDCF_ASYNC_MAPPING     = 1<<10,
        XBDCF_INITIALIZER       = XBDCF_Q_MASK
} xbdc_flag_t;

struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
        TAILQ_ENTRY(xbd_command) cm_link;
        struct xbd_softc        *cm_sc;
        xbdc_flag_t              cm_flags;
        bus_dmamap_t             cm_map;
        uint64_t                 cm_id;
        grant_ref_t             *cm_sg_refs;
        struct bio              *cm_bp;
        grant_ref_t              cm_gref_head;
        void                    *cm_data;
        size_t                   cm_datalen;
        u_int                    cm_nseg;
        int                      cm_operation;
        blkif_sector_t           cm_sector_number;
        int                      cm_status;
        xbd_cbcf_t              *cm_complete;
        void                    *cm_indirectionpages;
        grant_ref_t              cm_indirectionrefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
};

typedef enum {
        XBD_Q_FREE,
        XBD_Q_READY,
        XBD_Q_BUSY,
        XBD_Q_COMPLETE,
        XBD_Q_BIO,
        XBD_Q_COUNT,
        XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;
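
/*
 * The queue indices above share storage with the xbdc_flag_t bits: the low
 * byte of cm_flags (XBDCF_Q_MASK) records which queue a command is currently
 * linked onto, while the state flags proper begin at 1<<8.  A minimal sketch
 * of the encoding, mirroring what the queue helpers below do:
 *
 *      xbd_q_index_t cur = cm->cm_flags & XBDCF_Q_MASK;   (current queue)
 *      cm->cm_flags &= ~XBDCF_Q_MASK;                      (keep state flags)
 *      cm->cm_flags |= XBD_Q_READY;                        (record new queue)
 */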

typedef struct xbd_cm_q {
        TAILQ_HEAD(, xbd_command) q_tailq;
        uint32_t                  q_length;
        uint32_t                  q_max;
} xbd_cm_q_t;

typedef enum {
        XBD_STATE_DISCONNECTED,
        XBD_STATE_CONNECTED,
        XBD_STATE_SUSPENDED
} xbd_state_t;

typedef enum {
        XBDF_NONE         = 0,
        XBDF_OPEN         = 1 << 0,     /* drive is open (can't shut down) */
        XBDF_BARRIER      = 1 << 1,     /* backend supports barriers */
        XBDF_FLUSH        = 1 << 2,     /* backend supports flush */
        XBDF_READY        = 1 << 3,     /* Is ready */
        XBDF_CM_SHORTAGE  = 1 << 4,     /* Free cm resource shortage active. */
        XBDF_GNT_SHORTAGE = 1 << 5,     /* Grant ref resource shortage active */
        XBDF_WAIT_IDLE    = 1 << 6      /*
                                         * No new work until outstanding work
                                         * completes.
                                         */
} xbd_flag_t;

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
        device_t                         xbd_dev;
        struct disk                     *xbd_disk;      /* disk params */
        struct bio_queue_head            xbd_bioq;      /* sort queue */
        int                              xbd_unit;
        xbd_flag_t                       xbd_flags;
        int                              xbd_qfrozen_cnt;
        int                              xbd_vdevice;
        xbd_state_t                      xbd_state;
        u_int                            xbd_ring_pages;
        uint32_t                         xbd_max_requests;
        uint32_t                         xbd_max_request_segments;
        uint32_t                         xbd_max_request_size;
        uint32_t                         xbd_max_request_indirectpages;
        grant_ref_t                      xbd_ring_ref[XBD_MAX_RING_PAGES];
        blkif_front_ring_t               xbd_ring;
        xen_intr_handle_t                xen_intr_handle;
        struct gnttab_free_callback      xbd_callback;
        xbd_cm_q_t                       xbd_cm_q[XBD_Q_COUNT];
        bus_dma_tag_t                    xbd_io_dmat;

        /**
         * The number of people holding this device open.  We won't allow a
         * hot-unplug unless this is 0.
         */
        int                              xbd_users;
        struct mtx                       xbd_io_lock;

        struct xbd_command              *xbd_shadow;
};

int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
                        uint16_t vdisk_info, unsigned long sector_size);

static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_cm_q *cmq;

        cmq = &sc->xbd_cm_q[index];
        cmq->q_length++;
        if (cmq->q_length > cmq->q_max)
                cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
        sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
        return (sc->xbd_cm_q[index].q_length);
}
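
/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 * the per-queue counters make it cheap to test whether the device has gone
 * idle, e.g. as a building block for the XBDF_WAIT_IDLE behaviour above:
 *
 *      bool idle;
 *
 *      idle = (xbd_queue_length(sc, XBD_Q_BUSY) == 0 &&
 *          xbd_queue_length(sc, XBD_Q_READY) == 0);
 */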

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_cm_q *cmq;

        cmq = &sc->xbd_cm_q[index];
        TAILQ_INIT(&cmq->q_tailq);
        cmq->q_length = 0;
        cmq->q_max = 0;
}

static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));
        if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
                panic("%s: command %p is already on queue %d.",
                    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
        TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= index;
        xbd_added_qentry(cm->cm_sc, index);
}

static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));
        if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
                panic("%s: command %p is already on queue %d.",
                    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
        TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= index;
        xbd_added_qentry(cm->cm_sc, index);
}

static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_command *cm;

        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));

        if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
                if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
                        panic("%s: command %p is on queue %d, "
                            "not specified queue %d",
                            __func__, cm,
                            cm->cm_flags & XBDCF_Q_MASK,
                            index);
                }
                TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
                cm->cm_flags &= ~XBDCF_Q_MASK;
                cm->cm_flags |= XBD_Q_NONE;
                xbd_removed_qentry(cm->cm_sc, index);
        }
        return (cm);
}

static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
        xbd_q_index_t index;

        index = cm->cm_flags & XBDCF_Q_MASK;

        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));

        if (index != expected_index) {
                panic("%s: command %p is on queue %d, not specified queue %d",
                    __func__, cm, index, expected_index);
        }
        TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= XBD_Q_NONE;
        xbd_removed_qentry(cm->cm_sc, index);
}

static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
        bioq_init(&sc->xbd_bioq);
}

static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
        bioq_insert_tail(&sc->xbd_bioq, bp);
        xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
        bioq_insert_head(&sc->xbd_bioq, bp);
        xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
        struct bio *bp;

        if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
                bioq_remove(&sc->xbd_bioq, bp);
                xbd_removed_qentry(sc, XBD_Q_BIO);
        }
        return (bp);
}

static inline void
xbd_initqs(struct xbd_softc *sc)
{
        u_int index;

        for (index = 0; index < XBD_Q_COUNT; index++)
                xbd_initq_cm(sc, index);

        xbd_initq_bio(sc);
}
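
/*
 * A minimal sketch of how these helpers are intended to fit together
 * (hypothetical caller; the real driver adds resource-shortage handling,
 * error paths and shared-ring management, all under xbd_io_lock):
 *
 *      struct xbd_command *cm;
 *      struct bio *bp;
 *
 *      mtx_lock(&sc->xbd_io_lock);
 *      if ((bp = xbd_dequeue_bio(sc)) != NULL) {
 *              cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
 *              if (cm != NULL) {
 *                      cm->cm_bp = bp;
 *                      xbd_enqueue_cm(cm, XBD_Q_READY);
 *              } else
 *                      xbd_requeue_bio(sc, bp);
 *      }
 *      mtx_unlock(&sc->xbd_io_lock);
 *
 * Dispatch would later pull the command off XBD_Q_READY, post it to the
 * shared ring and park it on XBD_Q_BUSY; completion handling would move it
 * to XBD_Q_COMPLETE and eventually back to XBD_Q_FREE once the bio has been
 * finished.
 */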

#endif /* __XEN_BLKFRONT_BLOCK_H__ */