/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#define DRV_PFX "xen-blkback:"
#define DPRINTK(fmt, args...)				\
	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\
		 __func__, __LINE__, ##args)


/*
 * Not a real protocol.  Used to generate ring structs which contain only
 * the elements common to all protocols.  This gives us a compiler-checkable
 * way to use common struct elements, so we can avoid switch(protocol) in a
 * number of places.
 */
struct blkif_common_request {
	char dummy;
};
struct blkif_common_response {
	char dummy;
};

/* i386 protocol version */
#pragma pack(push, 4)

struct blkif_x86_32_request_rw {
	blkif_sector_t sector_number;	/* start sector idx on disk (r/w only) */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

struct blkif_x86_32_request_discard {
	blkif_sector_t sector_number;	/* start sector idx on disk */
	uint64_t nr_sectors;
};

struct blkif_x86_32_request {
	uint8_t operation;		/* BLKIF_OP_???                        */
	uint8_t nr_segments;		/* number of segments                  */
	blkif_vdev_t handle;		/* only for read/write requests        */
	uint64_t id;			/* private guest value, echoed in resp */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
	} u;
};
struct blkif_x86_32_response {
	uint64_t id;			/* copied from request */
	uint8_t operation;		/* copied from request */
	int16_t status;			/* BLKIF_RSP_???       */
};
#pragma pack(pop)
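
/*
 * Illustrative note (hypothetical check, not used by the driver): the
 * 4-byte packing above matters because a 32-bit guest places the 64-bit
 * 'id' field at offset 4, whereas the x86_64 layout below aligns it to
 * offset 8.  Inside a function body this could be made explicit with:
 *
 *	BUILD_BUG_ON(offsetof(struct blkif_x86_32_request, id) != 4);
 *	BUILD_BUG_ON(offsetof(struct blkif_x86_64_request, id) != 8);
 */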

/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	blkif_sector_t sector_number;	/* start sector idx on disk (r/w only) */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

struct blkif_x86_64_request_discard {
	blkif_sector_t sector_number;	/* start sector idx on disk */
	uint64_t nr_sectors;
};

struct blkif_x86_64_request {
	uint8_t operation;		/* BLKIF_OP_???                        */
	uint8_t nr_segments;		/* number of segments                  */
	blkif_vdev_t handle;		/* only for read/write requests        */
	uint64_t __attribute__((__aligned__(8))) id;
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
	} u;
};
struct blkif_x86_64_response {
	uint64_t __attribute__((__aligned__(8))) id;
	uint8_t operation;		/* copied from request */
	int16_t status;			/* BLKIF_RSP_???       */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_x86_64_response);

union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};

enum blkif_backend_type {
	BLKIF_BACKEND_PHY  = 1,
	BLKIF_BACKEND_FILE = 2,
};

struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	bool			flush_support;
};

struct backend_info;

struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	enum blkif_backend_type	blk_backend_type;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	/* Private fields. */
	spinlock_t		blk_ring_lock;
	atomic_t		refcnt;

	wait_queue_head_t	wq;
	/* For barrier (drain) requests. */
	struct completion	drain_complete;
	atomic_t		drain;
	/* One thread per blkif. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* statistics */
	unsigned long		st_print;
	int			st_rd_req;
	int			st_wr_req;
	int			st_oo_req;
	int			st_f_req;
	int			st_ds_req;
	int			st_rd_sect;
	int			st_wr_sect;

	wait_queue_head_t	waiting_to_free;
};
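
/*
 * Illustrative sketch only: how a backend can bind the already-mapped
 * shared page (blk_ring) to the protocol-specific back ring, depending on
 * which ABI the frontend negotiated.  The helper name is hypothetical;
 * BACK_RING_INIT and the *_sring types come from the ring/blkif interface
 * headers included at the top of this file.
 */
static inline void xen_blkif_init_back_ring_sketch(struct xen_blkif *blkif)
{
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		BACK_RING_INIT(&blkif->blk_rings.native,
			       (struct blkif_sring *)blkif->blk_ring,
			       PAGE_SIZE);
		break;
	case BLKIF_PROTOCOL_X86_32:
		BACK_RING_INIT(&blkif->blk_rings.x86_32,
			       (struct blkif_x86_32_sring *)blkif->blk_ring,
			       PAGE_SIZE);
		break;
	case BLKIF_PROTOCOL_X86_64:
		BACK_RING_INIT(&blkif->blk_rings.x86_64,
			       (struct blkif_x86_64_sring *)blkif->blk_ring,
			       PAGE_SIZE);
		break;
	}
}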

#define vbd_sz(_v)	((_v)->bdev->bd_part ?				\
			 (_v)->bdev->bd_part->nr_sects :		\
			 get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			wake_up(&(_b)->waiting_to_free);\
	} while (0)

struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};

int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);

static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	dst->operation = src->operation;
	dst->nr_segments = src->nr_segments;
	dst->handle = src->handle;
	dst->id = src->id;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->nr_segments)
			n = dst->nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	default:
		break;
	}
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	dst->operation = src->operation;
	dst->nr_segments = src->nr_segments;
	dst->handle = src->handle;
	dst->id = src->id;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->nr_segments)
			n = dst->nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	default:
		break;
	}
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
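
/*
 * Illustrative usage sketch (hypothetical locals 'req' and 'rc', and a
 * hypothetical 'blkif' pointer): a request-processing loop copies each
 * request off the shared ring into a native struct blkif_request, using
 * the per-protocol helpers above for foreign ABIs:
 *
 *	switch (blkif->blk_protocol) {
 *	case BLKIF_PROTOCOL_NATIVE:
 *		memcpy(&req, RING_GET_REQUEST(&blkif->blk_rings.native, rc),
 *		       sizeof(req));
 *		break;
 *	case BLKIF_PROTOCOL_X86_32:
 *		blkif_get_x86_32_req(&req,
 *				RING_GET_REQUEST(&blkif->blk_rings.x86_32, rc));
 *		break;
 *	case BLKIF_PROTOCOL_X86_64:
 *		blkif_get_x86_64_req(&req,
 *				RING_GET_REQUEST(&blkif->blk_rings.x86_64, rc));
 *		break;
 *	}
 */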