/*
 * XenBSD block device driver
 *
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef __XEN_DRIVERS_BLOCK_H__
#define __XEN_DRIVERS_BLOCK_H__
#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define	XBF_SEGS_TO_SIZE(segs)						\
	(((segs) - 1) * PAGE_SIZE)
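
/*
 * Example (illustrative, assuming the common 4KB PAGE_SIZE): with the
 * transport maximum of BLKIF_MAX_SEGMENTS_PER_REQUEST == 11 segments,
 * XBF_SEGS_TO_SIZE(11) == (11 - 1) * 4096 == 40960 bytes (40KB).  The
 * segment held in reserve absorbs the extra page spanned when the buffer
 * does not begin on a page boundary.
 */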

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define	XBF_SIZE_TO_SEGS(size)						\
	(((size) / PAGE_SIZE) + 1)
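
/*
 * Example (illustrative): this macro is the inverse of XBF_SEGS_TO_SIZE()
 * with the reserved segment added back, so with a 4KB PAGE_SIZE an
 * unaligned 40960-byte transfer needs XBF_SIZE_TO_SEGS(40960) ==
 * 10 + 1 == 11 segments.
 */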

/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBF_MAX_REQUESTS		256

/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 */
#define	XBF_MAX_REQUEST_SIZE						\
	MIN(MAXPHYS, XBF_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))
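
/*
 * Example (illustrative, assuming the default 128KB MAXPHYS and the
 * standard 11-segment blkif limit): XBF_SEGS_TO_SIZE(11) == 40960, so
 * XBF_MAX_REQUEST_SIZE == MIN(131072, 40960) == 40960; the transport
 * segment limit, not MAXPHYS, is the binding constraint.
 */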

/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBF_MAX_SEGMENTS_PER_REQUEST					\
	(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,				\
	     XBF_SIZE_TO_SEGS(XBF_MAX_REQUEST_SIZE)))
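
/*
 * Under the same illustrative assumptions, XBF_SIZE_TO_SEGS(40960) == 11,
 * so XBF_MAX_SEGMENTS_PER_REQUEST == MIN(11, 11) == 11: the size and
 * segment limits round-trip consistently.
 */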

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBF_MAX_REQUEST_SIZE'd.
 */
#define XBF_MAX_RING_PAGES						    \
	BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBF_MAX_SEGMENTS_PER_REQUEST) \
		       * XBF_MAX_REQUESTS)
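
/*
 * Descriptive note: BLKIF_SEGS_TO_BLOCKS() (from xen/blkif.h) converts a
 * segment count into the number of ring slots such a request consumes
 * (one header block plus any additional segment blocks), and
 * BLKIF_RING_PAGES() rounds that slot count up to whole shared pages.
 * The product above therefore sizes the ring so that all XBF_MAX_REQUESTS
 * commands can be maximally sized at once.
 */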

struct xlbd_type_info
{
	int partn_shift;
	int disks_per_major;
	char *devname;
	char *diskname;
};

struct xlbd_major_info
{
	int major;
	int index;
	int usage;
	struct xlbd_type_info *type;
};

struct xb_command {
	TAILQ_ENTRY(xb_command)	cm_link;
	struct xb_softc		*cm_sc;
	u_int			cm_flags;
#define XB_CMD_FROZEN		(1<<0)
#define XB_CMD_POLLED		(1<<1)
#define XB_ON_XBQ_FREE		(1<<2)
#define XB_ON_XBQ_READY		(1<<3)
#define XB_ON_XBQ_BUSY		(1<<4)
#define XB_ON_XBQ_COMPLETE	(1<<5)
#define XB_ON_XBQ_MASK		(XB_ON_XBQ_FREE | XB_ON_XBQ_READY |	\
				 XB_ON_XBQ_BUSY | XB_ON_XBQ_COMPLETE)
	bus_dmamap_t		map;
	uint64_t		id;
	grant_ref_t		*sg_refs;
	struct bio		*bp;
	grant_ref_t		gref_head;
	void			*data;
	size_t			datalen;
	u_int			nseg;
	int			operation;
	blkif_sector_t		sector_number;
	int			status;
	void			(*cm_complete)(struct xb_command *);
};
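
/*
 * Typical command life cycle (descriptive note): a command is pulled from
 * cm_free, paired with a bio and moved to cm_ready, placed on cm_busy
 * while its request is outstanding on the shared ring, and finally parked
 * on cm_complete until cm_complete() or the default completion path
 * finishes it and returns it to cm_free.  The XB_ON_XBQ_* flags record
 * which queue currently owns the command.
 */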

#define XBQ_FREE	0
#define XBQ_BIO		1
#define XBQ_READY	2
#define XBQ_BUSY	3
#define XBQ_COMPLETE	4
#define XBQ_COUNT	5

struct xb_qstat {
	uint32_t	q_length;
	uint32_t	q_max;
};

union xb_statrequest {
	uint32_t		ms_item;
	struct xb_qstat		ms_qstat;
};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xb_softc {
	device_t		xb_dev;
	struct disk		*xb_disk;		/* disk params */
	struct bio_queue_head	xb_bioq;		/* sort queue */
	int			xb_unit;
	int			xb_flags;
#define XB_OPEN		(1 << 0)	/* drive is open (can't shut down) */
#define XB_BARRIER	(1 << 1)	/* backend supports barriers */
#define XB_READY	(1 << 2)	/* device is ready */
#define XB_FROZEN	(1 << 3)	/* waiting for resources */
	int			vdevice;
	int			connected;
	u_int			ring_pages;
	uint32_t		max_requests;
	uint32_t		max_request_segments;
	uint32_t		max_request_blocks;
	uint32_t		max_request_size;
	grant_ref_t		ring_ref[XBF_MAX_RING_PAGES];
	blkif_front_ring_t	ring;
	unsigned int		irq;
	struct gnttab_free_callback	callback;
	TAILQ_HEAD(,xb_command)	cm_free;
	TAILQ_HEAD(,xb_command)	cm_ready;
	TAILQ_HEAD(,xb_command)	cm_busy;
	TAILQ_HEAD(,xb_command)	cm_complete;
	struct xb_qstat		xb_qstat[XBQ_COUNT];
	bus_dma_tag_t		xb_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int			users;
	struct mtx		xb_io_lock;

	struct xb_command	*shadow;
};

int xlvbd_add(struct xb_softc *, blkif_sector_t sectors, int device,
	      uint16_t vdisk_info, unsigned long sector_size);
void xlvbd_del(struct xb_softc *);

#define XBQ_ADD(sc, qname)					\
	do {							\
		struct xb_qstat *qs;				\
								\
		qs = &(sc)->xb_qstat[qname];			\
		qs->q_length++;					\
		if (qs->q_length > qs->q_max)			\
			qs->q_max = qs->q_length;		\
	} while (0)
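
/*
 * XBQ_ADD() both counts the insertion and records a high-water mark, so
 * a hypothetical monitoring path could report the deepest a queue has
 * ever been, e.g.:
 *
 *	struct xb_qstat *qs = &sc->xb_qstat[XBQ_READY];
 *	printf("ready queue: %u now, %u max\n", qs->q_length, qs->q_max);
 */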

#define XBQ_REMOVE(sc, qname)	(sc)->xb_qstat[qname].q_length--

#define XBQ_INIT(sc, qname)					\
	do {							\
		(sc)->xb_qstat[qname].q_length = 0;		\
		(sc)->xb_qstat[qname].q_max = 0;		\
	} while (0)

#define XBQ_COMMAND_QUEUE(name, index)					\
	static __inline void						\
	xb_initq_ ## name (struct xb_softc *sc)				\
	{								\
		TAILQ_INIT(&sc->cm_ ## name);				\
		XBQ_INIT(sc, index);					\
	}								\
	static __inline void						\
	xb_enqueue_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != 0) {		\
			printf("command %p is on another queue, "	\
			    "flags = %#x\n", cm, cm->cm_flags);		\
			panic("command is on another queue");		\
		}							\
		TAILQ_INSERT_TAIL(&cm->cm_sc->cm_ ## name, cm, cm_link); \
		cm->cm_flags |= XB_ON_ ## index;			\
		XBQ_ADD(cm->cm_sc, index);				\
	}								\
	static __inline void						\
	xb_requeue_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != 0) {		\
			printf("command %p is on another queue, "	\
			    "flags = %#x\n", cm, cm->cm_flags);		\
			panic("command is on another queue");		\
		}							\
		TAILQ_INSERT_HEAD(&cm->cm_sc->cm_ ## name, cm, cm_link); \
		cm->cm_flags |= XB_ON_ ## index;			\
		XBQ_ADD(cm->cm_sc, index);				\
	}								\
	static __inline struct xb_command *				\
	xb_dequeue_ ## name (struct xb_softc *sc)			\
	{								\
		struct xb_command *cm;					\
									\
		if ((cm = TAILQ_FIRST(&sc->cm_ ## name)) != NULL) {	\
			if ((cm->cm_flags & XB_ON_XBQ_MASK) !=		\
			     XB_ON_ ## index) {				\
				printf("command %p not in queue, "	\
				    "flags = %#x, bit = %#x\n", cm,	\
				    cm->cm_flags, XB_ON_ ## index);	\
				panic("command not in queue");		\
			}						\
			TAILQ_REMOVE(&sc->cm_ ## name, cm, cm_link);	\
			cm->cm_flags &= ~XB_ON_ ## index;		\
			XBQ_REMOVE(sc, index);				\
		}							\
		return (cm);						\
	}								\
	static __inline void						\
	xb_remove_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != XB_ON_ ## index) { \
			printf("command %p not in queue, flags = %#x, " \
			    "bit = %#x\n", cm, cm->cm_flags,		\
			    XB_ON_ ## index);				\
			panic("command not in queue");			\
		}							\
		TAILQ_REMOVE(&cm->cm_sc->cm_ ## name, cm, cm_link);	\
		cm->cm_flags &= ~XB_ON_ ## index;			\
		XBQ_REMOVE(cm->cm_sc, index);				\
	}								\
struct hack

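/*
 * Instantiate the init/enqueue/requeue/dequeue/remove handlers for each
 * command queue.  The XBQ_COMMAND_QUEUE() expansion deliberately ends in
 * a dangling "struct hack" declaration so that each invocation below can
 * carry a terminating semicolon.
 */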
XBQ_COMMAND_QUEUE(free, XBQ_FREE);
XBQ_COMMAND_QUEUE(ready, XBQ_READY);
XBQ_COMMAND_QUEUE(busy, XBQ_BUSY);
XBQ_COMMAND_QUEUE(complete, XBQ_COMPLETE);

static __inline void
xb_initq_bio(struct xb_softc *sc)
{
	bioq_init(&sc->xb_bioq);
	XBQ_INIT(sc, XBQ_BIO);
}

static __inline void
xb_enqueue_bio(struct xb_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xb_bioq, bp);
	XBQ_ADD(sc, XBQ_BIO);
}

static __inline void
xb_requeue_bio(struct xb_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xb_bioq, bp);
	XBQ_ADD(sc, XBQ_BIO);
}

static __inline struct bio *
xb_dequeue_bio(struct xb_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xb_bioq)) != NULL) {
		bioq_remove(&sc->xb_bioq, bp);
		XBQ_REMOVE(sc, XBQ_BIO);
	}
	return (bp);
}
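
/*
 * Illustrative sketch (hypothetical consumer, not part of this header):
 * a strategy routine would stage incoming bios on the sort queue, and a
 * start routine, holding xb_io_lock, would drain them into free
 * commands, e.g.:
 *
 *	mtx_lock(&sc->xb_io_lock);
 *	xb_enqueue_bio(sc, bp);
 *	while ((cm = xb_dequeue_free(sc)) != NULL &&
 *	    (bp = xb_dequeue_bio(sc)) != NULL) {
 *		cm->bp = bp;
 *		xb_enqueue_ready(cm);
 *	}
 *	mtx_unlock(&sc->xb_io_lock);
 */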

#endif /* __XEN_DRIVERS_BLOCK_H__ */