/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__

#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define	XBD_SEGS_TO_SIZE(segs)						\
	(((segs) - 1) * PAGE_SIZE)
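/*
 * For example, with 4KB pages and the blkif transport's limit of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) segments per request,
 * XBD_SEGS_TO_SIZE(11) evaluates to 10 * PAGE_SIZE, or 40KB of payload.
 */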

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define	XBD_SIZE_TO_SEGS(size)						\
	(((size) / PAGE_SIZE) + 1)
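/*
 * For example, a 40KB transfer that does not begin on a page boundary
 * touches 11 pages: XBD_SIZE_TO_SEGS(40960) == (40960 / 4096) + 1 == 11.
 */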

/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBD_MAX_REQUESTS		256

/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 */
#define	XBD_MAX_REQUEST_SIZE						\
	MIN(MAXPHYS, XBD_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))
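/*
 * With the stock FreeBSD MAXPHYS of 128KB, the segment-derived bound of
 * XBD_SEGS_TO_SIZE(11) == 40KB is the smaller term and determines the
 * result.
 */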

/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBD_MAX_SEGMENTS_PER_REQUEST					\
	(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,				\
	     XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE)))
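/*
 * Note that the two limits round-trip: XBD_SIZE_TO_SEGS(40KB) == 11, so
 * with the defaults above this MIN() also resolves to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */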

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define XBD_MAX_RING_PAGES						    \
	BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBD_MAX_SEGMENTS_PER_REQUEST) \
		       * XBD_MAX_REQUESTS)

struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
	TAILQ_ENTRY(xbd_command) cm_link;
	struct xbd_softc	*cm_sc;
	u_int			 cm_flags;
#define XBD_CMD_FROZEN		(1<<0)
#define XBD_CMD_POLLED		(1<<1)
#define XBD_ON_XBDQ_FREE	(1<<2)
#define XBD_ON_XBDQ_READY	(1<<3)
#define XBD_ON_XBDQ_BUSY	(1<<4)
#define XBD_ON_XBDQ_COMPLETE	(1<<5)
#define XBD_ON_XBDQ_MASK	(XBD_ON_XBDQ_FREE | XBD_ON_XBDQ_READY |	\
				 XBD_ON_XBDQ_BUSY | XBD_ON_XBDQ_COMPLETE)
	bus_dmamap_t		 cm_map;
	uint64_t		 cm_id;
	grant_ref_t		*cm_sg_refs;
	struct bio		*cm_bp;
	grant_ref_t		 cm_gref_head;
	void			*cm_data;
	size_t			 cm_datalen;
	u_int			 cm_nseg;
	int			 cm_operation;
	blkif_sector_t		 cm_sector_number;
	int			 cm_status;
	xbd_cbcf_t		*cm_complete;
};

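/*
 * Command queue indices.  A command normally migrates from the free
 * queue, to ready (resources assigned), to busy (outstanding at the
 * backend), to complete, and finally back to free.
 */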
#define XBDQ_FREE	0
#define XBDQ_BIO	1
#define XBDQ_READY	2
#define XBDQ_BUSY	3
#define XBDQ_COMPLETE	4
#define XBDQ_COUNT	5

struct xbd_qstat {
	uint32_t	q_length;
	uint32_t	q_max;
};

union xbd_statrequest {
	uint32_t		ms_item;
	struct xbd_qstat	ms_qstat;
};

/*
 * We have one of these per vbd, whether IDE, SCSI, or 'other'.
 */
struct xbd_softc {
	device_t			 xbd_dev;
	struct disk			*xbd_disk;	/* disk params */
	struct bio_queue_head		 xbd_bioq;	/* sort queue */
	int				 xbd_unit;
	int				 xbd_flags;
#define XBD_OPEN	(1 << 0)	/* drive is open (can't shut down) */
#define XBD_BARRIER	(1 << 1)	/* backend supports barriers */
#define XBD_READY	(1 << 2)	/* is ready */
#define XBD_FROZEN	(1 << 3)	/* waiting for resources */
	int				 xbd_vdevice;
	int				 xbd_connected;
	u_int				 xbd_ring_pages;
	uint32_t			 xbd_max_requests;
	uint32_t			 xbd_max_request_segments;
	uint32_t			 xbd_max_request_blocks;
	uint32_t			 xbd_max_request_size;
	grant_ref_t			 xbd_ring_ref[XBD_MAX_RING_PAGES];
	blkif_front_ring_t		 xbd_ring;
	unsigned int			 xbd_irq;
	struct gnttab_free_callback	 xbd_callback;
	TAILQ_HEAD(,xbd_command)	 xbd_cm_free;
	TAILQ_HEAD(,xbd_command)	 xbd_cm_ready;
	TAILQ_HEAD(,xbd_command)	 xbd_cm_busy;
	TAILQ_HEAD(,xbd_command)	 xbd_cm_complete;
	struct xbd_qstat		 xbd_qstat[XBDQ_COUNT];
	bus_dma_tag_t			 xbd_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int				 xbd_users;
	struct mtx			 xbd_io_lock;

	struct xbd_command		*xbd_shadow;
};

int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
			uint16_t vdisk_info, unsigned long sector_size);

#define XBDQ_ADD(sc, qname)					\
	do {							\
		struct xbd_qstat *qs;				\
								\
		qs = &(sc)->xbd_qstat[qname];			\
		qs->q_length++;					\
		if (qs->q_length > qs->q_max)			\
			qs->q_max = qs->q_length;		\
	} while (0)

#define XBDQ_REMOVE(sc, qname)	(sc)->xbd_qstat[qname].q_length--

#define XBDQ_INIT(sc, qname)					\
	do {							\
		(sc)->xbd_qstat[qname].q_length = 0;		\
		(sc)->xbd_qstat[qname].q_max = 0;		\
	} while (0)

#define XBDQ_COMMAND_QUEUE(name, index)					\
	static __inline void						\
	xbd_initq_ ## name (struct xbd_softc *sc)			\
	{								\
		TAILQ_INIT(&sc->xbd_cm_ ## name);			\
		XBDQ_INIT(sc, index);					\
	}								\
	static __inline void						\
	xbd_enqueue_ ## name (struct xbd_command *cm)			\
	{								\
		if ((cm->cm_flags & XBD_ON_XBDQ_MASK) != 0) {		\
			printf("command %p is on another queue, "	\
			    "flags = %#x\n", cm, cm->cm_flags);		\
			panic("command is on another queue");		\
		}							\
		TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_ ## name, cm, cm_link); \
		cm->cm_flags |= XBD_ON_ ## index;			\
		XBDQ_ADD(cm->cm_sc, index);				\
	}								\
	static __inline void						\
	xbd_requeue_ ## name (struct xbd_command *cm)			\
	{								\
		if ((cm->cm_flags & XBD_ON_XBDQ_MASK) != 0) {		\
			printf("command %p is on another queue, "	\
			    "flags = %#x\n", cm, cm->cm_flags);		\
			panic("command is on another queue");		\
		}							\
		TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_ ## name, cm, cm_link); \
		cm->cm_flags |= XBD_ON_ ## index;			\
		XBDQ_ADD(cm->cm_sc, index);				\
	}								\
	static __inline struct xbd_command *				\
	xbd_dequeue_ ## name (struct xbd_softc *sc)			\
	{								\
		struct xbd_command *cm;					\
									\
		if ((cm = TAILQ_FIRST(&sc->xbd_cm_ ## name)) != NULL) { \
			if ((cm->cm_flags & XBD_ON_XBDQ_MASK) !=	\
			     XBD_ON_ ## index) {			\
				printf("command %p not in queue, "	\
				    "flags = %#x, bit = %#x\n", cm,	\
				    cm->cm_flags, XBD_ON_ ## index);	\
				panic("command not in queue");		\
			}						\
			TAILQ_REMOVE(&sc->xbd_cm_ ## name, cm, cm_link);\
			cm->cm_flags &= ~XBD_ON_ ## index;		\
			XBDQ_REMOVE(sc, index);				\
		}							\
		return (cm);						\
	}								\
	static __inline void						\
	xbd_remove_ ## name (struct xbd_command *cm)			\
	{								\
		if ((cm->cm_flags & XBD_ON_XBDQ_MASK) != XBD_ON_ ## index) {\
			printf("command %p not in queue, flags = %#x, " \
			    "bit = %#x\n", cm, cm->cm_flags,		\
			    XBD_ON_ ## index);				\
			panic("command not in queue");			\
		}							\
		TAILQ_REMOVE(&cm->cm_sc->xbd_cm_ ## name, cm, cm_link);	\
		cm->cm_flags &= ~XBD_ON_ ## index;			\
		XBDQ_REMOVE(cm->cm_sc, index);				\
	}								\
struct hack

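/*
 * The trailing "struct hack" declaration in XBDQ_COMMAND_QUEUE() exists
 * solely to consume the semicolon that terminates each invocation below.
 * Each invocation generates a family of inline queue handlers:
 * xbd_initq_<name>(), xbd_enqueue_<name>(), xbd_requeue_<name>(),
 * xbd_dequeue_<name>(), and xbd_remove_<name>().
 */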
XBDQ_COMMAND_QUEUE(free, XBDQ_FREE);
XBDQ_COMMAND_QUEUE(ready, XBDQ_READY);
XBDQ_COMMAND_QUEUE(busy, XBDQ_BUSY);
XBDQ_COMMAND_QUEUE(complete, XBDQ_COMPLETE);

static __inline void
xbd_initq_bio(struct xbd_softc *sc)
{
	bioq_init(&sc->xbd_bioq);
	XBDQ_INIT(sc, XBDQ_BIO);
}

static __inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xbd_bioq, bp);
	XBDQ_ADD(sc, XBDQ_BIO);
}

static __inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xbd_bioq, bp);
	XBDQ_ADD(sc, XBDQ_BIO);
}

static __inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
		bioq_remove(&sc->xbd_bioq, bp);
		XBDQ_REMOVE(sc, XBDQ_BIO);
	}
	return (bp);
}
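
/*
 * Typical consumption pattern (a sketch only; the authoritative logic
 * lives in the blkfront driver itself):
 *
 *	struct xbd_command *cm;
 *	struct bio *bp;
 *
 *	if ((cm = xbd_dequeue_free(sc)) == NULL)
 *		return;				// no free commands; freeze
 *	if ((bp = xbd_dequeue_bio(sc)) == NULL) {
 *		xbd_enqueue_free(cm);		// nothing to do; put it back
 *		return;
 *	}
 *	cm->cm_bp = bp;
 *	xbd_enqueue_busy(cm);			// while outstanding at the backend
 */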

#endif /* __XEN_BLKFRONT_BLOCK_H__ */