xref: /freebsd/sys/dev/xen/blkfront/block.h (revision 10b59a9b4add0320d52c15ce057dd697261e7dfc)
/*
 * XenBSD block device driver
 *
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */


#ifndef __XEN_DRIVERS_BLOCK_H__
#define __XEN_DRIVERS_BLOCK_H__
#include <xen/blkif.h>

/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBF_MAX_REQUESTS		256

/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define	XBF_MAX_REQUEST_SIZE		\
	MIN(MAXPHYS, (BLKIF_MAX_SEGMENTS_PER_REQUEST - 1) * PAGE_SIZE)
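
/*
 * Illustrative sketch only (hypothetical, not compiled): a transfer of
 * "len" bytes starting "off" bytes into a page spans
 * howmany(off + len, PAGE_SIZE) pages, so a maximally unaligned
 * XBF_MAX_REQUEST_SIZE transfer needs one segment more than an aligned
 * one.  The segment reserved above covers that case.
 */
#if 0
static __inline u_int
xb_example_nsegs(vm_offset_t off, size_t len)
{
	return (howmany(off + len, PAGE_SIZE));
}
#endif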

/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define	XBF_MAX_SEGMENTS_PER_REQUEST		\
	(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,	\
	     (XBF_MAX_REQUEST_SIZE / PAGE_SIZE) + 1))

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBF_MAX_REQUEST_SIZE'd.
 */
#define XBF_MAX_RING_PAGES						    \
	BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBF_MAX_SEGMENTS_PER_REQUEST) \
		       * XBF_MAX_REQUESTS)

struct xlbd_type_info
{
	int partn_shift;
	int disks_per_major;
	char *devname;
	char *diskname;
};

struct xlbd_major_info
{
	int major;
	int index;
	int usage;
	struct xlbd_type_info *type;
};

struct xb_command {
	TAILQ_ENTRY(xb_command)	cm_link;
	struct xb_softc		*cm_sc;
	u_int			cm_flags;
#define XB_CMD_FROZEN		(1<<0)
#define XB_CMD_POLLED		(1<<1)
#define XB_ON_XBQ_FREE		(1<<2)
#define XB_ON_XBQ_READY		(1<<3)
#define XB_ON_XBQ_BUSY		(1<<4)
#define XB_ON_XBQ_COMPLETE	(1<<5)
#define XB_ON_XBQ_MASK		(XB_ON_XBQ_FREE | XB_ON_XBQ_READY |	\
				 XB_ON_XBQ_BUSY | XB_ON_XBQ_COMPLETE)
	bus_dmamap_t		map;		/* DMA map for the transfer */
	uint64_t		id;		/* id of the request on the ring */
	grant_ref_t		*sg_refs;	/* grant refs, one per segment */
	struct bio		*bp;		/* bio being serviced, if any */
	grant_ref_t		gref_head;	/* head of allocated grant refs */
	void			*data;		/* raw buffer (e.g. for dumps) */
	size_t			datalen;	/* length of raw buffer */
	u_int			nseg;		/* number of DMA segments */
	int			operation;	/* BLKIF_OP_* request type */
	blkif_sector_t		sector_number;	/* starting sector of transfer */
	int			status;		/* completion status from backend */
	void			(*cm_complete)(struct xb_command *);
};

/* Indices into the xb_qstat[] queue statistics array. */
#define XBQ_FREE	0
#define XBQ_BIO		1
#define XBQ_READY	2
#define XBQ_BUSY	3
#define XBQ_COMPLETE	4
#define XBQ_COUNT	5

struct xb_qstat {
	uint32_t	q_length;
	uint32_t	q_max;
};

union xb_statrequest {
	uint32_t		ms_item;
	struct xb_qstat		ms_qstat;
};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xb_softc {
	device_t		xb_dev;
	struct disk		*xb_disk;		/* disk params */
	struct bio_queue_head   xb_bioq;		/* sort queue */
	int			xb_unit;
	int			xb_flags;
#define XB_OPEN		(1 << 0)	/* drive is open (can't shut down) */
#define XB_BARRIER	(1 << 1)	/* backend supports barriers */
#define XB_READY	(1 << 2)	/* device is ready */
#define XB_FROZEN	(1 << 3)	/* waiting for resources */
	int			vdevice;
	int			connected;
	u_int			ring_pages;	/* shared ring pages in use */
	uint32_t		max_requests;
	uint32_t		max_request_segments;
	uint32_t		max_request_blocks;
	uint32_t		max_request_size;
	grant_ref_t		ring_ref[XBF_MAX_RING_PAGES];
	blkif_front_ring_t	ring;
	unsigned int		irq;
	struct gnttab_free_callback	callback;
	TAILQ_HEAD(,xb_command)	cm_free;
	TAILQ_HEAD(,xb_command)	cm_ready;
	TAILQ_HEAD(,xb_command)	cm_busy;
	TAILQ_HEAD(,xb_command)	cm_complete;
	struct xb_qstat		xb_qstat[XBQ_COUNT];
	bus_dma_tag_t		xb_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int			users;
	struct mtx		xb_io_lock;

	struct xb_command      *shadow;		/* per-request command state */
};
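
/*
 * Illustrative sketch only (hypothetical, not compiled): xb_io_lock
 * serializes access to the ring and the command queues, so an attach
 * routine would initialize it before the queues, roughly as follows.
 */
#if 0
static __inline void
xb_example_init_lock(struct xb_softc *sc)
{
	mtx_init(&sc->xb_io_lock, "xbd i/o lock", NULL, MTX_DEF);
}
#endif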

int xlvbd_add(struct xb_softc *, blkif_sector_t sectors, int device,
	      uint16_t vdisk_info, unsigned long sector_size);
void xlvbd_del(struct xb_softc *);

#define XBQ_ADD(sc, qname)					\
	do {							\
		struct xb_qstat *qs;				\
								\
		qs = &(sc)->xb_qstat[qname];			\
		qs->q_length++;					\
		if (qs->q_length > qs->q_max)			\
			qs->q_max = qs->q_length;		\
	} while (0)

#define XBQ_REMOVE(sc, qname)	(sc)->xb_qstat[qname].q_length--

#define XBQ_INIT(sc, qname)					\
	do {							\
		(sc)->xb_qstat[qname].q_length = 0;		\
		(sc)->xb_qstat[qname].q_max = 0;		\
	} while (0)

#define XBQ_COMMAND_QUEUE(name, index)					\
	static __inline void						\
	xb_initq_ ## name (struct xb_softc *sc)				\
	{								\
		TAILQ_INIT(&sc->cm_ ## name);				\
		XBQ_INIT(sc, index);					\
	}								\
	static __inline void						\
	xb_enqueue_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != 0) {		\
			printf("command %p is on another queue, "	\
			    "flags = %#x\n", cm, cm->cm_flags);		\
			panic("command is on another queue");		\
		}							\
		TAILQ_INSERT_TAIL(&cm->cm_sc->cm_ ## name, cm, cm_link); \
		cm->cm_flags |= XB_ON_ ## index;			\
		XBQ_ADD(cm->cm_sc, index);				\
	}								\
	static __inline void						\
	xb_requeue_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != 0) {		\
			printf("command %p is on another queue, "	\
			    "flags = %#x\n", cm, cm->cm_flags);		\
			panic("command is on another queue");		\
		}							\
		TAILQ_INSERT_HEAD(&cm->cm_sc->cm_ ## name, cm, cm_link); \
		cm->cm_flags |= XB_ON_ ## index;			\
		XBQ_ADD(cm->cm_sc, index);				\
	}								\
	static __inline struct xb_command *				\
	xb_dequeue_ ## name (struct xb_softc *sc)			\
	{								\
		struct xb_command *cm;					\
									\
		if ((cm = TAILQ_FIRST(&sc->cm_ ## name)) != NULL) {	\
			if ((cm->cm_flags & XB_ON_XBQ_MASK) !=		\
			     XB_ON_ ## index) {				\
				printf("command %p not in queue, "	\
				    "flags = %#x, bit = %#x\n", cm,	\
				    cm->cm_flags, XB_ON_ ## index);	\
				panic("command not in queue");		\
			}						\
			TAILQ_REMOVE(&sc->cm_ ## name, cm, cm_link);	\
			cm->cm_flags &= ~XB_ON_ ## index;		\
			XBQ_REMOVE(sc, index);				\
		}							\
		return (cm);						\
	}								\
	static __inline void						\
	xb_remove_ ## name (struct xb_command *cm)			\
	{								\
		if ((cm->cm_flags & XB_ON_XBQ_MASK) != XB_ON_ ## index) {\
			printf("command %p not in queue, flags = %#x, " \
			    "bit = %#x\n", cm, cm->cm_flags,		\
			    XB_ON_ ## index);				\
			panic("command not in queue");			\
		}							\
		TAILQ_REMOVE(&cm->cm_sc->cm_ ## name, cm, cm_link);	\
		cm->cm_flags &= ~XB_ON_ ## index;			\
		XBQ_REMOVE(cm->cm_sc, index);				\
	}								\
/* Incomplete tag; forces a ';' after each XBQ_COMMAND_QUEUE() use. */	\
struct hack

XBQ_COMMAND_QUEUE(free, XBQ_FREE);
XBQ_COMMAND_QUEUE(ready, XBQ_READY);
XBQ_COMMAND_QUEUE(busy, XBQ_BUSY);
XBQ_COMMAND_QUEUE(complete, XBQ_COMPLETE);
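
/*
 * Illustrative sketch only (hypothetical, not compiled): the intended
 * life cycle of an xb_command through the queues generated above is
 * free -> ready -> busy -> complete -> free.
 */
#if 0
static __inline void
xb_example_start(struct xb_softc *sc)
{
	struct xb_command *cm;

	if ((cm = xb_dequeue_free(sc)) == NULL)
		return;			/* All commands are in flight. */
	/* ... attach a bio or a data buffer to cm here ... */
	xb_enqueue_ready(cm);		/* Staged, waiting for ring space. */
}
#endif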

static __inline void
xb_initq_bio(struct xb_softc *sc)
{
	bioq_init(&sc->xb_bioq);
	XBQ_INIT(sc, XBQ_BIO);
}

static __inline void
xb_enqueue_bio(struct xb_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xb_bioq, bp);
	XBQ_ADD(sc, XBQ_BIO);
}

static __inline void
xb_requeue_bio(struct xb_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xb_bioq, bp);
	XBQ_ADD(sc, XBQ_BIO);
}

static __inline struct bio *
xb_dequeue_bio(struct xb_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xb_bioq)) != NULL) {
		bioq_remove(&sc->xb_bioq, bp);
		XBQ_REMOVE(sc, XBQ_BIO);
	}
	return (bp);
}
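
/*
 * Illustrative sketch only (hypothetical, not compiled): a consumer
 * draining the sorted bio queue, requeueing at the head when no free
 * command is available, as a start routine would.
 */
#if 0
static __inline void
xb_example_drain(struct xb_softc *sc)
{
	struct bio *bp;
	struct xb_command *cm;

	while ((bp = xb_dequeue_bio(sc)) != NULL) {
		if ((cm = xb_dequeue_free(sc)) == NULL) {
			xb_requeue_bio(sc, bp);	/* Retry when resources free. */
			break;
		}
		cm->bp = bp;
		/* ... DMA-map bp and enqueue cm for submission here ... */
	}
}
#endif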

#endif /* __XEN_DRIVERS_BLOCK_H__ */