xref: /freebsd/sys/cam/ctl/ctl_backend_block.c (revision 2a2443d8332be10a2d2b4421eb9c180d875bd95c)
1130f4520SKenneth D. Merry /*-
2130f4520SKenneth D. Merry  * Copyright (c) 2003 Silicon Graphics International Corp.
3130f4520SKenneth D. Merry  * Copyright (c) 2009-2011 Spectra Logic Corporation
4130f4520SKenneth D. Merry  * All rights reserved.
5130f4520SKenneth D. Merry  *
6130f4520SKenneth D. Merry  * Redistribution and use in source and binary forms, with or without
7130f4520SKenneth D. Merry  * modification, are permitted provided that the following conditions
8130f4520SKenneth D. Merry  * are met:
9130f4520SKenneth D. Merry  * 1. Redistributions of source code must retain the above copyright
10130f4520SKenneth D. Merry  *    notice, this list of conditions, and the following disclaimer,
11130f4520SKenneth D. Merry  *    without modification.
12130f4520SKenneth D. Merry  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13130f4520SKenneth D. Merry  *    substantially similar to the "NO WARRANTY" disclaimer below
14130f4520SKenneth D. Merry  *    ("Disclaimer") and any redistribution must be conditioned upon
15130f4520SKenneth D. Merry  *    including a substantially similar Disclaimer requirement for further
16130f4520SKenneth D. Merry  *    binary redistribution.
17130f4520SKenneth D. Merry  *
18130f4520SKenneth D. Merry  * NO WARRANTY
19130f4520SKenneth D. Merry  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20130f4520SKenneth D. Merry  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21130f4520SKenneth D. Merry  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22130f4520SKenneth D. Merry  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23130f4520SKenneth D. Merry  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24130f4520SKenneth D. Merry  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25130f4520SKenneth D. Merry  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26130f4520SKenneth D. Merry  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
27130f4520SKenneth D. Merry  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
28130f4520SKenneth D. Merry  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29130f4520SKenneth D. Merry  * POSSIBILITY OF SUCH DAMAGES.
30130f4520SKenneth D. Merry  *
31130f4520SKenneth D. Merry  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
32130f4520SKenneth D. Merry  */
33130f4520SKenneth D. Merry /*
34130f4520SKenneth D. Merry  * CAM Target Layer driver backend for block devices.
35130f4520SKenneth D. Merry  *
36130f4520SKenneth D. Merry  * Author: Ken Merry <ken@FreeBSD.org>
37130f4520SKenneth D. Merry  */
38130f4520SKenneth D. Merry #include <sys/cdefs.h>
39130f4520SKenneth D. Merry __FBSDID("$FreeBSD$");
40130f4520SKenneth D. Merry 
41130f4520SKenneth D. Merry #include <opt_kdtrace.h>
42130f4520SKenneth D. Merry 
43130f4520SKenneth D. Merry #include <sys/param.h>
44130f4520SKenneth D. Merry #include <sys/systm.h>
45130f4520SKenneth D. Merry #include <sys/kernel.h>
46130f4520SKenneth D. Merry #include <sys/types.h>
47130f4520SKenneth D. Merry #include <sys/kthread.h>
48130f4520SKenneth D. Merry #include <sys/bio.h>
49130f4520SKenneth D. Merry #include <sys/fcntl.h>
50130f4520SKenneth D. Merry #include <sys/lock.h>
51130f4520SKenneth D. Merry #include <sys/mutex.h>
52130f4520SKenneth D. Merry #include <sys/condvar.h>
53130f4520SKenneth D. Merry #include <sys/malloc.h>
54130f4520SKenneth D. Merry #include <sys/conf.h>
55130f4520SKenneth D. Merry #include <sys/ioccom.h>
56130f4520SKenneth D. Merry #include <sys/queue.h>
57130f4520SKenneth D. Merry #include <sys/sbuf.h>
58130f4520SKenneth D. Merry #include <sys/endian.h>
59130f4520SKenneth D. Merry #include <sys/uio.h>
60130f4520SKenneth D. Merry #include <sys/buf.h>
61130f4520SKenneth D. Merry #include <sys/taskqueue.h>
62130f4520SKenneth D. Merry #include <sys/vnode.h>
63130f4520SKenneth D. Merry #include <sys/namei.h>
64130f4520SKenneth D. Merry #include <sys/mount.h>
65130f4520SKenneth D. Merry #include <sys/disk.h>
67130f4520SKenneth D. Merry #include <sys/filedesc.h>
68130f4520SKenneth D. Merry #include <sys/proc.h>
69130f4520SKenneth D. Merry #include <sys/pcpu.h>
70130f4520SKenneth D. Merry #include <sys/module.h>
71130f4520SKenneth D. Merry #include <sys/sdt.h>
72130f4520SKenneth D. Merry #include <sys/devicestat.h>
73130f4520SKenneth D. Merry #include <sys/sysctl.h>
74130f4520SKenneth D. Merry 
75130f4520SKenneth D. Merry #include <geom/geom.h>
76130f4520SKenneth D. Merry 
77130f4520SKenneth D. Merry #include <cam/cam.h>
78130f4520SKenneth D. Merry #include <cam/scsi/scsi_all.h>
79130f4520SKenneth D. Merry #include <cam/scsi/scsi_da.h>
80130f4520SKenneth D. Merry #include <cam/ctl/ctl_io.h>
81130f4520SKenneth D. Merry #include <cam/ctl/ctl.h>
82130f4520SKenneth D. Merry #include <cam/ctl/ctl_backend.h>
83130f4520SKenneth D. Merry #include <cam/ctl/ctl_frontend_internal.h>
84130f4520SKenneth D. Merry #include <cam/ctl/ctl_ioctl.h>
85130f4520SKenneth D. Merry #include <cam/ctl/ctl_scsi_all.h>
86130f4520SKenneth D. Merry #include <cam/ctl/ctl_error.h>
87130f4520SKenneth D. Merry 
88130f4520SKenneth D. Merry /*
89130f4520SKenneth D. Merry  * The idea here is that we'll allocate enough S/G space to hold a 16MB
90130f4520SKenneth D. Merry  * I/O.  If we get an I/O larger than that, we'll reject it.
91130f4520SKenneth D. Merry  */
92130f4520SKenneth D. Merry #define	CTLBLK_MAX_IO_SIZE	(16 * 1024 * 1024)
93130f4520SKenneth D. Merry #define	CTLBLK_MAX_SEGS		((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
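/*
 * Worked example (assuming the common MAXPHYS of 128 KiB; MAXPHYS is a
 * compile-time tunable): 16 MiB / 128 KiB = 128 segments, plus one spare,
 * so each ctl_be_block_io carries 129 S/G entries.
 */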
94130f4520SKenneth D. Merry 
95130f4520SKenneth D. Merry #ifdef CTLBLK_DEBUG
96130f4520SKenneth D. Merry #define DPRINTF(fmt, args...) \
97130f4520SKenneth D. Merry     printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
98130f4520SKenneth D. Merry #else
99130f4520SKenneth D. Merry #define DPRINTF(fmt, args...) do {} while(0)
100130f4520SKenneth D. Merry #endif
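/*
 * DPRINTF() tracing is compiled out by default; to enable it, define
 * CTLBLK_DEBUG at build time (for example by adding -DCTLBLK_DEBUG to the
 * kernel compile flags; the exact mechanism depends on the build setup).
 */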
101130f4520SKenneth D. Merry 
102130f4520SKenneth D. Merry SDT_PROVIDER_DEFINE(cbb);
103130f4520SKenneth D. Merry 
104130f4520SKenneth D. Merry typedef enum {
105130f4520SKenneth D. Merry 	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
106130f4520SKenneth D. Merry 	CTL_BE_BLOCK_LUN_CONFIG_ERR	= 0x02,
107130f4520SKenneth D. Merry 	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
108130f4520SKenneth D. Merry 	CTL_BE_BLOCK_LUN_MULTI_THREAD	= 0x08
109130f4520SKenneth D. Merry } ctl_be_block_lun_flags;
110130f4520SKenneth D. Merry 
111130f4520SKenneth D. Merry typedef enum {
112130f4520SKenneth D. Merry 	CTL_BE_BLOCK_NONE,
113130f4520SKenneth D. Merry 	CTL_BE_BLOCK_DEV,
114130f4520SKenneth D. Merry 	CTL_BE_BLOCK_FILE
115130f4520SKenneth D. Merry } ctl_be_block_type;
116130f4520SKenneth D. Merry 
117130f4520SKenneth D. Merry struct ctl_be_block_devdata {
118130f4520SKenneth D. Merry 	struct cdev *cdev;
119130f4520SKenneth D. Merry 	struct cdevsw *csw;
120130f4520SKenneth D. Merry 	int dev_ref;
121130f4520SKenneth D. Merry };
122130f4520SKenneth D. Merry 
123130f4520SKenneth D. Merry struct ctl_be_block_filedata {
124130f4520SKenneth D. Merry 	struct ucred *cred;
125130f4520SKenneth D. Merry };
126130f4520SKenneth D. Merry 
127130f4520SKenneth D. Merry union ctl_be_block_bedata {
128130f4520SKenneth D. Merry 	struct ctl_be_block_devdata dev;
129130f4520SKenneth D. Merry 	struct ctl_be_block_filedata file;
130130f4520SKenneth D. Merry };
131130f4520SKenneth D. Merry 
132130f4520SKenneth D. Merry struct ctl_be_block_io;
133130f4520SKenneth D. Merry struct ctl_be_block_lun;
134130f4520SKenneth D. Merry 
135130f4520SKenneth D. Merry typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
136130f4520SKenneth D. Merry 			       struct ctl_be_block_io *beio);
137130f4520SKenneth D. Merry 
138130f4520SKenneth D. Merry /*
139130f4520SKenneth D. Merry  * Backend LUN structure.  There is a 1:1 mapping between a block device
140130f4520SKenneth D. Merry  * and a backend block LUN, and between a backend block LUN and a CTL LUN.
141130f4520SKenneth D. Merry  */
142130f4520SKenneth D. Merry struct ctl_be_block_lun {
143130f4520SKenneth D. Merry 	struct ctl_block_disk *disk;
144130f4520SKenneth D. Merry 	char lunname[32];
145130f4520SKenneth D. Merry 	char *dev_path;
146130f4520SKenneth D. Merry 	ctl_be_block_type dev_type;
147130f4520SKenneth D. Merry 	struct vnode *vn;
148130f4520SKenneth D. Merry 	union ctl_be_block_bedata backend;
149130f4520SKenneth D. Merry 	cbb_dispatch_t dispatch;
150130f4520SKenneth D. Merry 	cbb_dispatch_t lun_flush;
151130f4520SKenneth D. Merry 	struct mtx lock;
152130f4520SKenneth D. Merry 	uma_zone_t lun_zone;
153130f4520SKenneth D. Merry 	uint64_t size_blocks;
154130f4520SKenneth D. Merry 	uint64_t size_bytes;
155130f4520SKenneth D. Merry 	uint32_t blocksize;
156130f4520SKenneth D. Merry 	int blocksize_shift;
157130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
158130f4520SKenneth D. Merry 	struct devstat *disk_stats;
159130f4520SKenneth D. Merry 	ctl_be_block_lun_flags flags;
160130f4520SKenneth D. Merry 	STAILQ_ENTRY(ctl_be_block_lun) links;
161130f4520SKenneth D. Merry 	struct ctl_be_lun ctl_be_lun;
162130f4520SKenneth D. Merry 	struct taskqueue *io_taskqueue;
163130f4520SKenneth D. Merry 	struct task io_task;
164130f4520SKenneth D. Merry 	int num_threads;
165130f4520SKenneth D. Merry 	STAILQ_HEAD(, ctl_io_hdr) input_queue;
166130f4520SKenneth D. Merry 	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
167130f4520SKenneth D. Merry 	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
168130f4520SKenneth D. Merry };
169130f4520SKenneth D. Merry 
170130f4520SKenneth D. Merry /*
171130f4520SKenneth D. Merry  * Overall softc structure for the block backend module.
172130f4520SKenneth D. Merry  */
173130f4520SKenneth D. Merry struct ctl_be_block_softc {
174130f4520SKenneth D. Merry 	STAILQ_HEAD(, ctl_be_block_io)   beio_free_queue;
175130f4520SKenneth D. Merry 	struct mtx			 lock;
176130f4520SKenneth D. Merry 	int				 prealloc_beio;
177130f4520SKenneth D. Merry 	int				 num_disks;
178130f4520SKenneth D. Merry 	STAILQ_HEAD(, ctl_block_disk)	 disk_list;
179130f4520SKenneth D. Merry 	int				 num_luns;
180130f4520SKenneth D. Merry 	STAILQ_HEAD(, ctl_be_block_lun)	 lun_list;
181130f4520SKenneth D. Merry };
182130f4520SKenneth D. Merry 
183130f4520SKenneth D. Merry static struct ctl_be_block_softc backend_block_softc;
184130f4520SKenneth D. Merry 
185130f4520SKenneth D. Merry /*
186130f4520SKenneth D. Merry  * Per-I/O information.
187130f4520SKenneth D. Merry  */
188130f4520SKenneth D. Merry struct ctl_be_block_io {
189130f4520SKenneth D. Merry 	union ctl_io			*io;
190130f4520SKenneth D. Merry 	struct ctl_sg_entry		sg_segs[CTLBLK_MAX_SEGS];
191130f4520SKenneth D. Merry 	struct iovec			xiovecs[CTLBLK_MAX_SEGS];
192130f4520SKenneth D. Merry 	int				bio_cmd;
193130f4520SKenneth D. Merry 	int				bio_flags;
194130f4520SKenneth D. Merry 	int				num_segs;
195130f4520SKenneth D. Merry 	int				num_bios_sent;
196130f4520SKenneth D. Merry 	int				num_bios_done;
197130f4520SKenneth D. Merry 	int				send_complete;
198130f4520SKenneth D. Merry 	int				num_errors;
199130f4520SKenneth D. Merry 	struct bintime			ds_t0;
200130f4520SKenneth D. Merry 	devstat_tag_type		ds_tag_type;
201130f4520SKenneth D. Merry 	devstat_trans_flags		ds_trans_type;
202130f4520SKenneth D. Merry 	uint64_t			io_len;
203130f4520SKenneth D. Merry 	uint64_t			io_offset;
204130f4520SKenneth D. Merry 	struct ctl_be_block_softc	*softc;
205130f4520SKenneth D. Merry 	struct ctl_be_block_lun		*lun;
206130f4520SKenneth D. Merry 	STAILQ_ENTRY(ctl_be_block_io)	links;
207130f4520SKenneth D. Merry };
208130f4520SKenneth D. Merry 
209130f4520SKenneth D. Merry static int cbb_num_threads = 14;
210130f4520SKenneth D. Merry TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
211130f4520SKenneth D. Merry SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
212130f4520SKenneth D. Merry 	    "CAM Target Layer Block Backend");
213130f4520SKenneth D. Merry SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
214130f4520SKenneth D. Merry            &cbb_num_threads, 0, "Number of threads per backing file");
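/*
 * Usage sketch: since cbb_num_threads is exported both as a loader tunable
 * and as a read/write sysctl, it can be set at boot in loader.conf
 * ("kern.cam.ctl.block.num_threads=14") or adjusted at runtime with
 * sysctl(8), e.g. "sysctl kern.cam.ctl.block.num_threads=8".
 */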
215130f4520SKenneth D. Merry 
216130f4520SKenneth D. Merry static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
217130f4520SKenneth D. Merry static void ctl_free_beio(struct ctl_be_block_io *beio);
218130f4520SKenneth D. Merry static int ctl_grow_beio(struct ctl_be_block_softc *softc, int count);
219130f4520SKenneth D. Merry #if 0
220130f4520SKenneth D. Merry static void ctl_shrink_beio(struct ctl_be_block_softc *softc);
221130f4520SKenneth D. Merry #endif
222130f4520SKenneth D. Merry static void ctl_complete_beio(struct ctl_be_block_io *beio);
223130f4520SKenneth D. Merry static int ctl_be_block_move_done(union ctl_io *io);
224130f4520SKenneth D. Merry static void ctl_be_block_biodone(struct bio *bio);
225130f4520SKenneth D. Merry static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
226130f4520SKenneth D. Merry 				    struct ctl_be_block_io *beio);
227130f4520SKenneth D. Merry static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
228130f4520SKenneth D. Merry 				       struct ctl_be_block_io *beio);
229130f4520SKenneth D. Merry static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
230130f4520SKenneth D. Merry 				   struct ctl_be_block_io *beio);
231130f4520SKenneth D. Merry static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
232130f4520SKenneth D. Merry 				      struct ctl_be_block_io *beio);
233130f4520SKenneth D. Merry static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
234130f4520SKenneth D. Merry 				    union ctl_io *io);
235130f4520SKenneth D. Merry static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
236130f4520SKenneth D. Merry 				  union ctl_io *io);
237130f4520SKenneth D. Merry static void ctl_be_block_worker(void *context, int pending);
238130f4520SKenneth D. Merry static int ctl_be_block_submit(union ctl_io *io);
239130f4520SKenneth D. Merry static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
240130f4520SKenneth D. Merry 				   int flag, struct thread *td);
241130f4520SKenneth D. Merry static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
242130f4520SKenneth D. Merry 				  struct ctl_lun_req *req);
243130f4520SKenneth D. Merry static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
244130f4520SKenneth D. Merry 				 struct ctl_lun_req *req);
245130f4520SKenneth D. Merry static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
246130f4520SKenneth D. Merry static int ctl_be_block_open(struct ctl_be_block_softc *softc,
247130f4520SKenneth D. Merry 			     struct ctl_be_block_lun *be_lun,
248130f4520SKenneth D. Merry 			     struct ctl_lun_req *req);
249130f4520SKenneth D. Merry static int ctl_be_block_create(struct ctl_be_block_softc *softc,
250130f4520SKenneth D. Merry 			       struct ctl_lun_req *req);
251130f4520SKenneth D. Merry static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
252130f4520SKenneth D. Merry 			   struct ctl_lun_req *req);
253130f4520SKenneth D. Merry static void ctl_be_block_lun_shutdown(void *be_lun);
254130f4520SKenneth D. Merry static void ctl_be_block_lun_config_status(void *be_lun,
255130f4520SKenneth D. Merry 					   ctl_lun_config_status status);
256130f4520SKenneth D. Merry static int ctl_be_block_config_write(union ctl_io *io);
257130f4520SKenneth D. Merry static int ctl_be_block_config_read(union ctl_io *io);
258130f4520SKenneth D. Merry static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
259130f4520SKenneth D. Merry int ctl_be_block_init(void);
260130f4520SKenneth D. Merry 
261130f4520SKenneth D. Merry static struct ctl_backend_driver ctl_be_block_driver =
262130f4520SKenneth D. Merry {
263*2a2443d8SKenneth D. Merry 	.name = "block",
264*2a2443d8SKenneth D. Merry 	.flags = CTL_BE_FLAG_HAS_CONFIG,
265*2a2443d8SKenneth D. Merry 	.init = ctl_be_block_init,
266*2a2443d8SKenneth D. Merry 	.data_submit = ctl_be_block_submit,
267*2a2443d8SKenneth D. Merry 	.data_move_done = ctl_be_block_move_done,
268*2a2443d8SKenneth D. Merry 	.config_read = ctl_be_block_config_read,
269*2a2443d8SKenneth D. Merry 	.config_write = ctl_be_block_config_write,
270*2a2443d8SKenneth D. Merry 	.ioctl = ctl_be_block_ioctl,
271*2a2443d8SKenneth D. Merry 	.lun_info = ctl_be_block_lun_info
272130f4520SKenneth D. Merry };
273130f4520SKenneth D. Merry 
274130f4520SKenneth D. Merry MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
275130f4520SKenneth D. Merry CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
276130f4520SKenneth D. Merry 
277130f4520SKenneth D. Merry static struct ctl_be_block_io *
278130f4520SKenneth D. Merry ctl_alloc_beio(struct ctl_be_block_softc *softc)
279130f4520SKenneth D. Merry {
280130f4520SKenneth D. Merry 	struct ctl_be_block_io *beio;
281130f4520SKenneth D. Merry 	int count;
282130f4520SKenneth D. Merry 
283130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
284130f4520SKenneth D. Merry 
285130f4520SKenneth D. Merry 	beio = STAILQ_FIRST(&softc->beio_free_queue);
286130f4520SKenneth D. Merry 	if (beio != NULL) {
287130f4520SKenneth D. Merry 		STAILQ_REMOVE(&softc->beio_free_queue, beio,
288130f4520SKenneth D. Merry 			      ctl_be_block_io, links);
289130f4520SKenneth D. Merry 	}
290130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
291130f4520SKenneth D. Merry 
292130f4520SKenneth D. Merry 	if (beio != NULL) {
293130f4520SKenneth D. Merry 		bzero(beio, sizeof(*beio));
294130f4520SKenneth D. Merry 		beio->softc = softc;
295130f4520SKenneth D. Merry 		return (beio);
296130f4520SKenneth D. Merry 	}
297130f4520SKenneth D. Merry 
298130f4520SKenneth D. Merry 	for (;;) {
299130f4520SKenneth D. Merry 
300130f4520SKenneth D. Merry 		count = ctl_grow_beio(softc, /*count*/ 10);
301130f4520SKenneth D. Merry 
302130f4520SKenneth D. Merry 		/*
303130f4520SKenneth D. Merry 		 * This shouldn't be possible, since ctl_grow_beio() uses a
304130f4520SKenneth D. Merry 		 * blocking malloc.
305130f4520SKenneth D. Merry 		 */
306130f4520SKenneth D. Merry 		if (count == 0)
307130f4520SKenneth D. Merry 			return (NULL);
308130f4520SKenneth D. Merry 
309130f4520SKenneth D. Merry 		/*
310130f4520SKenneth D. Merry 		 * Since we have to drop the lock when we're allocating beio
311130f4520SKenneth D. Merry 		 * structures, it's possible someone else will grab the beios
312130f4520SKenneth D. Merry 		 * we've just allocated before we reacquire the lock.
313130f4520SKenneth D. Merry 		 */
314130f4520SKenneth D. Merry 		mtx_lock(&softc->lock);
315130f4520SKenneth D. Merry 		beio = STAILQ_FIRST(&softc->beio_free_queue);
316130f4520SKenneth D. Merry 		if (beio != NULL) {
317130f4520SKenneth D. Merry 			STAILQ_REMOVE(&softc->beio_free_queue, beio,
318130f4520SKenneth D. Merry 				      ctl_be_block_io, links);
319130f4520SKenneth D. Merry 		}
320130f4520SKenneth D. Merry 		mtx_unlock(&softc->lock);
321130f4520SKenneth D. Merry 
322130f4520SKenneth D. Merry 		if (beio != NULL) {
323130f4520SKenneth D. Merry 			bzero(beio, sizeof(*beio));
324130f4520SKenneth D. Merry 			beio->softc = softc;
325130f4520SKenneth D. Merry 			break;
326130f4520SKenneth D. Merry 		}
327130f4520SKenneth D. Merry 	}
328130f4520SKenneth D. Merry 	return (beio);
329130f4520SKenneth D. Merry }
330130f4520SKenneth D. Merry 
331130f4520SKenneth D. Merry static void
332130f4520SKenneth D. Merry ctl_free_beio(struct ctl_be_block_io *beio)
333130f4520SKenneth D. Merry {
334130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
335130f4520SKenneth D. Merry 	int duplicate_free;
336130f4520SKenneth D. Merry 	int i;
337130f4520SKenneth D. Merry 
338130f4520SKenneth D. Merry 	softc = beio->softc;
339130f4520SKenneth D. Merry 	duplicate_free = 0;
340130f4520SKenneth D. Merry 
341130f4520SKenneth D. Merry 	for (i = 0; i < beio->num_segs; i++) {
342130f4520SKenneth D. Merry 		if (beio->sg_segs[i].addr == NULL)
343130f4520SKenneth D. Merry 			duplicate_free++;
344130f4520SKenneth D. Merry 
345130f4520SKenneth D. Merry 		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
346130f4520SKenneth D. Merry 		beio->sg_segs[i].addr = NULL;
347130f4520SKenneth D. Merry 	}
348130f4520SKenneth D. Merry 
349130f4520SKenneth D. Merry 	if (duplicate_free > 0) {
350130f4520SKenneth D. Merry 		printf("%s: %d duplicate frees out of %d segments\n", __func__,
351130f4520SKenneth D. Merry 		       duplicate_free, beio->num_segs);
352130f4520SKenneth D. Merry 	}
353130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
354130f4520SKenneth D. Merry 	STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
355130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
356130f4520SKenneth D. Merry }
357130f4520SKenneth D. Merry 
358130f4520SKenneth D. Merry static int
359130f4520SKenneth D. Merry ctl_grow_beio(struct ctl_be_block_softc *softc, int count)
360130f4520SKenneth D. Merry {
361130f4520SKenneth D. Merry 	int i;
362130f4520SKenneth D. Merry 
363130f4520SKenneth D. Merry 	for (i = 0; i < count; i++) {
364130f4520SKenneth D. Merry 		struct ctl_be_block_io *beio;
365130f4520SKenneth D. Merry 
366130f4520SKenneth D. Merry 		beio = (struct ctl_be_block_io *)malloc(sizeof(*beio),
367130f4520SKenneth D. Merry 							   M_CTLBLK,
368130f4520SKenneth D. Merry 							   M_WAITOK | M_ZERO);
369130f4520SKenneth D. Merry 		if (beio == NULL)
370130f4520SKenneth D. Merry 			break;
371130f4520SKenneth D. Merry 
372130f4520SKenneth D. Merry 		bzero(beio, sizeof(*beio));
373130f4520SKenneth D. Merry 		beio->softc = softc;
374130f4520SKenneth D. Merry 		mtx_lock(&softc->lock);
375130f4520SKenneth D. Merry 		STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
376130f4520SKenneth D. Merry 		mtx_unlock(&softc->lock);
377130f4520SKenneth D. Merry 	}
378130f4520SKenneth D. Merry 
379130f4520SKenneth D. Merry 	return (i);
380130f4520SKenneth D. Merry }
381130f4520SKenneth D. Merry 
382130f4520SKenneth D. Merry #if 0
383130f4520SKenneth D. Merry static void
384130f4520SKenneth D. Merry ctl_shrink_beio(struct ctl_be_block_softc *softc)
385130f4520SKenneth D. Merry {
386130f4520SKenneth D. Merry 	struct ctl_be_block_io *beio, *beio_tmp;
387130f4520SKenneth D. Merry 
388130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
389130f4520SKenneth D. Merry 	STAILQ_FOREACH_SAFE(beio, &softc->beio_free_queue, links, beio_tmp) {
390130f4520SKenneth D. Merry 		STAILQ_REMOVE(&softc->beio_free_queue, beio,
391130f4520SKenneth D. Merry 			      ctl_be_block_io, links);
392130f4520SKenneth D. Merry 		free(beio, M_CTLBLK);
393130f4520SKenneth D. Merry 	}
394130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
395130f4520SKenneth D. Merry }
396130f4520SKenneth D. Merry #endif
397130f4520SKenneth D. Merry 
398130f4520SKenneth D. Merry static void
399130f4520SKenneth D. Merry ctl_complete_beio(struct ctl_be_block_io *beio)
400130f4520SKenneth D. Merry {
401130f4520SKenneth D. Merry 	union ctl_io *io;
402130f4520SKenneth D. Merry 	int io_len;
403130f4520SKenneth D. Merry 
404130f4520SKenneth D. Merry 	io = beio->io;
405130f4520SKenneth D. Merry 
406130f4520SKenneth D. Merry 	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
407130f4520SKenneth D. Merry 		io_len = beio->io_len;
408130f4520SKenneth D. Merry 	else
409130f4520SKenneth D. Merry 		io_len = 0;
410130f4520SKenneth D. Merry 
411130f4520SKenneth D. Merry 	devstat_end_transaction(beio->lun->disk_stats,
412130f4520SKenneth D. Merry 				/*bytes*/ io_len,
413130f4520SKenneth D. Merry 				beio->ds_tag_type,
414130f4520SKenneth D. Merry 				beio->ds_trans_type,
415130f4520SKenneth D. Merry 				/*now*/ NULL,
416130f4520SKenneth D. Merry 				/*then*/&beio->ds_t0);
417130f4520SKenneth D. Merry 
418130f4520SKenneth D. Merry 	ctl_free_beio(beio);
419130f4520SKenneth D. Merry 	ctl_done(io);
420130f4520SKenneth D. Merry }
421130f4520SKenneth D. Merry 
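/*
 * Called by CTL (via the be_move_done hook) once a datamove between the
 * front end and this backend's S/G list has completed.
 */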
422130f4520SKenneth D. Merry static int
423130f4520SKenneth D. Merry ctl_be_block_move_done(union ctl_io *io)
424130f4520SKenneth D. Merry {
425130f4520SKenneth D. Merry 	struct ctl_be_block_io *beio;
426130f4520SKenneth D. Merry 	struct ctl_be_block_lun *be_lun;
427130f4520SKenneth D. Merry #ifdef CTL_TIME_IO
428130f4520SKenneth D. Merry 	struct bintime cur_bt;
429130f4520SKenneth D. Merry #endif
430130f4520SKenneth D. Merry 
431130f4520SKenneth D. Merry 	beio = (struct ctl_be_block_io *)
432130f4520SKenneth D. Merry 		io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
433130f4520SKenneth D. Merry 
434130f4520SKenneth D. Merry 	be_lun = beio->lun;
435130f4520SKenneth D. Merry 
436130f4520SKenneth D. Merry 	DPRINTF("entered\n");
437130f4520SKenneth D. Merry 
438130f4520SKenneth D. Merry #ifdef CTL_TIME_IO
439130f4520SKenneth D. Merry 	getbintime(&cur_bt);
440130f4520SKenneth D. Merry 	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
441130f4520SKenneth D. Merry 	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
442130f4520SKenneth D. Merry 	io->io_hdr.num_dmas++;
443130f4520SKenneth D. Merry #endif
444130f4520SKenneth D. Merry 
445130f4520SKenneth D. Merry 	/*
446130f4520SKenneth D. Merry 	 * We set status at this point for read commands, and write
447130f4520SKenneth D. Merry 	 * commands with errors.
448130f4520SKenneth D. Merry 	 */
449130f4520SKenneth D. Merry 	if ((beio->bio_cmd == BIO_READ)
450130f4520SKenneth D. Merry 	 && (io->io_hdr.port_status == 0)
451130f4520SKenneth D. Merry 	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
452130f4520SKenneth D. Merry 	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
453130f4520SKenneth D. Merry 		ctl_set_success(&io->scsiio);
454130f4520SKenneth D. Merry 	else if ((io->io_hdr.port_status != 0)
455130f4520SKenneth D. Merry 	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
456130f4520SKenneth D. Merry 	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
457130f4520SKenneth D. Merry 		/*
458130f4520SKenneth D. Merry 		 * For hardware error sense keys, the sense key
459130f4520SKenneth D. Merry 		 * specific value is defined to be a retry count,
460130f4520SKenneth D. Merry 		 * but we use it to pass back an internal FETD
461130f4520SKenneth D. Merry 		 * error code.  XXX KDM  Hopefully the FETD is only
462130f4520SKenneth D. Merry 		 * using 16 bits for an error code, since that's
463130f4520SKenneth D. Merry 		 * all the space we have in the sks field.
464130f4520SKenneth D. Merry 		 */
465130f4520SKenneth D. Merry 		ctl_set_internal_failure(&io->scsiio,
466130f4520SKenneth D. Merry 					 /*sks_valid*/ 1,
467130f4520SKenneth D. Merry 					 /*retry_count*/
468130f4520SKenneth D. Merry 					 io->io_hdr.port_status);
469130f4520SKenneth D. Merry 	}
470130f4520SKenneth D. Merry 
471130f4520SKenneth D. Merry 	/*
472130f4520SKenneth D. Merry 	 * If this is a read, or a write with errors, it is done.
473130f4520SKenneth D. Merry 	 */
474130f4520SKenneth D. Merry 	if ((beio->bio_cmd == BIO_READ)
475130f4520SKenneth D. Merry 	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
476130f4520SKenneth D. Merry 	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
477130f4520SKenneth D. Merry 		ctl_complete_beio(beio);
478130f4520SKenneth D. Merry 		return (0);
479130f4520SKenneth D. Merry 	}
480130f4520SKenneth D. Merry 
481130f4520SKenneth D. Merry 	/*
482130f4520SKenneth D. Merry 	 * At this point, we have a write and the DMA completed
483130f4520SKenneth D. Merry 	 * successfully.  We now have to queue it to the task queue to
484130f4520SKenneth D. Merry 	 * execute the backend I/O.  That is because we do blocking
485130f4520SKenneth D. Merry 	 * memory allocations, and in the file backing case, blocking I/O.
486130f4520SKenneth D. Merry 	 * This move done routine is generally called in the SIM's
487130f4520SKenneth D. Merry 	 * interrupt context, and therefore we cannot block.
488130f4520SKenneth D. Merry 	 */
489130f4520SKenneth D. Merry 	mtx_lock(&be_lun->lock);
490130f4520SKenneth D. Merry 	/*
491130f4520SKenneth D. Merry 	 * XXX KDM make sure that links is okay to use at this point.
492130f4520SKenneth D. Merry 	 * Otherwise, we either need to add another field to ctl_io_hdr,
493130f4520SKenneth D. Merry 	 * or deal with resource allocation here.
494130f4520SKenneth D. Merry 	 */
495130f4520SKenneth D. Merry 	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
496130f4520SKenneth D. Merry 	mtx_unlock(&be_lun->lock);
497130f4520SKenneth D. Merry 
498130f4520SKenneth D. Merry 	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
499130f4520SKenneth D. Merry 
500130f4520SKenneth D. Merry 	return (0);
501130f4520SKenneth D. Merry }
502130f4520SKenneth D. Merry 
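/*
 * bio_done callback for bios issued to a backing device; tallies errors
 * and, once the last outstanding bio for the beio completes, finishes or
 * continues the CTL I/O.
 */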
503130f4520SKenneth D. Merry static void
504130f4520SKenneth D. Merry ctl_be_block_biodone(struct bio *bio)
505130f4520SKenneth D. Merry {
506130f4520SKenneth D. Merry 	struct ctl_be_block_io *beio;
507130f4520SKenneth D. Merry 	struct ctl_be_block_lun *be_lun;
508130f4520SKenneth D. Merry 	union ctl_io *io;
509130f4520SKenneth D. Merry 
510130f4520SKenneth D. Merry 	beio = bio->bio_caller1;
511130f4520SKenneth D. Merry 	be_lun = beio->lun;
512130f4520SKenneth D. Merry 	io = beio->io;
513130f4520SKenneth D. Merry 
514130f4520SKenneth D. Merry 	DPRINTF("entered\n");
515130f4520SKenneth D. Merry 
516130f4520SKenneth D. Merry 	mtx_lock(&be_lun->lock);
517130f4520SKenneth D. Merry 	if (bio->bio_error != 0)
518130f4520SKenneth D. Merry 		beio->num_errors++;
519130f4520SKenneth D. Merry 
520130f4520SKenneth D. Merry 	beio->num_bios_done++;
521130f4520SKenneth D. Merry 
522130f4520SKenneth D. Merry 	/*
523130f4520SKenneth D. Merry 	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
524130f4520SKenneth D. Merry 	 * during the free might cause it to complain.
525130f4520SKenneth D. Merry 	 */
526130f4520SKenneth D. Merry 	g_destroy_bio(bio);
527130f4520SKenneth D. Merry 
528130f4520SKenneth D. Merry 	/*
529130f4520SKenneth D. Merry 	 * If the send complete bit isn't set, or we aren't the last I/O to
530130f4520SKenneth D. Merry 	 * complete, then we're done.
531130f4520SKenneth D. Merry 	 */
532130f4520SKenneth D. Merry 	if ((beio->send_complete == 0)
533130f4520SKenneth D. Merry 	 || (beio->num_bios_done < beio->num_bios_sent)) {
534130f4520SKenneth D. Merry 		mtx_unlock(&be_lun->lock);
535130f4520SKenneth D. Merry 		return;
536130f4520SKenneth D. Merry 	}
537130f4520SKenneth D. Merry 
538130f4520SKenneth D. Merry 	/*
539130f4520SKenneth D. Merry 	 * At this point, we've verified that we are the last I/O to
540130f4520SKenneth D. Merry 	 * complete, so it's safe to drop the lock.
541130f4520SKenneth D. Merry 	 */
542130f4520SKenneth D. Merry 	mtx_unlock(&be_lun->lock);
543130f4520SKenneth D. Merry 
544130f4520SKenneth D. Merry 	/*
545130f4520SKenneth D. Merry 	 * If there are any errors from the backing device, we fail the
546130f4520SKenneth D. Merry 	 * entire I/O with a medium error.
547130f4520SKenneth D. Merry 	 */
548130f4520SKenneth D. Merry 	if (beio->num_errors > 0) {
549130f4520SKenneth D. Merry 		if (beio->bio_cmd == BIO_FLUSH) {
550130f4520SKenneth D. Merry 			/* XXX KDM is there a better error here? */
551130f4520SKenneth D. Merry 			ctl_set_internal_failure(&io->scsiio,
552130f4520SKenneth D. Merry 						 /*sks_valid*/ 1,
553130f4520SKenneth D. Merry 						 /*retry_count*/ 0xbad2);
554130f4520SKenneth D. Merry 		} else
555130f4520SKenneth D. Merry 			ctl_set_medium_error(&io->scsiio);
556130f4520SKenneth D. Merry 		ctl_complete_beio(beio);
557130f4520SKenneth D. Merry 		return;
558130f4520SKenneth D. Merry 	}
559130f4520SKenneth D. Merry 
560130f4520SKenneth D. Merry 	/*
561130f4520SKenneth D. Merry 	 * If this is a write or a flush, we're all done.
562130f4520SKenneth D. Merry 	 * If this is a read, we can now send the data to the user.
563130f4520SKenneth D. Merry 	 */
564130f4520SKenneth D. Merry 	if ((beio->bio_cmd == BIO_WRITE)
565130f4520SKenneth D. Merry 	 || (beio->bio_cmd == BIO_FLUSH)) {
566130f4520SKenneth D. Merry 		ctl_set_success(&io->scsiio);
567130f4520SKenneth D. Merry 		ctl_complete_beio(beio);
568130f4520SKenneth D. Merry 	} else {
569130f4520SKenneth D. Merry 		io->scsiio.be_move_done = ctl_be_block_move_done;
570130f4520SKenneth D. Merry 		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
571130f4520SKenneth D. Merry 		io->scsiio.kern_data_len = beio->io_len;
572130f4520SKenneth D. Merry 		io->scsiio.kern_total_len = beio->io_len;
573130f4520SKenneth D. Merry 		io->scsiio.kern_rel_offset = 0;
574130f4520SKenneth D. Merry 		io->scsiio.kern_data_resid = 0;
575130f4520SKenneth D. Merry 		io->scsiio.kern_sg_entries = beio->num_segs;
576130f4520SKenneth D. Merry 		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
577130f4520SKenneth D. Merry #ifdef CTL_TIME_IO
578130f4520SKenneth D. Merry 		getbintime(&io->io_hdr.dma_start_bt);
579130f4520SKenneth D. Merry #endif
580130f4520SKenneth D. Merry 		ctl_datamove(io);
581130f4520SKenneth D. Merry 	}
582130f4520SKenneth D. Merry }
583130f4520SKenneth D. Merry 
584130f4520SKenneth D. Merry static void
585130f4520SKenneth D. Merry ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
586130f4520SKenneth D. Merry 			struct ctl_be_block_io *beio)
587130f4520SKenneth D. Merry {
588130f4520SKenneth D. Merry 	union ctl_io *io;
589130f4520SKenneth D. Merry 	struct mount *mountpoint;
590130f4520SKenneth D. Merry 	int vfs_is_locked, error, lock_flags;
591130f4520SKenneth D. Merry 
592130f4520SKenneth D. Merry 	DPRINTF("entered\n");
593130f4520SKenneth D. Merry 
594130f4520SKenneth D. Merry 	io = beio->io;
595130f4520SKenneth D. Merry 
596130f4520SKenneth D. Merry 	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
597130f4520SKenneth D. Merry 
598130f4520SKenneth D. Merry 	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
599130f4520SKenneth D. Merry 
600130f4520SKenneth D. Merry 	if (MNT_SHARED_WRITES(mountpoint)
601130f4520SKenneth D. Merry 	 || ((mountpoint == NULL)
602130f4520SKenneth D. Merry 	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
603130f4520SKenneth D. Merry 		lock_flags = LK_SHARED;
604130f4520SKenneth D. Merry 	else
605130f4520SKenneth D. Merry 		lock_flags = LK_EXCLUSIVE;
606130f4520SKenneth D. Merry 
607130f4520SKenneth D. Merry 	vn_lock(be_lun->vn, lock_flags | LK_RETRY);
608130f4520SKenneth D. Merry 
609130f4520SKenneth D. Merry 	binuptime(&beio->ds_t0);
610130f4520SKenneth D. Merry 	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
611130f4520SKenneth D. Merry 
612130f4520SKenneth D. Merry 	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
613130f4520SKenneth D. Merry 	VOP_UNLOCK(be_lun->vn, 0);
614130f4520SKenneth D. Merry 
615130f4520SKenneth D. Merry 	vn_finished_write(mountpoint);
616130f4520SKenneth D. Merry 
617130f4520SKenneth D. Merry 	VFS_UNLOCK_GIANT(vfs_is_locked);
618130f4520SKenneth D. Merry 
619130f4520SKenneth D. Merry 	if (error == 0)
620130f4520SKenneth D. Merry 		ctl_set_success(&io->scsiio);
621130f4520SKenneth D. Merry 	else {
622130f4520SKenneth D. Merry 		/* XXX KDM is there a better error here? */
623130f4520SKenneth D. Merry 		ctl_set_internal_failure(&io->scsiio,
624130f4520SKenneth D. Merry 					 /*sks_valid*/ 1,
625130f4520SKenneth D. Merry 					 /*retry_count*/ 0xbad1);
626130f4520SKenneth D. Merry 	}
627130f4520SKenneth D. Merry 
628130f4520SKenneth D. Merry 	ctl_complete_beio(beio);
629130f4520SKenneth D. Merry }
630130f4520SKenneth D. Merry 
631130f4520SKenneth D. Merry SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, file_start, "uint64_t");
632130f4520SKenneth D. Merry SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, file_start, "uint64_t");
633130f4520SKenneth D. Merry SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, file_done, "uint64_t");
634130f4520SKenneth D. Merry SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, file_done, "uint64_t");
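/*
 * These static probes bracket the file-backed I/O path below.  A sketch of
 * how they might be watched from userland, assuming DTrace is available:
 *
 *     dtrace -n 'cbb:kernel::file_start { @[probefunc] = count(); }'
 */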
635130f4520SKenneth D. Merry 
636130f4520SKenneth D. Merry static void
637130f4520SKenneth D. Merry ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
638130f4520SKenneth D. Merry 			   struct ctl_be_block_io *beio)
639130f4520SKenneth D. Merry {
640130f4520SKenneth D. Merry 	struct ctl_be_block_filedata *file_data;
641130f4520SKenneth D. Merry 	union ctl_io *io;
642130f4520SKenneth D. Merry 	struct uio xuio;
643130f4520SKenneth D. Merry 	struct iovec *xiovec;
644130f4520SKenneth D. Merry 	int vfs_is_locked, flags;
645130f4520SKenneth D. Merry 	int error, i;
646130f4520SKenneth D. Merry 
647130f4520SKenneth D. Merry 	DPRINTF("entered\n");
648130f4520SKenneth D. Merry 
649130f4520SKenneth D. Merry 	file_data = &be_lun->backend.file;
650130f4520SKenneth D. Merry 	io = beio->io;
651130f4520SKenneth D. Merry 	flags = beio->bio_flags;
652130f4520SKenneth D. Merry 
653130f4520SKenneth D. Merry 	if (beio->bio_cmd == BIO_READ) {
654130f4520SKenneth D. Merry 		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
655130f4520SKenneth D. Merry 	} else {
656130f4520SKenneth D. Merry 		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
657130f4520SKenneth D. Merry 	}
658130f4520SKenneth D. Merry 
659130f4520SKenneth D. Merry 	bzero(&xuio, sizeof(xuio));
660130f4520SKenneth D. Merry 	if (beio->bio_cmd == BIO_READ)
661130f4520SKenneth D. Merry 		xuio.uio_rw = UIO_READ;
662130f4520SKenneth D. Merry 	else
663130f4520SKenneth D. Merry 		xuio.uio_rw = UIO_WRITE;
664130f4520SKenneth D. Merry 
665130f4520SKenneth D. Merry 	xuio.uio_offset = beio->io_offset;
666130f4520SKenneth D. Merry 	xuio.uio_resid = beio->io_len;
667130f4520SKenneth D. Merry 	xuio.uio_segflg = UIO_SYSSPACE;
668130f4520SKenneth D. Merry 	xuio.uio_iov = beio->xiovecs;
669130f4520SKenneth D. Merry 	xuio.uio_iovcnt = beio->num_segs;
670130f4520SKenneth D. Merry 	xuio.uio_td = curthread;
671130f4520SKenneth D. Merry 
672130f4520SKenneth D. Merry 	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
673130f4520SKenneth D. Merry 		xiovec->iov_base = beio->sg_segs[i].addr;
674130f4520SKenneth D. Merry 		xiovec->iov_len = beio->sg_segs[i].len;
675130f4520SKenneth D. Merry 	}
676130f4520SKenneth D. Merry 
677130f4520SKenneth D. Merry 	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
678130f4520SKenneth D. Merry 	if (beio->bio_cmd == BIO_READ) {
679130f4520SKenneth D. Merry 		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
680130f4520SKenneth D. Merry 
681130f4520SKenneth D. Merry 		binuptime(&beio->ds_t0);
682130f4520SKenneth D. Merry 		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
683130f4520SKenneth D. Merry 
684130f4520SKenneth D. Merry 		/*
685130f4520SKenneth D. Merry 		 * UFS pays attention to IO_DIRECT for reads.  If the
686130f4520SKenneth D. Merry 		 * DIRECTIO option is configured into the kernel, it calls
687130f4520SKenneth D. Merry 		 * ffs_rawread().  But that only works for single-segment
688130f4520SKenneth D. Merry 		 * uios with user space addresses.  In our case, with a
689130f4520SKenneth D. Merry 		 * kernel uio, it still reads into the buffer cache, but it
690130f4520SKenneth D. Merry 		 * will just try to release the buffer from the cache later
691130f4520SKenneth D. Merry 		 * on in ffs_read().
692130f4520SKenneth D. Merry 		 *
693130f4520SKenneth D. Merry 		 * ZFS does not pay attention to IO_DIRECT for reads.
694130f4520SKenneth D. Merry 		 *
695130f4520SKenneth D. Merry 		 * UFS does not pay attention to IO_SYNC for reads.
696130f4520SKenneth D. Merry 		 *
697130f4520SKenneth D. Merry 		 * ZFS pays attention to IO_SYNC (which translates into the
698130f4520SKenneth D. Merry 		 * Solaris define FRSYNC for zfs_read()) for reads.  It
699130f4520SKenneth D. Merry 		 * attempts to sync the file before reading.
700130f4520SKenneth D. Merry 		 *
701130f4520SKenneth D. Merry 		 * So, to attempt to provide some barrier semantics in the
702130f4520SKenneth D. Merry 		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
703130f4520SKenneth D. Merry 		 */
704130f4520SKenneth D. Merry 		error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
705130f4520SKenneth D. Merry 				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);
706130f4520SKenneth D. Merry 
707130f4520SKenneth D. Merry 		VOP_UNLOCK(be_lun->vn, 0);
708130f4520SKenneth D. Merry 	} else {
709130f4520SKenneth D. Merry 		struct mount *mountpoint;
710130f4520SKenneth D. Merry 		int lock_flags;
711130f4520SKenneth D. Merry 
712130f4520SKenneth D. Merry 		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
713130f4520SKenneth D. Merry 
714130f4520SKenneth D. Merry 		if (MNT_SHARED_WRITES(mountpoint)
715130f4520SKenneth D. Merry 		 || ((mountpoint == NULL)
716130f4520SKenneth D. Merry 		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
717130f4520SKenneth D. Merry 			lock_flags = LK_SHARED;
718130f4520SKenneth D. Merry 		else
719130f4520SKenneth D. Merry 			lock_flags = LK_EXCLUSIVE;
720130f4520SKenneth D. Merry 
721130f4520SKenneth D. Merry 		vn_lock(be_lun->vn, lock_flags | LK_RETRY);
722130f4520SKenneth D. Merry 
723130f4520SKenneth D. Merry 		binuptime(&beio->ds_t0);
724130f4520SKenneth D. Merry 		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
725130f4520SKenneth D. Merry 
726130f4520SKenneth D. Merry 		/*
727130f4520SKenneth D. Merry 		 * UFS pays attention to IO_DIRECT for writes.  The write
728130f4520SKenneth D. Merry 		 * is done asynchronously.  (Normally the write would just
729130f4520SKenneth D. Merry 		 * get put into the cache.)
730130f4520SKenneth D. Merry 		 *
731130f4520SKenneth D. Merry 		 * UFS pays attention to IO_SYNC for writes.  It will
732130f4520SKenneth D. Merry 		 * attempt to write the buffer out synchronously if that
733130f4520SKenneth D. Merry 		 * flag is set.
734130f4520SKenneth D. Merry 		 *
735130f4520SKenneth D. Merry 		 * ZFS does not pay attention to IO_DIRECT for writes.
736130f4520SKenneth D. Merry 		 *
737130f4520SKenneth D. Merry 		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
738130f4520SKenneth D. Merry 		 * for writes.  It will flush the transaction from the
739130f4520SKenneth D. Merry 		 * cache before returning.
740130f4520SKenneth D. Merry 		 *
741130f4520SKenneth D. Merry 		 * So if we've got the BIO_ORDERED flag set, we want
742130f4520SKenneth D. Merry 		 * IO_SYNC in either the UFS or ZFS case.
743130f4520SKenneth D. Merry 		 */
744130f4520SKenneth D. Merry 		error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
745130f4520SKenneth D. Merry 				  IO_SYNC : 0, file_data->cred);
746130f4520SKenneth D. Merry 		VOP_UNLOCK(be_lun->vn, 0);
747130f4520SKenneth D. Merry 
748130f4520SKenneth D. Merry 		vn_finished_write(mountpoint);
749130f4520SKenneth D. Merry 	}
750130f4520SKenneth D. Merry 	VFS_UNLOCK_GIANT(vfs_is_locked);
751130f4520SKenneth D. Merry 
752130f4520SKenneth D. Merry 	/*
753130f4520SKenneth D. Merry 	 * If we got an error, set the sense data to "MEDIUM ERROR" and
754130f4520SKenneth D. Merry 	 * return the I/O to the user.
755130f4520SKenneth D. Merry 	 */
756130f4520SKenneth D. Merry 	if (error != 0) {
757130f4520SKenneth D. Merry 		char path_str[32];
758130f4520SKenneth D. Merry 
759130f4520SKenneth D. Merry 		ctl_scsi_path_string(io, path_str, sizeof(path_str));
760130f4520SKenneth D. Merry 		/*
761130f4520SKenneth D. Merry 		 * XXX KDM ZFS returns ENOSPC when the underlying
762130f4520SKenneth D. Merry 		 * filesystem fills up.  What kind of SCSI error should we
763130f4520SKenneth D. Merry 		 * return for that?
764130f4520SKenneth D. Merry 		 */
765130f4520SKenneth D. Merry 		printf("%s%s command returned errno %d\n", path_str,
766130f4520SKenneth D. Merry 		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
767130f4520SKenneth D. Merry 		ctl_set_medium_error(&io->scsiio);
768130f4520SKenneth D. Merry 		ctl_complete_beio(beio);
769130f4520SKenneth D. Merry 		return;
770130f4520SKenneth D. Merry 	}
771130f4520SKenneth D. Merry 
772130f4520SKenneth D. Merry 	/*
773130f4520SKenneth D. Merry 	 * If this is a write, we're all done.
774130f4520SKenneth D. Merry 	 * If this is a read, we can now send the data to the user.
775130f4520SKenneth D. Merry 	 */
776130f4520SKenneth D. Merry 	if (beio->bio_cmd == BIO_WRITE) {
777130f4520SKenneth D. Merry 		ctl_set_success(&io->scsiio);
778130f4520SKenneth D. Merry 		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
779130f4520SKenneth D. Merry 		ctl_complete_beio(beio);
780130f4520SKenneth D. Merry 	} else {
781130f4520SKenneth D. Merry 		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
782130f4520SKenneth D. Merry 		io->scsiio.be_move_done = ctl_be_block_move_done;
783130f4520SKenneth D. Merry 		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
784130f4520SKenneth D. Merry 		io->scsiio.kern_data_len = beio->io_len;
785130f4520SKenneth D. Merry 		io->scsiio.kern_total_len = beio->io_len;
786130f4520SKenneth D. Merry 		io->scsiio.kern_rel_offset = 0;
787130f4520SKenneth D. Merry 		io->scsiio.kern_data_resid = 0;
788130f4520SKenneth D. Merry 		io->scsiio.kern_sg_entries = beio->num_segs;
789130f4520SKenneth D. Merry 		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
790130f4520SKenneth D. Merry #ifdef CTL_TIME_IO
791130f4520SKenneth D. Merry 		getbintime(&io->io_hdr.dma_start_bt);
792130f4520SKenneth D. Merry #endif
793130f4520SKenneth D. Merry 		ctl_datamove(io);
794130f4520SKenneth D. Merry 	}
795130f4520SKenneth D. Merry }
796130f4520SKenneth D. Merry 
797130f4520SKenneth D. Merry static void
798130f4520SKenneth D. Merry ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
799130f4520SKenneth D. Merry 		       struct ctl_be_block_io *beio)
800130f4520SKenneth D. Merry {
801130f4520SKenneth D. Merry 	struct bio *bio;
802130f4520SKenneth D. Merry 	union ctl_io *io;
803130f4520SKenneth D. Merry 	struct ctl_be_block_devdata *dev_data;
804130f4520SKenneth D. Merry 
805130f4520SKenneth D. Merry 	dev_data = &be_lun->backend.dev;
806130f4520SKenneth D. Merry 	io = beio->io;
807130f4520SKenneth D. Merry 
808130f4520SKenneth D. Merry 	DPRINTF("entered\n");
809130f4520SKenneth D. Merry 
810130f4520SKenneth D. Merry 	/* This can't fail, it's a blocking allocation. */
811130f4520SKenneth D. Merry 	bio = g_alloc_bio();
812130f4520SKenneth D. Merry 
813130f4520SKenneth D. Merry 	bio->bio_cmd	    = BIO_FLUSH;
814130f4520SKenneth D. Merry 	bio->bio_flags	   |= BIO_ORDERED;
815130f4520SKenneth D. Merry 	bio->bio_dev	    = dev_data->cdev;
816130f4520SKenneth D. Merry 	bio->bio_offset	    = 0;
817130f4520SKenneth D. Merry 	bio->bio_data	    = 0;
818130f4520SKenneth D. Merry 	bio->bio_done	    = ctl_be_block_biodone;
819130f4520SKenneth D. Merry 	bio->bio_caller1    = beio;
820130f4520SKenneth D. Merry 	bio->bio_pblkno	    = 0;
821130f4520SKenneth D. Merry 
822130f4520SKenneth D. Merry 	/*
823130f4520SKenneth D. Merry 	 * We don't need to acquire the LUN lock here, because we are only
824130f4520SKenneth D. Merry 	 * sending one bio, and so there is no other context to synchronize
825130f4520SKenneth D. Merry 	 * with.
826130f4520SKenneth D. Merry 	 */
827130f4520SKenneth D. Merry 	beio->num_bios_sent = 1;
828130f4520SKenneth D. Merry 	beio->send_complete = 1;
829130f4520SKenneth D. Merry 
830130f4520SKenneth D. Merry 	binuptime(&beio->ds_t0);
831130f4520SKenneth D. Merry 	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
832130f4520SKenneth D. Merry 
833130f4520SKenneth D. Merry 	(*dev_data->csw->d_strategy)(bio);
834130f4520SKenneth D. Merry }
835130f4520SKenneth D. Merry 
836130f4520SKenneth D. Merry static void
837130f4520SKenneth D. Merry ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
838130f4520SKenneth D. Merry 			  struct ctl_be_block_io *beio)
839130f4520SKenneth D. Merry {
840130f4520SKenneth D. Merry 	int i;
841130f4520SKenneth D. Merry 	struct bio *bio;
842130f4520SKenneth D. Merry 	struct ctl_be_block_devdata *dev_data;
843130f4520SKenneth D. Merry 	off_t cur_offset;
844130f4520SKenneth D. Merry 	int max_iosize;
845130f4520SKenneth D. Merry 
846130f4520SKenneth D. Merry 	DPRINTF("entered\n");
847130f4520SKenneth D. Merry 
848130f4520SKenneth D. Merry 	dev_data = &be_lun->backend.dev;
849130f4520SKenneth D. Merry 
850130f4520SKenneth D. Merry 	/*
851130f4520SKenneth D. Merry 	 * We have to limit our I/O size to the maximum supported by the
852130f4520SKenneth D. Merry 	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
853130f4520SKenneth D. Merry 	 * set it properly, use DFLTPHYS.
854130f4520SKenneth D. Merry 	 */
855130f4520SKenneth D. Merry 	max_iosize = dev_data->cdev->si_iosize_max;
856130f4520SKenneth D. Merry 	if (max_iosize < PAGE_SIZE)
857130f4520SKenneth D. Merry 		max_iosize = DFLTPHYS;
858130f4520SKenneth D. Merry 
859130f4520SKenneth D. Merry 	cur_offset = beio->io_offset;
860130f4520SKenneth D. Merry 
861130f4520SKenneth D. Merry 	/*
862130f4520SKenneth D. Merry 	 * XXX KDM need to accurately reflect the number of I/Os outstanding
863130f4520SKenneth D. Merry 	 * to a device.
864130f4520SKenneth D. Merry 	 */
865130f4520SKenneth D. Merry 	binuptime(&beio->ds_t0);
866130f4520SKenneth D. Merry 	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
867130f4520SKenneth D. Merry 
868130f4520SKenneth D. Merry 	for (i = 0; i < beio->num_segs; i++) {
869130f4520SKenneth D. Merry 		size_t cur_size;
870130f4520SKenneth D. Merry 		uint8_t *cur_ptr;
871130f4520SKenneth D. Merry 
872130f4520SKenneth D. Merry 		cur_size = beio->sg_segs[i].len;
873130f4520SKenneth D. Merry 		cur_ptr = beio->sg_segs[i].addr;
874130f4520SKenneth D. Merry 
875130f4520SKenneth D. Merry 		while (cur_size > 0) {
876130f4520SKenneth D. Merry 			/* This can't fail, it's a blocking allocation. */
877130f4520SKenneth D. Merry 			bio = g_alloc_bio();
878130f4520SKenneth D. Merry 
879130f4520SKenneth D. Merry 			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));
880130f4520SKenneth D. Merry 
881130f4520SKenneth D. Merry 			bio->bio_cmd = beio->bio_cmd;
882130f4520SKenneth D. Merry 			bio->bio_flags |= beio->bio_flags;
883130f4520SKenneth D. Merry 			bio->bio_dev = dev_data->cdev;
884130f4520SKenneth D. Merry 			bio->bio_caller1 = beio;
885130f4520SKenneth D. Merry 			bio->bio_length = min(cur_size, max_iosize);
886130f4520SKenneth D. Merry 			bio->bio_offset = cur_offset;
887130f4520SKenneth D. Merry 			bio->bio_data = cur_ptr;
888130f4520SKenneth D. Merry 			bio->bio_done = ctl_be_block_biodone;
889130f4520SKenneth D. Merry 			bio->bio_pblkno = cur_offset / be_lun->blocksize;
890130f4520SKenneth D. Merry 
891130f4520SKenneth D. Merry 			cur_offset += bio->bio_length;
892130f4520SKenneth D. Merry 			cur_ptr += bio->bio_length;
893130f4520SKenneth D. Merry 			cur_size -= bio->bio_length;
894130f4520SKenneth D. Merry 
895130f4520SKenneth D. Merry 			/*
896130f4520SKenneth D. Merry 			 * Make sure we set the complete bit just before we
897130f4520SKenneth D. Merry 			 * issue the last bio so we don't wind up with a
898130f4520SKenneth D. Merry 			 * race.
899130f4520SKenneth D. Merry 			 *
900130f4520SKenneth D. Merry 			 * Use the LUN mutex here instead of a combination
901130f4520SKenneth D. Merry 			 * of atomic variables for simplicity.
902130f4520SKenneth D. Merry 			 *
903130f4520SKenneth D. Merry 			 * XXX KDM we could have a per-IO lock, but that
904130f4520SKenneth D. Merry 			 * would cause additional per-IO setup and teardown
905130f4520SKenneth D. Merry 			 * overhead.  Hopefully there won't be too much
906130f4520SKenneth D. Merry 			 * contention on the LUN lock.
907130f4520SKenneth D. Merry 			 */
908130f4520SKenneth D. Merry 			mtx_lock(&be_lun->lock);
909130f4520SKenneth D. Merry 
910130f4520SKenneth D. Merry 			beio->num_bios_sent++;
911130f4520SKenneth D. Merry 
912130f4520SKenneth D. Merry 			if ((i == beio->num_segs - 1)
913130f4520SKenneth D. Merry 			 && (cur_size == 0))
914130f4520SKenneth D. Merry 				beio->send_complete = 1;
915130f4520SKenneth D. Merry 
916130f4520SKenneth D. Merry 			mtx_unlock(&be_lun->lock);
917130f4520SKenneth D. Merry 
918130f4520SKenneth D. Merry 			(*dev_data->csw->d_strategy)(bio);
919130f4520SKenneth D. Merry 		}
920130f4520SKenneth D. Merry 	}
921130f4520SKenneth D. Merry }
922130f4520SKenneth D. Merry 
923130f4520SKenneth D. Merry static void
924130f4520SKenneth D. Merry ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
925130f4520SKenneth D. Merry 			 union ctl_io *io)
926130f4520SKenneth D. Merry {
927130f4520SKenneth D. Merry 	struct ctl_be_block_io *beio;
928130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
929130f4520SKenneth D. Merry 
930130f4520SKenneth D. Merry 	DPRINTF("entered\n");
931130f4520SKenneth D. Merry 
932130f4520SKenneth D. Merry 	softc = be_lun->softc;
933130f4520SKenneth D. Merry 	beio = ctl_alloc_beio(softc);
934130f4520SKenneth D. Merry 	if (beio == NULL) {
935130f4520SKenneth D. Merry 		/*
936130f4520SKenneth D. Merry 		 * This should not happen.  ctl_alloc_beio() will call
937130f4520SKenneth D. Merry 		 * ctl_grow_beio() with a blocking malloc as needed.
938130f4520SKenneth D. Merry 		 * A malloc with M_WAITOK should not fail.
939130f4520SKenneth D. Merry 		 */
940130f4520SKenneth D. Merry 		ctl_set_busy(&io->scsiio);
941130f4520SKenneth D. Merry 		ctl_done(io);
942130f4520SKenneth D. Merry 		return;
943130f4520SKenneth D. Merry 	}
944130f4520SKenneth D. Merry 
945130f4520SKenneth D. Merry 	beio->io = io;
946130f4520SKenneth D. Merry 	beio->softc = softc;
947130f4520SKenneth D. Merry 	beio->lun = be_lun;
948130f4520SKenneth D. Merry 	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
949130f4520SKenneth D. Merry 
950130f4520SKenneth D. Merry 	switch (io->scsiio.cdb[0]) {
951130f4520SKenneth D. Merry 	case SYNCHRONIZE_CACHE:
952130f4520SKenneth D. Merry 	case SYNCHRONIZE_CACHE_16:
953130f4520SKenneth D. Merry 		beio->ds_trans_type = DEVSTAT_NO_DATA;
954130f4520SKenneth D. Merry 		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
955130f4520SKenneth D. Merry 		beio->io_len = 0;
956130f4520SKenneth D. Merry 		be_lun->lun_flush(be_lun, beio);
957130f4520SKenneth D. Merry 		break;
958130f4520SKenneth D. Merry 	default:
959130f4520SKenneth D. Merry 		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
960130f4520SKenneth D. Merry 		break;
961130f4520SKenneth D. Merry 	}
962130f4520SKenneth D. Merry }
963130f4520SKenneth D. Merry 
964130f4520SKenneth D. Merry SDT_PROBE_DEFINE1(cbb, kernel, read, start, start, "uint64_t");
965130f4520SKenneth D. Merry SDT_PROBE_DEFINE1(cbb, kernel, write, start, start, "uint64_t");
966130f4520SKenneth D. Merry SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, alloc_done, "uint64_t");
967130f4520SKenneth D. Merry SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, alloc_done, "uint64_t");
968130f4520SKenneth D. Merry 
969130f4520SKenneth D. Merry static void
970130f4520SKenneth D. Merry ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
971130f4520SKenneth D. Merry 			   union ctl_io *io)
972130f4520SKenneth D. Merry {
973130f4520SKenneth D. Merry 	struct ctl_be_block_io *beio;
974130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
975130f4520SKenneth D. Merry 	struct ctl_lba_len lbalen;
976130f4520SKenneth D. Merry 	uint64_t len_left, io_size_bytes;
977130f4520SKenneth D. Merry 	int i;
978130f4520SKenneth D. Merry 
979130f4520SKenneth D. Merry 	softc = be_lun->softc;
980130f4520SKenneth D. Merry 
981130f4520SKenneth D. Merry 	DPRINTF("entered\n");
982130f4520SKenneth D. Merry 
983130f4520SKenneth D. Merry 	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
984130f4520SKenneth D. Merry 		SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
985130f4520SKenneth D. Merry 	} else {
986130f4520SKenneth D. Merry 		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
987130f4520SKenneth D. Merry 	}
988130f4520SKenneth D. Merry 
989130f4520SKenneth D. Merry 	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
990130f4520SKenneth D. Merry 	       sizeof(lbalen));
991130f4520SKenneth D. Merry 
992130f4520SKenneth D. Merry 	io_size_bytes = lbalen.len * be_lun->blocksize;
993130f4520SKenneth D. Merry 
994130f4520SKenneth D. Merry 	/*
995130f4520SKenneth D. Merry 	 * XXX KDM this is temporary, until we implement chaining of beio
996130f4520SKenneth D. Merry 	 * structures and multiple datamove calls to move all the data in
997130f4520SKenneth D. Merry 	 * or out.
998130f4520SKenneth D. Merry 	 */
999130f4520SKenneth D. Merry 	if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
1000130f4520SKenneth D. Merry 		printf("%s: IO length %ju > max io size %u\n", __func__,
1001130f4520SKenneth D. Merry 		       (uintmax_t)io_size_bytes, CTLBLK_MAX_IO_SIZE);
1002130f4520SKenneth D. Merry 		ctl_set_invalid_field(&io->scsiio,
1003130f4520SKenneth D. Merry 				      /*sks_valid*/ 0,
1004130f4520SKenneth D. Merry 				      /*command*/ 1,
1005130f4520SKenneth D. Merry 				      /*field*/ 0,
1006130f4520SKenneth D. Merry 				      /*bit_valid*/ 0,
1007130f4520SKenneth D. Merry 				      /*bit*/ 0);
1008130f4520SKenneth D. Merry 		ctl_done(io);
1009130f4520SKenneth D. Merry 		return;
1010130f4520SKenneth D. Merry 	}
1011130f4520SKenneth D. Merry 
1012130f4520SKenneth D. Merry 	beio = ctl_alloc_beio(softc);
1013130f4520SKenneth D. Merry 	if (beio == NULL) {
1014130f4520SKenneth D. Merry 		/*
1015130f4520SKenneth D. Merry 		 * This should not happen.  ctl_alloc_beio() will call
1016130f4520SKenneth D. Merry 		 * ctl_grow_beio() with a blocking malloc as needed.
1017130f4520SKenneth D. Merry 		 * A malloc with M_WAITOK should not fail.
1018130f4520SKenneth D. Merry 		 */
1019130f4520SKenneth D. Merry 		ctl_set_busy(&io->scsiio);
1020130f4520SKenneth D. Merry 		ctl_done(io);
1021130f4520SKenneth D. Merry 		return;
1022130f4520SKenneth D. Merry 	}
1023130f4520SKenneth D. Merry 
1024130f4520SKenneth D. Merry 	beio->io = io;
1025130f4520SKenneth D. Merry 	beio->softc = softc;
1026130f4520SKenneth D. Merry 	beio->lun = be_lun;
1027130f4520SKenneth D. Merry 	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
1028130f4520SKenneth D. Merry 
1029130f4520SKenneth D. Merry 	/*
1030130f4520SKenneth D. Merry 	 * If the I/O came down with an ordered or head of queue tag, set
1031130f4520SKenneth D. Merry 	 * the BIO_ORDERED attribute.  For head of queue tags, that's
1032130f4520SKenneth D. Merry 	 * pretty much the best we can do.
1033130f4520SKenneth D. Merry 	 *
1034130f4520SKenneth D. Merry 	 * XXX KDM we don't have a great way to easily know about the FUA
1035130f4520SKenneth D. Merry 	 * bit right now (it is decoded in ctl_read_write(), but we don't
1036130f4520SKenneth D. Merry 	 * pass that knowledge to the backend), and in any case we would
1037130f4520SKenneth D. Merry 	 * need to determine how to handle it.
1038130f4520SKenneth D. Merry 	 */
1039130f4520SKenneth D. Merry 	if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
1040130f4520SKenneth D. Merry 	 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
1041130f4520SKenneth D. Merry 		beio->bio_flags = BIO_ORDERED;
1042130f4520SKenneth D. Merry 
1043130f4520SKenneth D. Merry 	switch (io->scsiio.tag_type) {
1044130f4520SKenneth D. Merry 	case CTL_TAG_ORDERED:
1045130f4520SKenneth D. Merry 		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1046130f4520SKenneth D. Merry 		break;
1047130f4520SKenneth D. Merry 	case CTL_TAG_HEAD_OF_QUEUE:
1048130f4520SKenneth D. Merry 		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
1049130f4520SKenneth D. Merry 		break;
1050130f4520SKenneth D. Merry 	case CTL_TAG_UNTAGGED:
1051130f4520SKenneth D. Merry 	case CTL_TAG_SIMPLE:
1052130f4520SKenneth D. Merry 	case CTL_TAG_ACA:
1053130f4520SKenneth D. Merry 	default:
1054130f4520SKenneth D. Merry 		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1055130f4520SKenneth D. Merry 		break;
1056130f4520SKenneth D. Merry 	}
1057130f4520SKenneth D. Merry 
1058130f4520SKenneth D. Merry 	/*
1059130f4520SKenneth D. Merry 	 * This path handles read and write only.  The config write path
1060130f4520SKenneth D. Merry 	 * handles flush operations.
1061130f4520SKenneth D. Merry 	 */
1062130f4520SKenneth D. Merry 	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
1063130f4520SKenneth D. Merry 		beio->bio_cmd = BIO_READ;
1064130f4520SKenneth D. Merry 		beio->ds_trans_type = DEVSTAT_READ;
1065130f4520SKenneth D. Merry 	} else {
1066130f4520SKenneth D. Merry 		beio->bio_cmd = BIO_WRITE;
1067130f4520SKenneth D. Merry 		beio->ds_trans_type = DEVSTAT_WRITE;
1068130f4520SKenneth D. Merry 	}
1069130f4520SKenneth D. Merry 
1070130f4520SKenneth D. Merry 	beio->io_len = lbalen.len * be_lun->blocksize;
1071130f4520SKenneth D. Merry 	beio->io_offset = lbalen.lba * be_lun->blocksize;
1072130f4520SKenneth D. Merry 
1073130f4520SKenneth D. Merry 	DPRINTF("%s at LBA %jx len %u\n",
1074130f4520SKenneth D. Merry 	       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
1075130f4520SKenneth D. Merry 	       (uintmax_t)lbalen.lba, lbalen.len);
1076130f4520SKenneth D. Merry 
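	/*
	 * Carve the transfer into scatter/gather segments of at most
	 * MAXPHYS bytes each, allocating each segment's buffer from the
	 * per-LUN UMA zone.
	 */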
1077130f4520SKenneth D. Merry 	for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
1078130f4520SKenneth D. Merry 	     len_left > 0; i++) {
1079130f4520SKenneth D. Merry 
1080130f4520SKenneth D. Merry 		/*
1081130f4520SKenneth D. Merry 		 * Setup the S/G entry for this chunk.
1082130f4520SKenneth D. Merry 		 */
1083130f4520SKenneth D. Merry 		beio->sg_segs[i].len = min(MAXPHYS, len_left);
1084130f4520SKenneth D. Merry 		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);
1085130f4520SKenneth D. Merry 		/*
1086130f4520SKenneth D. Merry 		 * uma_zalloc() can in theory return NULL even with M_WAITOK
1087130f4520SKenneth D. Merry 		 * if it can't pull more memory into the zone.
1088130f4520SKenneth D. Merry 		 */
1089130f4520SKenneth D. Merry 		if (beio->sg_segs[i].addr == NULL) {
1090130f4520SKenneth D. Merry 			ctl_set_busy(&io->scsiio);
1091130f4520SKenneth D. Merry 			ctl_complete_beio(beio);
1092130f4520SKenneth D. Merry 			return;
1093130f4520SKenneth D. Merry 		}
1094130f4520SKenneth D. Merry 
1095130f4520SKenneth D. Merry 		DPRINTF("segment %d addr %p len %zd\n", i,
1096130f4520SKenneth D. Merry 			beio->sg_segs[i].addr, beio->sg_segs[i].len);
1097130f4520SKenneth D. Merry 
1098130f4520SKenneth D. Merry 		beio->num_segs++;
1099130f4520SKenneth D. Merry 		len_left -= beio->sg_segs[i].len;
1100130f4520SKenneth D. Merry 	}
1101130f4520SKenneth D. Merry 
1102130f4520SKenneth D. Merry 	/*
1103130f4520SKenneth D. Merry 	 * For the read case, we need to read the data into our buffers and
1104130f4520SKenneth D. Merry 	 * then we can send it back to the user.  For the write case, we
1105130f4520SKenneth D. Merry 	 * need to get the data from the user first.
1106130f4520SKenneth D. Merry 	 */
1107130f4520SKenneth D. Merry 	if (beio->bio_cmd == BIO_READ) {
1108130f4520SKenneth D. Merry 		SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
1109130f4520SKenneth D. Merry 		be_lun->dispatch(be_lun, beio);
1110130f4520SKenneth D. Merry 	} else {
1111130f4520SKenneth D. Merry 		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
1112130f4520SKenneth D. Merry 		io->scsiio.be_move_done = ctl_be_block_move_done;
1113130f4520SKenneth D. Merry 		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
1114130f4520SKenneth D. Merry 		io->scsiio.kern_data_len = beio->io_len;
1115130f4520SKenneth D. Merry 		io->scsiio.kern_total_len = beio->io_len;
1116130f4520SKenneth D. Merry 		io->scsiio.kern_rel_offset = 0;
1117130f4520SKenneth D. Merry 		io->scsiio.kern_data_resid = 0;
1118130f4520SKenneth D. Merry 		io->scsiio.kern_sg_entries = beio->num_segs;
1119130f4520SKenneth D. Merry 		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
1120130f4520SKenneth D. Merry #ifdef CTL_TIME_IO
1121130f4520SKenneth D. Merry 		getbintime(&io->io_hdr.dma_start_bt);
1122130f4520SKenneth D. Merry #endif
1123130f4520SKenneth D. Merry 		ctl_datamove(io);
1124130f4520SKenneth D. Merry 	}
1125130f4520SKenneth D. Merry }
1126130f4520SKenneth D. Merry 
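/*
 * Per-LUN worker, run from the LUN's taskqueue.  Drain the datamove,
 * config write and input queues in that order, dropping the LUN lock
 * around each dispatch since the handlers may sleep.
 */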
1127130f4520SKenneth D. Merry static void
1128130f4520SKenneth D. Merry ctl_be_block_worker(void *context, int pending)
1129130f4520SKenneth D. Merry {
1130130f4520SKenneth D. Merry 	struct ctl_be_block_lun *be_lun;
1131130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
1132130f4520SKenneth D. Merry 	union ctl_io *io;
1133130f4520SKenneth D. Merry 
1134130f4520SKenneth D. Merry 	be_lun = (struct ctl_be_block_lun *)context;
1135130f4520SKenneth D. Merry 	softc = be_lun->softc;
1136130f4520SKenneth D. Merry 
1137130f4520SKenneth D. Merry 	DPRINTF("entered\n");
1138130f4520SKenneth D. Merry 
1139130f4520SKenneth D. Merry 	mtx_lock(&be_lun->lock);
1140130f4520SKenneth D. Merry 	for (;;) {
1141130f4520SKenneth D. Merry 		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
1142130f4520SKenneth D. Merry 		if (io != NULL) {
1143130f4520SKenneth D. Merry 			struct ctl_be_block_io *beio;
1144130f4520SKenneth D. Merry 
1145130f4520SKenneth D. Merry 			DPRINTF("datamove queue\n");
1146130f4520SKenneth D. Merry 
1147130f4520SKenneth D. Merry 			STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
1148130f4520SKenneth D. Merry 				      ctl_io_hdr, links);
1149130f4520SKenneth D. Merry 
1150130f4520SKenneth D. Merry 			mtx_unlock(&be_lun->lock);
1151130f4520SKenneth D. Merry 
1152130f4520SKenneth D. Merry 			beio = (struct ctl_be_block_io *)
1153130f4520SKenneth D. Merry 			    io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
1154130f4520SKenneth D. Merry 
1155130f4520SKenneth D. Merry 			be_lun->dispatch(be_lun, beio);
1156130f4520SKenneth D. Merry 
1157130f4520SKenneth D. Merry 			mtx_lock(&be_lun->lock);
1158130f4520SKenneth D. Merry 			continue;
1159130f4520SKenneth D. Merry 		}
1160130f4520SKenneth D. Merry 		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
1161130f4520SKenneth D. Merry 		if (io != NULL) {
1163130f4520SKenneth D. Merry 			DPRINTF("config write queue\n");
1164130f4520SKenneth D. Merry 
1165130f4520SKenneth D. Merry 			STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
1166130f4520SKenneth D. Merry 				      ctl_io_hdr, links);
1167130f4520SKenneth D. Merry 
1168130f4520SKenneth D. Merry 			mtx_unlock(&be_lun->lock);
1169130f4520SKenneth D. Merry 
1170130f4520SKenneth D. Merry 			ctl_be_block_cw_dispatch(be_lun, io);
1171130f4520SKenneth D. Merry 
1172130f4520SKenneth D. Merry 			mtx_lock(&be_lun->lock);
1173130f4520SKenneth D. Merry 			continue;
1174130f4520SKenneth D. Merry 		}
1175130f4520SKenneth D. Merry 		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
1176130f4520SKenneth D. Merry 		if (io != NULL) {
1177130f4520SKenneth D. Merry 			DPRINTF("input queue\n");
1178130f4520SKenneth D. Merry 
1179130f4520SKenneth D. Merry 			STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
1180130f4520SKenneth D. Merry 				      ctl_io_hdr, links);
1181130f4520SKenneth D. Merry 			mtx_unlock(&be_lun->lock);
1182130f4520SKenneth D. Merry 
1183130f4520SKenneth D. Merry 			/*
1184130f4520SKenneth D. Merry 			 * We must drop the lock, since this routine and
1185130f4520SKenneth D. Merry 			 * its children may sleep.
1186130f4520SKenneth D. Merry 			 */
1187130f4520SKenneth D. Merry 			ctl_be_block_dispatch(be_lun, io);
1188130f4520SKenneth D. Merry 
1189130f4520SKenneth D. Merry 			mtx_lock(&be_lun->lock);
1190130f4520SKenneth D. Merry 			continue;
1191130f4520SKenneth D. Merry 		}
1192130f4520SKenneth D. Merry 
1193130f4520SKenneth D. Merry 		/*
1194130f4520SKenneth D. Merry 		 * If we get here, there is no work left in the queues, so
1195130f4520SKenneth D. Merry 		 * just break out and let the task queue go to sleep.
1196130f4520SKenneth D. Merry 		 */
1197130f4520SKenneth D. Merry 		break;
1198130f4520SKenneth D. Merry 	}
1199130f4520SKenneth D. Merry 	mtx_unlock(&be_lun->lock);
1200130f4520SKenneth D. Merry }
1201130f4520SKenneth D. Merry 
1202130f4520SKenneth D. Merry /*
1203130f4520SKenneth D. Merry  * Entry point from CTL to the backend for I/O.  We queue everything to a
1204130f4520SKenneth D. Merry  * work thread, so this just puts the I/O on a queue and wakes up the
1205130f4520SKenneth D. Merry  * thread.
1206130f4520SKenneth D. Merry  */
1207130f4520SKenneth D. Merry static int
1208130f4520SKenneth D. Merry ctl_be_block_submit(union ctl_io *io)
1209130f4520SKenneth D. Merry {
1210130f4520SKenneth D. Merry 	struct ctl_be_block_lun *be_lun;
1211130f4520SKenneth D. Merry 	struct ctl_be_lun *ctl_be_lun;
1212130f4520SKenneth D. Merry 	int retval;
1213130f4520SKenneth D. Merry 
1214130f4520SKenneth D. Merry 	DPRINTF("entered\n");
1215130f4520SKenneth D. Merry 
1216130f4520SKenneth D. Merry 	retval = CTL_RETVAL_COMPLETE;
1217130f4520SKenneth D. Merry 
1218130f4520SKenneth D. Merry 	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
1219130f4520SKenneth D. Merry 		CTL_PRIV_BACKEND_LUN].ptr;
1220130f4520SKenneth D. Merry 	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
1221130f4520SKenneth D. Merry 
1222130f4520SKenneth D. Merry 	/*
1223130f4520SKenneth D. Merry 	 * Make sure we only get SCSI I/O.
1224130f4520SKenneth D. Merry 	 */
1225130f4520SKenneth D. Merry 	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
1226130f4520SKenneth D. Merry 		"%#x) encountered", io->io_hdr.io_type));
1227130f4520SKenneth D. Merry 
1228130f4520SKenneth D. Merry 	mtx_lock(&be_lun->lock);
1229130f4520SKenneth D. Merry 	/*
1230130f4520SKenneth D. Merry 	 * XXX KDM make sure that links is okay to use at this point.
1231130f4520SKenneth D. Merry 	 * Otherwise, we either need to add another field to ctl_io_hdr,
1232130f4520SKenneth D. Merry 	 * or deal with resource allocation here.
1233130f4520SKenneth D. Merry 	 */
1234130f4520SKenneth D. Merry 	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
1235130f4520SKenneth D. Merry 	mtx_unlock(&be_lun->lock);
1236130f4520SKenneth D. Merry 
1237130f4520SKenneth D. Merry 	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
1238130f4520SKenneth D. Merry 
1239130f4520SKenneth D. Merry 	return (retval);
1240130f4520SKenneth D. Merry }
1241130f4520SKenneth D. Merry 
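/*
 * ioctl handler for the block backend.  Userland LUN management
 * requests (currently create and remove) are dispatched from here.
 */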
1242130f4520SKenneth D. Merry static int
1243130f4520SKenneth D. Merry ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
1244130f4520SKenneth D. Merry 			int flag, struct thread *td)
1245130f4520SKenneth D. Merry {
1246130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
1247130f4520SKenneth D. Merry 	int error;
1248130f4520SKenneth D. Merry 
1249130f4520SKenneth D. Merry 	softc = &backend_block_softc;
1250130f4520SKenneth D. Merry 
1251130f4520SKenneth D. Merry 	error = 0;
1252130f4520SKenneth D. Merry 
1253130f4520SKenneth D. Merry 	switch (cmd) {
1254130f4520SKenneth D. Merry 	case CTL_LUN_REQ: {
1255130f4520SKenneth D. Merry 		struct ctl_lun_req *lun_req;
1256130f4520SKenneth D. Merry 
1257130f4520SKenneth D. Merry 		lun_req = (struct ctl_lun_req *)addr;
1258130f4520SKenneth D. Merry 
1259130f4520SKenneth D. Merry 		switch (lun_req->reqtype) {
1260130f4520SKenneth D. Merry 		case CTL_LUNREQ_CREATE:
1261130f4520SKenneth D. Merry 			error = ctl_be_block_create(softc, lun_req);
1262130f4520SKenneth D. Merry 			break;
1263130f4520SKenneth D. Merry 		case CTL_LUNREQ_RM:
1264130f4520SKenneth D. Merry 			error = ctl_be_block_rm(softc, lun_req);
1265130f4520SKenneth D. Merry 			break;
1266130f4520SKenneth D. Merry 		default:
1267130f4520SKenneth D. Merry 			lun_req->status = CTL_LUN_ERROR;
1268130f4520SKenneth D. Merry 			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
1269130f4520SKenneth D. Merry 				 "%s: invalid LUN request type %d", __func__,
1270130f4520SKenneth D. Merry 				 lun_req->reqtype);
1271130f4520SKenneth D. Merry 			break;
1272130f4520SKenneth D. Merry 		}
1273130f4520SKenneth D. Merry 		break;
1274130f4520SKenneth D. Merry 	}
1275130f4520SKenneth D. Merry 	default:
1276130f4520SKenneth D. Merry 		error = ENOTTY;
1277130f4520SKenneth D. Merry 		break;
1278130f4520SKenneth D. Merry 	}
1279130f4520SKenneth D. Merry 
1280130f4520SKenneth D. Merry 	return (error);
1281130f4520SKenneth D. Merry }
1282130f4520SKenneth D. Merry 
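/*
 * Finish setting up a LUN backed by a regular file: install the file
 * dispatch and flush routines, hold a reference to the caller's
 * credentials, and derive the LUN size and exported block size from
 * the file attributes and the creation parameters.
 */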
1283130f4520SKenneth D. Merry static int
1284130f4520SKenneth D. Merry ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1285130f4520SKenneth D. Merry {
1286130f4520SKenneth D. Merry 	struct ctl_be_block_filedata *file_data;
1287130f4520SKenneth D. Merry 	struct ctl_lun_create_params *params;
1288130f4520SKenneth D. Merry 	struct vattr		      vattr;
1289130f4520SKenneth D. Merry 	int			      error;
1290130f4520SKenneth D. Merry 
1291130f4520SKenneth D. Merry 	error = 0;
1292130f4520SKenneth D. Merry 	file_data = &be_lun->backend.file;
1293130f4520SKenneth D. Merry 	params = &req->reqdata.create;
1294130f4520SKenneth D. Merry 
1295130f4520SKenneth D. Merry 	be_lun->dev_type = CTL_BE_BLOCK_FILE;
1296130f4520SKenneth D. Merry 	be_lun->dispatch = ctl_be_block_dispatch_file;
1297130f4520SKenneth D. Merry 	be_lun->lun_flush = ctl_be_block_flush_file;
1298130f4520SKenneth D. Merry 
1299130f4520SKenneth D. Merry 	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
1300130f4520SKenneth D. Merry 	if (error != 0) {
1301130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1302130f4520SKenneth D. Merry 			 "%s: error calling VOP_GETATTR() for file %s",
1303130f4520SKenneth D. Merry 			 __func__, be_lun->dev_path);
1304130f4520SKenneth D. Merry 		return (error);
1305130f4520SKenneth D. Merry 	}
1306130f4520SKenneth D. Merry 
1307130f4520SKenneth D. Merry 	/*
1308130f4520SKenneth D. Merry 	 * Verify that we have the ability to upgrade to exclusive
1309130f4520SKenneth D. Merry 	 * access on this file so we can trap errors at open instead
1310130f4520SKenneth D. Merry 	 * of reporting them during first access.
1311130f4520SKenneth D. Merry 	 */
1312130f4520SKenneth D. Merry 	if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
1313130f4520SKenneth D. Merry 		vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
1314130f4520SKenneth D. Merry 		if (be_lun->vn->v_iflag & VI_DOOMED) {
1315130f4520SKenneth D. Merry 			error = EBADF;
1316130f4520SKenneth D. Merry 			snprintf(req->error_str, sizeof(req->error_str),
1317130f4520SKenneth D. Merry 				 "error locking file %s", be_lun->dev_path);
1318130f4520SKenneth D. Merry 			return (error);
1319130f4520SKenneth D. Merry 		}
1320130f4520SKenneth D. Merry 	}
1321130f4520SKenneth D. Merry 
1323130f4520SKenneth D. Merry 	file_data->cred = crhold(curthread->td_ucred);
1324130f4520SKenneth D. Merry 	be_lun->size_bytes = vattr.va_size;
1325130f4520SKenneth D. Merry 	/*
1326130f4520SKenneth D. Merry 	 * We set the multi thread flag for file operations because all
1327130f4520SKenneth D. Merry 	 * filesystems (in theory) are capable of allowing multiple readers
1328130f4520SKenneth D. Merry 	 * of a file at once.  So we want to get the maximum possible
1329130f4520SKenneth D. Merry 	 * concurrency.
1330130f4520SKenneth D. Merry 	 */
1331130f4520SKenneth D. Merry 	be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;
1332130f4520SKenneth D. Merry 
1333130f4520SKenneth D. Merry 	/*
1334130f4520SKenneth D. Merry 	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
1335130f4520SKenneth D. Merry 	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
1336130f4520SKenneth D. Merry 	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
1337130f4520SKenneth D. Merry 	 * may not work with other OSes as well.  So just export a sector
1338130f4520SKenneth D. Merry 	 * size of 512 bytes, which should work with any OS or
1339130f4520SKenneth D. Merry 	 * application.  Since our backing is a file, any block size will
1340130f4520SKenneth D. Merry 	 * work fine for the backing store.
1341130f4520SKenneth D. Merry 	 */
1342130f4520SKenneth D. Merry #if 0
1343130f4520SKenneth D. Merry 	be_lun->blocksize = vattr.va_blocksize;
1344130f4520SKenneth D. Merry #endif
1345130f4520SKenneth D. Merry 	if (params->blocksize_bytes != 0)
1346130f4520SKenneth D. Merry 		be_lun->blocksize = params->blocksize_bytes;
1347130f4520SKenneth D. Merry 	else
1348130f4520SKenneth D. Merry 		be_lun->blocksize = 512;
1349130f4520SKenneth D. Merry 
1350130f4520SKenneth D. Merry 	/*
1351130f4520SKenneth D. Merry 	 * Sanity check.  The media size has to be at least one
1352130f4520SKenneth D. Merry 	 * sector long.
1353130f4520SKenneth D. Merry 	 */
1354130f4520SKenneth D. Merry 	if (be_lun->size_bytes < be_lun->blocksize) {
1355130f4520SKenneth D. Merry 		error = EINVAL;
1356130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1357130f4520SKenneth D. Merry 			 "file %s size %ju < block size %u", be_lun->dev_path,
1358130f4520SKenneth D. Merry 			 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
1359130f4520SKenneth D. Merry 	}
1360130f4520SKenneth D. Merry 	return (error);
1361130f4520SKenneth D. Merry }
1362130f4520SKenneth D. Merry 
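/*
 * Finish setting up a LUN backed by a disk device: install the device
 * dispatch and flush routines, hold a cdevsw reference, and query the
 * sector size and media size through the device's d_ioctl entry point.
 */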
1363130f4520SKenneth D. Merry static int
1364130f4520SKenneth D. Merry ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1365130f4520SKenneth D. Merry {
1366130f4520SKenneth D. Merry 	struct ctl_lun_create_params *params;
1367130f4520SKenneth D. Merry 	struct vattr		      vattr;
1368130f4520SKenneth D. Merry 	struct cdev		     *dev;
1369130f4520SKenneth D. Merry 	struct cdevsw		     *devsw;
1370130f4520SKenneth D. Merry 	int			      error;
1371130f4520SKenneth D. Merry 
1372130f4520SKenneth D. Merry 	params = &req->reqdata.create;
1373130f4520SKenneth D. Merry 
1374130f4520SKenneth D. Merry 	be_lun->dev_type = CTL_BE_BLOCK_DEV;
1375130f4520SKenneth D. Merry 	be_lun->dispatch = ctl_be_block_dispatch_dev;
1376130f4520SKenneth D. Merry 	be_lun->lun_flush = ctl_be_block_flush_dev;
1377130f4520SKenneth D. Merry 	be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
1378130f4520SKenneth D. Merry 	be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
1379130f4520SKenneth D. Merry 					     &be_lun->backend.dev.dev_ref);
1380130f4520SKenneth D. Merry 	if (be_lun->backend.dev.csw == NULL)
1381130f4520SKenneth D. Merry 		panic("Unable to retrieve device switch");
1382130f4520SKenneth D. Merry 
1383130f4520SKenneth D. Merry 	error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
1384130f4520SKenneth D. Merry 	if (error) {
1385130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1386130f4520SKenneth D. Merry 			 "%s: error getting vnode attributes for device %s",
1387130f4520SKenneth D. Merry 			 __func__, be_lun->dev_path);
1388130f4520SKenneth D. Merry 		return (error);
1389130f4520SKenneth D. Merry 	}
1390130f4520SKenneth D. Merry 
1391130f4520SKenneth D. Merry 	dev = be_lun->vn->v_rdev;
1392130f4520SKenneth D. Merry 	devsw = dev->si_devsw;
1393130f4520SKenneth D. Merry 	if (!devsw->d_ioctl) {
1394130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1395130f4520SKenneth D. Merry 			 "%s: no d_ioctl for device %s!", __func__,
1396130f4520SKenneth D. Merry 			 be_lun->dev_path);
1397130f4520SKenneth D. Merry 		return (ENODEV);
1398130f4520SKenneth D. Merry 	}
1399130f4520SKenneth D. Merry 
1400130f4520SKenneth D. Merry 	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
1401130f4520SKenneth D. Merry 			       (caddr_t)&be_lun->blocksize, FREAD,
1402130f4520SKenneth D. Merry 			       curthread);
1403130f4520SKenneth D. Merry 	if (error) {
1404130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1405130f4520SKenneth D. Merry 			 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
1406130f4520SKenneth D. Merry 			 "on %s!", __func__, error, be_lun->dev_path);
1407130f4520SKenneth D. Merry 		return (error);
1408130f4520SKenneth D. Merry 	}
1409130f4520SKenneth D. Merry 
1410130f4520SKenneth D. Merry 	/*
1411130f4520SKenneth D. Merry 	 * If the user has asked for a blocksize that is greater than the
1412130f4520SKenneth D. Merry 	 * backing device's blocksize, we can do it only if the blocksize
1413130f4520SKenneth D. Merry 	 * the user is asking for is an even multiple of the underlying
1414130f4520SKenneth D. Merry 	 * device's blocksize.
1415130f4520SKenneth D. Merry 	 */
1416130f4520SKenneth D. Merry 	if ((params->blocksize_bytes != 0)
1417130f4520SKenneth D. Merry 	 && (params->blocksize_bytes > be_lun->blocksize)) {
1418130f4520SKenneth D. Merry 		uint32_t bs_multiple, tmp_blocksize;
1419130f4520SKenneth D. Merry 
1420130f4520SKenneth D. Merry 		bs_multiple = params->blocksize_bytes / be_lun->blocksize;
1421130f4520SKenneth D. Merry 
1422130f4520SKenneth D. Merry 		tmp_blocksize = bs_multiple * be_lun->blocksize;
1423130f4520SKenneth D. Merry 
1424130f4520SKenneth D. Merry 		if (tmp_blocksize == params->blocksize_bytes) {
1425130f4520SKenneth D. Merry 			be_lun->blocksize = params->blocksize_bytes;
1426130f4520SKenneth D. Merry 		} else {
1427130f4520SKenneth D. Merry 			snprintf(req->error_str, sizeof(req->error_str),
1428130f4520SKenneth D. Merry 				 "%s: requested blocksize %u is not an even "
1429130f4520SKenneth D. Merry 				 "multiple of backing device blocksize %u",
1430130f4520SKenneth D. Merry 				 __func__, params->blocksize_bytes,
1431130f4520SKenneth D. Merry 				 be_lun->blocksize);
1432130f4520SKenneth D. Merry 			return (EINVAL);
1433130f4520SKenneth D. Merry 
1434130f4520SKenneth D. Merry 		}
1435130f4520SKenneth D. Merry 	} else if ((params->blocksize_bytes != 0)
1436130f4520SKenneth D. Merry 		&& (params->blocksize_bytes != be_lun->blocksize)) {
1437130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1438130f4520SKenneth D. Merry 			 "%s: requested blocksize %u < backing device "
1439130f4520SKenneth D. Merry 			 "blocksize %u", __func__, params->blocksize_bytes,
1440130f4520SKenneth D. Merry 			 be_lun->blocksize);
1441130f4520SKenneth D. Merry 		return (EINVAL);
1442130f4520SKenneth D. Merry 	}
1443130f4520SKenneth D. Merry 
1444130f4520SKenneth D. Merry 	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
1445130f4520SKenneth D. Merry 			       (caddr_t)&be_lun->size_bytes, FREAD,
1446130f4520SKenneth D. Merry 			       curthread);
1447130f4520SKenneth D. Merry 	if (error) {
1448130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1449130f4520SKenneth D. Merry 			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
1450130f4520SKenneth D. Merry 			 "on %s!", __func__, error, be_lun->dev_path);
1451130f4520SKenneth D. Merry 		return (error);
1452130f4520SKenneth D. Merry 	}
1453130f4520SKenneth D. Merry 
1454130f4520SKenneth D. Merry 	return (0);
1455130f4520SKenneth D. Merry 
1456130f4520SKenneth D. Merry }
1457130f4520SKenneth D. Merry 
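/*
 * Close the backing store for a LUN: release the cdevsw reference for
 * device-backed LUNs, close the vnode, and drop the cached credentials
 * for file-backed LUNs.
 */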
1459130f4520SKenneth D. Merry static int
1460130f4520SKenneth D. Merry ctl_be_block_close(struct ctl_be_block_lun *be_lun)
1461130f4520SKenneth D. Merry {
1462130f4520SKenneth D. Merry 	DROP_GIANT();
1463130f4520SKenneth D. Merry 	if (be_lun->vn) {
1464130f4520SKenneth D. Merry 		int flags = FREAD | FWRITE;
1465130f4520SKenneth D. Merry 		int vfs_is_locked = 0;
1466130f4520SKenneth D. Merry 
1467130f4520SKenneth D. Merry 		switch (be_lun->dev_type) {
1468130f4520SKenneth D. Merry 		case CTL_BE_BLOCK_DEV:
1469130f4520SKenneth D. Merry 			if (be_lun->backend.dev.csw) {
1470130f4520SKenneth D. Merry 				dev_relthread(be_lun->backend.dev.cdev,
1471130f4520SKenneth D. Merry 					      be_lun->backend.dev.dev_ref);
1472130f4520SKenneth D. Merry 				be_lun->backend.dev.csw  = NULL;
1473130f4520SKenneth D. Merry 				be_lun->backend.dev.cdev = NULL;
1474130f4520SKenneth D. Merry 			}
1475130f4520SKenneth D. Merry 			break;
1476130f4520SKenneth D. Merry 		case CTL_BE_BLOCK_FILE:
1477130f4520SKenneth D. Merry 			vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
1478130f4520SKenneth D. Merry 			break;
1479130f4520SKenneth D. Merry 		case CTL_BE_BLOCK_NONE:
1480130f4520SKenneth D. Merry 		default:
1481130f4520SKenneth D. Merry 			panic("Unexpected backend type.");
1482130f4520SKenneth D. Merry 			break;
1483130f4520SKenneth D. Merry 		}
1484130f4520SKenneth D. Merry 
1485130f4520SKenneth D. Merry 		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
1486130f4520SKenneth D. Merry 		be_lun->vn = NULL;
1487130f4520SKenneth D. Merry 
1488130f4520SKenneth D. Merry 		switch (be_lun->dev_type) {
1489130f4520SKenneth D. Merry 		case CTL_BE_BLOCK_DEV:
1490130f4520SKenneth D. Merry 			break;
1491130f4520SKenneth D. Merry 		case CTL_BE_BLOCK_FILE:
1492130f4520SKenneth D. Merry 			VFS_UNLOCK_GIANT(vfs_is_locked);
1493130f4520SKenneth D. Merry 			if (be_lun->backend.file.cred != NULL) {
1494130f4520SKenneth D. Merry 				crfree(be_lun->backend.file.cred);
1495130f4520SKenneth D. Merry 				be_lun->backend.file.cred = NULL;
1496130f4520SKenneth D. Merry 			}
1497130f4520SKenneth D. Merry 			break;
1498130f4520SKenneth D. Merry 		case CTL_BE_BLOCK_NONE:
1499130f4520SKenneth D. Merry 		default:
1500130f4520SKenneth D. Merry 			panic("Unexpected backend type.");
1501130f4520SKenneth D. Merry 			break;
1502130f4520SKenneth D. Merry 		}
1503130f4520SKenneth D. Merry 	}
1504130f4520SKenneth D. Merry 	PICKUP_GIANT();
1505130f4520SKenneth D. Merry 
1506130f4520SKenneth D. Merry 	return (0);
1507130f4520SKenneth D. Merry }
1508130f4520SKenneth D. Merry 
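/*
 * Open the backing store named by dev_path and hand off to the device-
 * or file-specific setup routine.  If a bare name fails to open, retry
 * once with "/dev/" prepended.
 */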
1509130f4520SKenneth D. Merry static int
1510130f4520SKenneth D. Merry ctl_be_block_open(struct ctl_be_block_softc *softc,
1511130f4520SKenneth D. Merry 		       struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1512130f4520SKenneth D. Merry {
1513130f4520SKenneth D. Merry 	struct nameidata nd;
1514130f4520SKenneth D. Merry 	int		 flags;
1515130f4520SKenneth D. Merry 	int		 error;
1516130f4520SKenneth D. Merry 	int		 vfs_is_locked;
1517130f4520SKenneth D. Merry 
1518130f4520SKenneth D. Merry 	/*
1519130f4520SKenneth D. Merry 	 * XXX KDM allow a read-only option?
1520130f4520SKenneth D. Merry 	 */
1521130f4520SKenneth D. Merry 	flags = FREAD | FWRITE;
1522130f4520SKenneth D. Merry 	error = 0;
1523130f4520SKenneth D. Merry 
1524130f4520SKenneth D. Merry 	if (rootvnode == NULL) {
1525130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1526130f4520SKenneth D. Merry 			 "%s: Root filesystem is not mounted", __func__);
1527130f4520SKenneth D. Merry 		return (1);
1528130f4520SKenneth D. Merry 	}
1529130f4520SKenneth D. Merry 
1530130f4520SKenneth D. Merry 	if (!curthread->td_proc->p_fd->fd_cdir) {
1531130f4520SKenneth D. Merry 		curthread->td_proc->p_fd->fd_cdir = rootvnode;
1532130f4520SKenneth D. Merry 		VREF(rootvnode);
1533130f4520SKenneth D. Merry 	}
1534130f4520SKenneth D. Merry 	if (!curthread->td_proc->p_fd->fd_rdir) {
1535130f4520SKenneth D. Merry 		curthread->td_proc->p_fd->fd_rdir = rootvnode;
1536130f4520SKenneth D. Merry 		VREF(rootvnode);
1537130f4520SKenneth D. Merry 	}
1538130f4520SKenneth D. Merry 	if (!curthread->td_proc->p_fd->fd_jdir) {
1539130f4520SKenneth D. Merry 		curthread->td_proc->p_fd->fd_jdir = rootvnode;
1540130f4520SKenneth D. Merry 		VREF(rootvnode);
1541130f4520SKenneth D. Merry 	}
1542130f4520SKenneth D. Merry 
1543130f4520SKenneth D. Merry  again:
1544130f4520SKenneth D. Merry 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
1545130f4520SKenneth D. Merry 	error = vn_open(&nd, &flags, 0, NULL);
1546130f4520SKenneth D. Merry 	if (error) {
1547130f4520SKenneth D. Merry 		/*
1548130f4520SKenneth D. Merry 		 * This is the only reasonable guess we can make for the
1549130f4520SKenneth D. Merry 		 * path if the user doesn't give us a fully qualified path.
1550130f4520SKenneth D. Merry 		 * If they want to specify a file, they need to specify the
1551130f4520SKenneth D. Merry 		 * full path.
1552130f4520SKenneth D. Merry 		 */
1553130f4520SKenneth D. Merry 		if (be_lun->dev_path[0] != '/') {
1554130f4520SKenneth D. Merry 			const char *dev_path = "/dev/";
1555130f4520SKenneth D. Merry 			char *dev_name;
1556130f4520SKenneth D. Merry 
1557130f4520SKenneth D. Merry 			/* Try adding device path at beginning of name */
1558130f4520SKenneth D. Merry 			dev_name = malloc(strlen(be_lun->dev_path)
1559130f4520SKenneth D. Merry 					+ strlen(dev_path) + 1,
1560130f4520SKenneth D. Merry 					  M_CTLBLK, M_WAITOK);
1561130f4520SKenneth D. Merry 			if (dev_name) {
1562130f4520SKenneth D. Merry 				sprintf(dev_name, "%s%s", dev_path,
1563130f4520SKenneth D. Merry 					be_lun->dev_path);
1564130f4520SKenneth D. Merry 				free(be_lun->dev_path, M_CTLBLK);
1565130f4520SKenneth D. Merry 				be_lun->dev_path = dev_name;
1566130f4520SKenneth D. Merry 				goto again;
1567130f4520SKenneth D. Merry 			}
1568130f4520SKenneth D. Merry 		}
1569130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1570130f4520SKenneth D. Merry 			 "%s: error opening %s", __func__, be_lun->dev_path);
1571130f4520SKenneth D. Merry 		return (error);
1572130f4520SKenneth D. Merry 	}
1573130f4520SKenneth D. Merry 
1574130f4520SKenneth D. Merry 	vfs_is_locked = NDHASGIANT(&nd);
1575130f4520SKenneth D. Merry 
1576130f4520SKenneth D. Merry 	NDFREE(&nd, NDF_ONLY_PNBUF);
1577130f4520SKenneth D. Merry 
1578130f4520SKenneth D. Merry 	be_lun->vn = nd.ni_vp;
1579130f4520SKenneth D. Merry 
1580130f4520SKenneth D. Merry 	/* We only support disks and files. */
1581130f4520SKenneth D. Merry 	if (vn_isdisk(be_lun->vn, &error)) {
1582130f4520SKenneth D. Merry 		error = ctl_be_block_open_dev(be_lun, req);
1583130f4520SKenneth D. Merry 	} else if (be_lun->vn->v_type == VREG) {
1584130f4520SKenneth D. Merry 		error = ctl_be_block_open_file(be_lun, req);
1585130f4520SKenneth D. Merry 	} else {
1586130f4520SKenneth D. Merry 		error = EINVAL;
1587130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1588130f4520SKenneth D. Merry 			 "%s is not a disk or file", be_lun->dev_path);
1589130f4520SKenneth D. Merry 	}
1590130f4520SKenneth D. Merry 	VOP_UNLOCK(be_lun->vn, 0);
1591130f4520SKenneth D. Merry 	VFS_UNLOCK_GIANT(vfs_is_locked);
1592130f4520SKenneth D. Merry 
1593130f4520SKenneth D. Merry 	if (error != 0) {
1594130f4520SKenneth D. Merry 		ctl_be_block_close(be_lun);
1595130f4520SKenneth D. Merry 		return (error);
1596130f4520SKenneth D. Merry 	}
1597130f4520SKenneth D. Merry 
1598130f4520SKenneth D. Merry 	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
1599130f4520SKenneth D. Merry 	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
1600130f4520SKenneth D. Merry 
1601130f4520SKenneth D. Merry 	return (0);
1602130f4520SKenneth D. Merry 
1603130f4520SKenneth D. Merry }
1604130f4520SKenneth D. Merry 
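/*
 * Constructor and destructor for the per-LUN I/O buffer zone.  The
 * constructor is a no-op; the destructor zeroes buffers as they are
 * released back to the zone.
 */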
1605130f4520SKenneth D. Merry static int
1606130f4520SKenneth D. Merry ctl_be_block_mem_ctor(void *mem, int size, void *arg, int flags)
1607130f4520SKenneth D. Merry {
1608130f4520SKenneth D. Merry 	return (0);
1609130f4520SKenneth D. Merry }
1610130f4520SKenneth D. Merry 
1611130f4520SKenneth D. Merry static void
1612130f4520SKenneth D. Merry ctl_be_block_mem_dtor(void *mem, int size, void *arg)
1613130f4520SKenneth D. Merry {
1614130f4520SKenneth D. Merry 	bzero(mem, size);
1615130f4520SKenneth D. Merry }
1616130f4520SKenneth D. Merry 
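/*
 * Create a new LUN: allocate the per-LUN state and buffer zone, open
 * the backing file or device, start the worker taskqueue threads,
 * register the LUN with CTL, and wait for the configuration callback
 * before reporting the result to the caller.
 */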
1617130f4520SKenneth D. Merry static int
1618130f4520SKenneth D. Merry ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
1619130f4520SKenneth D. Merry {
1620130f4520SKenneth D. Merry 	struct ctl_be_block_lun *be_lun;
1621130f4520SKenneth D. Merry 	struct ctl_lun_create_params *params;
1622130f4520SKenneth D. Merry 	struct ctl_be_arg *file_arg;
1623130f4520SKenneth D. Merry 	char tmpstr[32];
1624130f4520SKenneth D. Merry 	int retval, num_threads;
1625130f4520SKenneth D. Merry 	int i;
1626130f4520SKenneth D. Merry 
1627130f4520SKenneth D. Merry 	params = &req->reqdata.create;
1628130f4520SKenneth D. Merry 	retval = 0;
1629130f4520SKenneth D. Merry 
1630130f4520SKenneth D. Merry 	num_threads = cbb_num_threads;
1631130f4520SKenneth D. Merry 
1632130f4520SKenneth D. Merry 	file_arg = NULL;
1633130f4520SKenneth D. Merry 
1634130f4520SKenneth D. Merry 	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
1635130f4520SKenneth D. Merry 
1636130f4520SKenneth D. Merry 	if (be_lun == NULL) {
1637130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1638130f4520SKenneth D. Merry 			 "%s: error allocating %zd bytes", __func__,
1639130f4520SKenneth D. Merry 			 sizeof(*be_lun));
1640130f4520SKenneth D. Merry 		goto bailout_error;
1641130f4520SKenneth D. Merry 	}
1642130f4520SKenneth D. Merry 
1643130f4520SKenneth D. Merry 	be_lun->softc = softc;
1644130f4520SKenneth D. Merry 	STAILQ_INIT(&be_lun->input_queue);
1645130f4520SKenneth D. Merry 	STAILQ_INIT(&be_lun->config_write_queue);
1646130f4520SKenneth D. Merry 	STAILQ_INIT(&be_lun->datamove_queue);
1647130f4520SKenneth D. Merry 	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
1648130f4520SKenneth D. Merry 	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);
1649130f4520SKenneth D. Merry 
1650130f4520SKenneth D. Merry 	be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
1651130f4520SKenneth D. Merry 	    ctl_be_block_mem_ctor, ctl_be_block_mem_dtor, NULL, NULL,
1652130f4520SKenneth D. Merry 	    /*align*/ 0, /*flags*/0);
1653130f4520SKenneth D. Merry 
1654130f4520SKenneth D. Merry 	if (be_lun->lun_zone == NULL) {
1655130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1656130f4520SKenneth D. Merry 			 "%s: error allocating UMA zone", __func__);
1657130f4520SKenneth D. Merry 		goto bailout_error;
1658130f4520SKenneth D. Merry 	}
1659130f4520SKenneth D. Merry 
1660130f4520SKenneth D. Merry 	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
1661130f4520SKenneth D. Merry 		be_lun->ctl_be_lun.lun_type = params->device_type;
1662130f4520SKenneth D. Merry 	else
1663130f4520SKenneth D. Merry 		be_lun->ctl_be_lun.lun_type = T_DIRECT;
1664130f4520SKenneth D. Merry 
1665130f4520SKenneth D. Merry 	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
1666130f4520SKenneth D. Merry 		for (i = 0; i < req->num_be_args; i++) {
1667130f4520SKenneth D. Merry 			if (strcmp(req->kern_be_args[i].name, "file") == 0) {
1668130f4520SKenneth D. Merry 				file_arg = &req->kern_be_args[i];
1669130f4520SKenneth D. Merry 				break;
1670130f4520SKenneth D. Merry 			}
1671130f4520SKenneth D. Merry 		}
1672130f4520SKenneth D. Merry 
1673130f4520SKenneth D. Merry 		if (file_arg == NULL) {
1674130f4520SKenneth D. Merry 			snprintf(req->error_str, sizeof(req->error_str),
1675130f4520SKenneth D. Merry 				 "%s: no file argument specified", __func__);
1676130f4520SKenneth D. Merry 			goto bailout_error;
1677130f4520SKenneth D. Merry 		}
1678130f4520SKenneth D. Merry 
1679130f4520SKenneth D. Merry 		be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
1680130f4520SKenneth D. Merry 					  M_WAITOK | M_ZERO);
1681130f4520SKenneth D. Merry 		if (be_lun->dev_path == NULL) {
1682130f4520SKenneth D. Merry 			snprintf(req->error_str, sizeof(req->error_str),
1683130f4520SKenneth D. Merry 				 "%s: error allocating %d bytes", __func__,
1684130f4520SKenneth D. Merry 				 file_arg->vallen);
1685130f4520SKenneth D. Merry 			goto bailout_error;
1686130f4520SKenneth D. Merry 		}
1687130f4520SKenneth D. Merry 
1688130f4520SKenneth D. Merry 		strlcpy(be_lun->dev_path, (char *)file_arg->value,
1689130f4520SKenneth D. Merry 			file_arg->vallen);
1690130f4520SKenneth D. Merry 
1691130f4520SKenneth D. Merry 		retval = ctl_be_block_open(softc, be_lun, req);
1692130f4520SKenneth D. Merry 		if (retval != 0) {
1693130f4520SKenneth D. Merry 			retval = 0;
1694130f4520SKenneth D. Merry 			goto bailout_error;
1695130f4520SKenneth D. Merry 		}
1696130f4520SKenneth D. Merry 
1697130f4520SKenneth D. Merry 		/*
1698130f4520SKenneth D. Merry 		 * Tell the user the size of the file/device.
1699130f4520SKenneth D. Merry 		 */
1700130f4520SKenneth D. Merry 		params->lun_size_bytes = be_lun->size_bytes;
1701130f4520SKenneth D. Merry 
1702130f4520SKenneth D. Merry 		/*
1703130f4520SKenneth D. Merry 		 * The maximum LBA is the size - 1.
1704130f4520SKenneth D. Merry 		 */
1705130f4520SKenneth D. Merry 		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
1706130f4520SKenneth D. Merry 	} else {
1707130f4520SKenneth D. Merry 		/*
1708130f4520SKenneth D. Merry 		 * For processor devices, we don't have any size.
1709130f4520SKenneth D. Merry 		 */
1710130f4520SKenneth D. Merry 		be_lun->blocksize = 0;
1711130f4520SKenneth D. Merry 		be_lun->size_blocks = 0;
1712130f4520SKenneth D. Merry 		be_lun->size_bytes = 0;
1713130f4520SKenneth D. Merry 		be_lun->ctl_be_lun.maxlba = 0;
1714130f4520SKenneth D. Merry 		params->lun_size_bytes = 0;
1715130f4520SKenneth D. Merry 
1716130f4520SKenneth D. Merry 		/*
1717130f4520SKenneth D. Merry 		 * Default to just 1 thread for processor devices.
1718130f4520SKenneth D. Merry 		 */
1719130f4520SKenneth D. Merry 		num_threads = 1;
1720130f4520SKenneth D. Merry 	}
1721130f4520SKenneth D. Merry 
1722130f4520SKenneth D. Merry 	/*
1723130f4520SKenneth D. Merry 	 * XXX This searching loop might be refactored and combined with
1724130f4520SKenneth D. Merry 	 * the file argument loop above.
1725130f4520SKenneth D. Merry 	 */
1726130f4520SKenneth D. Merry 	for (i = 0; i < req->num_be_args; i++) {
1727130f4520SKenneth D. Merry 		if (strcmp(req->kern_be_args[i].name, "num_threads") == 0) {
1728130f4520SKenneth D. Merry 			struct ctl_be_arg *thread_arg;
1729130f4520SKenneth D. Merry 			char num_thread_str[16];
1730130f4520SKenneth D. Merry 			int tmp_num_threads;
1731130f4520SKenneth D. Merry 
1733130f4520SKenneth D. Merry 			thread_arg = &req->kern_be_args[i];
1734130f4520SKenneth D. Merry 
1735130f4520SKenneth D. Merry 			strlcpy(num_thread_str, (char *)thread_arg->value,
1736130f4520SKenneth D. Merry 				min(thread_arg->vallen,
1737130f4520SKenneth D. Merry 				sizeof(num_thread_str)));
1738130f4520SKenneth D. Merry 
1739130f4520SKenneth D. Merry 			tmp_num_threads = strtol(num_thread_str, NULL, 0);
1740130f4520SKenneth D. Merry 
1741130f4520SKenneth D. Merry 			/*
1742130f4520SKenneth D. Merry 			 * We don't let the user specify fewer than one
1743130f4520SKenneth D. Merry 			 * thread, but we trust them not to ask for
1744130f4520SKenneth D. Merry 			 * something absurd like 1000 threads.
1745130f4520SKenneth D. Merry 			 */
1746130f4520SKenneth D. Merry 			if (tmp_num_threads < 1) {
1747130f4520SKenneth D. Merry 				snprintf(req->error_str, sizeof(req->error_str),
1748130f4520SKenneth D. Merry 					 "%s: invalid number of threads %s",
1749130f4520SKenneth D. Merry 				         __func__, num_thread_str);
1750130f4520SKenneth D. Merry 				goto bailout_error;
1751130f4520SKenneth D. Merry 			}
1752130f4520SKenneth D. Merry 
1753130f4520SKenneth D. Merry 			num_threads = tmp_num_threads;
1754130f4520SKenneth D. Merry 		}
1755130f4520SKenneth D. Merry 	}
1756130f4520SKenneth D. Merry 
1757130f4520SKenneth D. Merry 	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
1758130f4520SKenneth D. Merry 	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
1759130f4520SKenneth D. Merry 	be_lun->ctl_be_lun.be_lun = be_lun;
1760130f4520SKenneth D. Merry 	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
1761130f4520SKenneth D. Merry 	/* Tell the user the blocksize we ended up using */
1762130f4520SKenneth D. Merry 	params->blocksize_bytes = be_lun->blocksize;
1763130f4520SKenneth D. Merry 	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
1764130f4520SKenneth D. Merry 		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
1765130f4520SKenneth D. Merry 		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
1766130f4520SKenneth D. Merry 	} else
1767130f4520SKenneth D. Merry 		be_lun->ctl_be_lun.req_lun_id = 0;
1768130f4520SKenneth D. Merry 
1769130f4520SKenneth D. Merry 	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
1770130f4520SKenneth D. Merry 	be_lun->ctl_be_lun.lun_config_status =
1771130f4520SKenneth D. Merry 		ctl_be_block_lun_config_status;
1772130f4520SKenneth D. Merry 	be_lun->ctl_be_lun.be = &ctl_be_block_driver;
1773130f4520SKenneth D. Merry 
1774130f4520SKenneth D. Merry 	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
1775130f4520SKenneth D. Merry 		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
1776130f4520SKenneth D. Merry 			 softc->num_luns);
1777130f4520SKenneth D. Merry 		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
1778130f4520SKenneth D. Merry 			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
1779130f4520SKenneth D. Merry 			sizeof(tmpstr)));
1780130f4520SKenneth D. Merry 
1781130f4520SKenneth D. Merry 		/* Tell the user what we used for a serial number */
1782130f4520SKenneth D. Merry 		strncpy((char *)params->serial_num, tmpstr,
1783130f4520SKenneth D. Merry 			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
1784130f4520SKenneth D. Merry 	} else {
1785130f4520SKenneth D. Merry 		strncpy((char *)be_lun->ctl_be_lun.serial_num,
1786130f4520SKenneth D. Merry 			params->serial_num,
1787130f4520SKenneth D. Merry 			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
1788130f4520SKenneth D. Merry 			sizeof(params->serial_num)));
1789130f4520SKenneth D. Merry 	}
1790130f4520SKenneth D. Merry 	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
1791130f4520SKenneth D. Merry 		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
1792130f4520SKenneth D. Merry 		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
1793130f4520SKenneth D. Merry 			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
1794130f4520SKenneth D. Merry 			sizeof(tmpstr)));
1795130f4520SKenneth D. Merry 
1796130f4520SKenneth D. Merry 		/* Tell the user what we used for a device ID */
1797130f4520SKenneth D. Merry 		strncpy((char *)params->device_id, tmpstr,
1798130f4520SKenneth D. Merry 			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
1799130f4520SKenneth D. Merry 	} else {
1800130f4520SKenneth D. Merry 		strncpy((char *)be_lun->ctl_be_lun.device_id,
1801130f4520SKenneth D. Merry 			params->device_id,
1802130f4520SKenneth D. Merry 			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
1803130f4520SKenneth D. Merry 				sizeof(params->device_id)));
1804130f4520SKenneth D. Merry 	}
1805130f4520SKenneth D. Merry 
1806130f4520SKenneth D. Merry 	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
1807130f4520SKenneth D. Merry 
1808130f4520SKenneth D. Merry 	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
1809130f4520SKenneth D. Merry 	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
1810130f4520SKenneth D. Merry 
1811130f4520SKenneth D. Merry 	if (be_lun->io_taskqueue == NULL) {
1812130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1813130f4520SKenneth D. Merry 			 "%s: Unable to create taskqueue", __func__);
1814130f4520SKenneth D. Merry 		goto bailout_error;
1815130f4520SKenneth D. Merry 	}
1816130f4520SKenneth D. Merry 
1817130f4520SKenneth D. Merry 	/*
1818130f4520SKenneth D. Merry 	 * Note that we start the same number of threads by default for
1819130f4520SKenneth D. Merry 	 * both the file case and the block device case.  For the file
1820130f4520SKenneth D. Merry 	 * case, we need multiple threads to allow concurrency, because the
1821130f4520SKenneth D. Merry 	 * vnode interface is designed to be a blocking interface.  For the
1822130f4520SKenneth D. Merry 	 * block device case, ZFS zvols at least will block the caller's
1823130f4520SKenneth D. Merry 	 * context in many instances, and so we need multiple threads to
1824130f4520SKenneth D. Merry 	 * overcome that problem.  Other block devices don't need as many
1825130f4520SKenneth D. Merry 	 * threads, but they shouldn't cause too many problems.
1826130f4520SKenneth D. Merry 	 *
1827130f4520SKenneth D. Merry 	 * If the user wants to just have a single thread for a block
1828130f4520SKenneth D. Merry 	 * device, he can specify that when the LUN is created, or change
1829130f4520SKenneth D. Merry 	 * the tunable/sysctl to alter the default number of threads.
1830130f4520SKenneth D. Merry 	 */
1831130f4520SKenneth D. Merry 	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
1832130f4520SKenneth D. Merry 					 /*num threads*/num_threads,
1833130f4520SKenneth D. Merry 					 /*priority*/PWAIT,
1834130f4520SKenneth D. Merry 					 /*thread name*/
1835130f4520SKenneth D. Merry 					 "%s taskq", be_lun->lunname);
1836130f4520SKenneth D. Merry 
1837130f4520SKenneth D. Merry 	if (retval != 0)
1838130f4520SKenneth D. Merry 		goto bailout_error;
1839130f4520SKenneth D. Merry 
1840130f4520SKenneth D. Merry 	be_lun->num_threads = num_threads;
1841130f4520SKenneth D. Merry 
1842130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
1843130f4520SKenneth D. Merry 	softc->num_luns++;
1844130f4520SKenneth D. Merry 	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
1845130f4520SKenneth D. Merry 
1846130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
1847130f4520SKenneth D. Merry 
1848130f4520SKenneth D. Merry 	retval = ctl_add_lun(&be_lun->ctl_be_lun);
1849130f4520SKenneth D. Merry 	if (retval != 0) {
1850130f4520SKenneth D. Merry 		mtx_lock(&softc->lock);
1851130f4520SKenneth D. Merry 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
1852130f4520SKenneth D. Merry 			      links);
1853130f4520SKenneth D. Merry 		softc->num_luns--;
1854130f4520SKenneth D. Merry 		mtx_unlock(&softc->lock);
1855130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1856130f4520SKenneth D. Merry 			 "%s: ctl_add_lun() returned error %d, see dmesg for "
1857130f4520SKenneth D. Merry 			"details", __func__, retval);
1858130f4520SKenneth D. Merry 		retval = 0;
1859130f4520SKenneth D. Merry 		goto bailout_error;
1860130f4520SKenneth D. Merry 	}
1861130f4520SKenneth D. Merry 
1862130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
1863130f4520SKenneth D. Merry 
1864130f4520SKenneth D. Merry 	/*
1865130f4520SKenneth D. Merry 	 * Tell the config_status routine that we're waiting so it won't
1866130f4520SKenneth D. Merry 	 * clean up the LUN in the event of an error.
1867130f4520SKenneth D. Merry 	 */
1868130f4520SKenneth D. Merry 	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
1869130f4520SKenneth D. Merry 
1870130f4520SKenneth D. Merry 	while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
1871130f4520SKenneth D. Merry 		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
1872130f4520SKenneth D. Merry 		if (retval == EINTR)
1873130f4520SKenneth D. Merry 			break;
1874130f4520SKenneth D. Merry 	}
1875130f4520SKenneth D. Merry 	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
1876130f4520SKenneth D. Merry 
1877130f4520SKenneth D. Merry 	if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
1878130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1879130f4520SKenneth D. Merry 			 "%s: LUN configuration error, see dmesg for details",
1880130f4520SKenneth D. Merry 			 __func__);
1881130f4520SKenneth D. Merry 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
1882130f4520SKenneth D. Merry 			      links);
1883130f4520SKenneth D. Merry 		softc->num_luns--;
1884130f4520SKenneth D. Merry 		mtx_unlock(&softc->lock);
1885130f4520SKenneth D. Merry 		goto bailout_error;
1886130f4520SKenneth D. Merry 	} else {
1887130f4520SKenneth D. Merry 		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
1888130f4520SKenneth D. Merry 	}
1889130f4520SKenneth D. Merry 
1890130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
1891130f4520SKenneth D. Merry 
1892130f4520SKenneth D. Merry 	be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
1893130f4520SKenneth D. Merry 					       be_lun->blocksize,
1894130f4520SKenneth D. Merry 					       DEVSTAT_ALL_SUPPORTED,
1895130f4520SKenneth D. Merry 					       be_lun->ctl_be_lun.lun_type
1896130f4520SKenneth D. Merry 					       | DEVSTAT_TYPE_IF_OTHER,
1897130f4520SKenneth D. Merry 					       DEVSTAT_PRIORITY_OTHER);
1898130f4520SKenneth D. Merry 
1900130f4520SKenneth D. Merry 	req->status = CTL_LUN_OK;
1901130f4520SKenneth D. Merry 
1902130f4520SKenneth D. Merry 	return (retval);
1903130f4520SKenneth D. Merry 
1904130f4520SKenneth D. Merry bailout_error:
1905130f4520SKenneth D. Merry 	req->status = CTL_LUN_ERROR;
1906130f4520SKenneth D. Merry 
1907130f4520SKenneth D. Merry 	ctl_be_block_close(be_lun);
1908130f4520SKenneth D. Merry 
1909130f4520SKenneth D. Merry 	free(be_lun->dev_path, M_CTLBLK);
1910130f4520SKenneth D. Merry 	free(be_lun, M_CTLBLK);
1911130f4520SKenneth D. Merry 
1912130f4520SKenneth D. Merry 	return (retval);
1913130f4520SKenneth D. Merry }
1914130f4520SKenneth D. Merry 
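/*
 * Remove an existing LUN: disable and invalidate it in CTL, wait for
 * the shutdown callback to mark it unconfigured, then tear down the
 * taskqueue, backing store, statistics entry and per-LUN memory.
 */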
1915130f4520SKenneth D. Merry static int
1916130f4520SKenneth D. Merry ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
1917130f4520SKenneth D. Merry {
1918130f4520SKenneth D. Merry 	struct ctl_lun_rm_params *params;
1919130f4520SKenneth D. Merry 	struct ctl_be_block_lun *be_lun;
1920130f4520SKenneth D. Merry 	int retval;
1921130f4520SKenneth D. Merry 
1922130f4520SKenneth D. Merry 	params = &req->reqdata.rm;
1923130f4520SKenneth D. Merry 
1924130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
1925130f4520SKenneth D. Merry 
1926130f4520SKenneth D. Merry 	be_lun = NULL;
1927130f4520SKenneth D. Merry 
1928130f4520SKenneth D. Merry 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
1929130f4520SKenneth D. Merry 		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
1930130f4520SKenneth D. Merry 			break;
1931130f4520SKenneth D. Merry 	}
1932130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
1933130f4520SKenneth D. Merry 
1934130f4520SKenneth D. Merry 	if (be_lun == NULL) {
1935130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1936130f4520SKenneth D. Merry 			 "%s: LUN %u is not managed by the block backend",
1937130f4520SKenneth D. Merry 			 __func__, params->lun_id);
1938130f4520SKenneth D. Merry 		goto bailout_error;
1939130f4520SKenneth D. Merry 	}
1940130f4520SKenneth D. Merry 
1941130f4520SKenneth D. Merry 	retval = ctl_disable_lun(&be_lun->ctl_be_lun);
1942130f4520SKenneth D. Merry 
1943130f4520SKenneth D. Merry 	if (retval != 0) {
1944130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1945130f4520SKenneth D. Merry 			 "%s: error %d returned from ctl_disable_lun() for "
1946130f4520SKenneth D. Merry 			 "LUN %d", __func__, retval, params->lun_id);
1947130f4520SKenneth D. Merry 		goto bailout_error;
1948130f4520SKenneth D. Merry 
1949130f4520SKenneth D. Merry 	}
1950130f4520SKenneth D. Merry 
1951130f4520SKenneth D. Merry 	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
1952130f4520SKenneth D. Merry 	if (retval != 0) {
1953130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1954130f4520SKenneth D. Merry 			 "%s: error %d returned from ctl_invalidate_lun() for "
1955130f4520SKenneth D. Merry 			 "LUN %d", __func__, retval, params->lun_id);
1956130f4520SKenneth D. Merry 		goto bailout_error;
1957130f4520SKenneth D. Merry 	}
1958130f4520SKenneth D. Merry 
1959130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
1960130f4520SKenneth D. Merry 
1961130f4520SKenneth D. Merry 	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
1962130f4520SKenneth D. Merry 
1963130f4520SKenneth D. Merry 	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
1964130f4520SKenneth D. Merry 		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
1965130f4520SKenneth D. Merry 		if (retval == EINTR)
1966130f4520SKenneth D. Merry 			break;
1967130f4520SKenneth D. Merry 	}
1968130f4520SKenneth D. Merry 
1969130f4520SKenneth D. Merry 	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
1970130f4520SKenneth D. Merry 
1971130f4520SKenneth D. Merry 	if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
1972130f4520SKenneth D. Merry 		snprintf(req->error_str, sizeof(req->error_str),
1973130f4520SKenneth D. Merry 			 "%s: interrupted waiting for LUN to be freed",
1974130f4520SKenneth D. Merry 			 __func__);
1975130f4520SKenneth D. Merry 		mtx_unlock(&softc->lock);
1976130f4520SKenneth D. Merry 		goto bailout_error;
1977130f4520SKenneth D. Merry 	}
1978130f4520SKenneth D. Merry 
1979130f4520SKenneth D. Merry 	STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);
1980130f4520SKenneth D. Merry 
1981130f4520SKenneth D. Merry 	softc->num_luns--;
1982130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
1983130f4520SKenneth D. Merry 
1984130f4520SKenneth D. Merry 	taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
1985130f4520SKenneth D. Merry 
1986130f4520SKenneth D. Merry 	taskqueue_free(be_lun->io_taskqueue);
1987130f4520SKenneth D. Merry 
1988130f4520SKenneth D. Merry 	ctl_be_block_close(be_lun);
1989130f4520SKenneth D. Merry 
1990130f4520SKenneth D. Merry 	if (be_lun->disk_stats != NULL)
1991130f4520SKenneth D. Merry 		devstat_remove_entry(be_lun->disk_stats);
1992130f4520SKenneth D. Merry 
1993130f4520SKenneth D. Merry 	uma_zdestroy(be_lun->lun_zone);
1994130f4520SKenneth D. Merry 
1995130f4520SKenneth D. Merry 	free(be_lun->dev_path, M_CTLBLK);
1996130f4520SKenneth D. Merry 
1997130f4520SKenneth D. Merry 	free(be_lun, M_CTLBLK);
1998130f4520SKenneth D. Merry 
1999130f4520SKenneth D. Merry 	req->status = CTL_LUN_OK;
2000130f4520SKenneth D. Merry 
2001130f4520SKenneth D. Merry 	return (0);
2002130f4520SKenneth D. Merry 
2003130f4520SKenneth D. Merry bailout_error:
2004130f4520SKenneth D. Merry 
2005130f4520SKenneth D. Merry 	req->status = CTL_LUN_ERROR;
2006130f4520SKenneth D. Merry 
2007130f4520SKenneth D. Merry 	return (0);
2008130f4520SKenneth D. Merry }
2009130f4520SKenneth D. Merry 
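/*
 * Called by CTL when a LUN is shut down.  Mark the LUN unconfigured
 * and wake up anyone waiting on the transition.
 */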
2010130f4520SKenneth D. Merry static void
2011130f4520SKenneth D. Merry ctl_be_block_lun_shutdown(void *be_lun)
2012130f4520SKenneth D. Merry {
2013130f4520SKenneth D. Merry 	struct ctl_be_block_lun *lun;
2014130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
2015130f4520SKenneth D. Merry 
2016130f4520SKenneth D. Merry 	lun = (struct ctl_be_block_lun *)be_lun;
2017130f4520SKenneth D. Merry 
2018130f4520SKenneth D. Merry 	softc = lun->softc;
2019130f4520SKenneth D. Merry 
2020130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
2021130f4520SKenneth D. Merry 	lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
2022130f4520SKenneth D. Merry 	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
2023130f4520SKenneth D. Merry 		wakeup(lun);
2024130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
2025130f4520SKenneth D. Merry 
2026130f4520SKenneth D. Merry }
2027130f4520SKenneth D. Merry 
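/*
 * CTL configuration-status callback.  On successful LUN addition, clear the
 * "unconfigured" flag, wake any thread waiting in the creation path, and
 * enable the LUN.  On failure, record a configuration error instead so the
 * creator can report it.
 */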
2028130f4520SKenneth D. Merry static void
2029130f4520SKenneth D. Merry ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
2030130f4520SKenneth D. Merry {
2031130f4520SKenneth D. Merry 	struct ctl_be_block_lun *lun;
2032130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
2033130f4520SKenneth D. Merry 
2034130f4520SKenneth D. Merry 	lun = (struct ctl_be_block_lun *)be_lun;
2035130f4520SKenneth D. Merry 	softc = lun->softc;
2036130f4520SKenneth D. Merry 
2037130f4520SKenneth D. Merry 	if (status == CTL_LUN_CONFIG_OK) {
2038130f4520SKenneth D. Merry 		mtx_lock(&softc->lock);
2039130f4520SKenneth D. Merry 		lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
2040130f4520SKenneth D. Merry 		if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
2041130f4520SKenneth D. Merry 			wakeup(lun);
2042130f4520SKenneth D. Merry 		mtx_unlock(&softc->lock);
2043130f4520SKenneth D. Merry 
2044130f4520SKenneth D. Merry 		/*
2045130f4520SKenneth D. Merry 		 * We successfully added the LUN; attempt to enable it.
2046130f4520SKenneth D. Merry 		 */
2047130f4520SKenneth D. Merry 		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
2048130f4520SKenneth D. Merry 			printf("%s: ctl_enable_lun() failed!\n", __func__);
2049130f4520SKenneth D. Merry 			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
2050130f4520SKenneth D. Merry 				printf("%s: ctl_invalidate_lun() failed!\n",
2051130f4520SKenneth D. Merry 				       __func__);
2052130f4520SKenneth D. Merry 			}
2053130f4520SKenneth D. Merry 		}
2054130f4520SKenneth D. Merry 
2055130f4520SKenneth D. Merry 		return;
2056130f4520SKenneth D. Merry 	}
2057130f4520SKenneth D. Merry 
2059130f4520SKenneth D. Merry 	mtx_lock(&softc->lock);
2060130f4520SKenneth D. Merry 	lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
2061130f4520SKenneth D. Merry 	lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
2062130f4520SKenneth D. Merry 	wakeup(lun);
2063130f4520SKenneth D. Merry 	mtx_unlock(&softc->lock);
2064130f4520SKenneth D. Merry }
2065130f4520SKenneth D. Merry 
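/*
 * Handle configuration (non-data) writes.  SYNCHRONIZE CACHE commands are
 * queued on the LUN's task queue and completed asynchronously; START STOP
 * UNIT is handled inline by changing the LUN's state.
 */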
2067130f4520SKenneth D. Merry static int
2068130f4520SKenneth D. Merry ctl_be_block_config_write(union ctl_io *io)
2069130f4520SKenneth D. Merry {
2070130f4520SKenneth D. Merry 	struct ctl_be_block_lun *be_lun;
2071130f4520SKenneth D. Merry 	struct ctl_be_lun *ctl_be_lun;
2072130f4520SKenneth D. Merry 	int retval;
2073130f4520SKenneth D. Merry 
2074130f4520SKenneth D. Merry 	retval = 0;
2075130f4520SKenneth D. Merry 
2076130f4520SKenneth D. Merry 	DPRINTF("entered\n");
2077130f4520SKenneth D. Merry 
2078130f4520SKenneth D. Merry 	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
2079130f4520SKenneth D. Merry 		CTL_PRIV_BACKEND_LUN].ptr;
2080130f4520SKenneth D. Merry 	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
2081130f4520SKenneth D. Merry 
2082130f4520SKenneth D. Merry 	switch (io->scsiio.cdb[0]) {
2083130f4520SKenneth D. Merry 	case SYNCHRONIZE_CACHE:
2084130f4520SKenneth D. Merry 	case SYNCHRONIZE_CACHE_16:
2085130f4520SKenneth D. Merry 		/*
2086130f4520SKenneth D. Merry 		 * The upper level CTL code will filter out any CDBs with
2087130f4520SKenneth D. Merry 		 * the immediate bit set and return the proper error.
2088130f4520SKenneth D. Merry 		 *
2089130f4520SKenneth D. Merry 		 * We don't really need to worry about what LBA range the
2090130f4520SKenneth D. Merry 		 * user asked to be synced out.  When they issue a sync
2091130f4520SKenneth D. Merry 		 * cache command, we'll sync out the whole thing.
2092130f4520SKenneth D. Merry 		 */
2093130f4520SKenneth D. Merry 		mtx_lock(&be_lun->lock);
2094130f4520SKenneth D. Merry 		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
2095130f4520SKenneth D. Merry 				   links);
2096130f4520SKenneth D. Merry 		mtx_unlock(&be_lun->lock);
2097130f4520SKenneth D. Merry 		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
2098130f4520SKenneth D. Merry 		break;
2099130f4520SKenneth D. Merry 	case START_STOP_UNIT: {
2100130f4520SKenneth D. Merry 		struct scsi_start_stop_unit *cdb;
2101130f4520SKenneth D. Merry 
2102130f4520SKenneth D. Merry 		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
2103130f4520SKenneth D. Merry 
2104130f4520SKenneth D. Merry 		if (cdb->how & SSS_START)
2105130f4520SKenneth D. Merry 			retval = ctl_start_lun(ctl_be_lun);
2106130f4520SKenneth D. Merry 		else {
2107130f4520SKenneth D. Merry 			retval = ctl_stop_lun(ctl_be_lun);
2108130f4520SKenneth D. Merry 			/*
2109130f4520SKenneth D. Merry 			 * XXX KDM Copan-specific offline behavior.
2110130f4520SKenneth D. Merry 			 * Figure out a reasonable way to port this?
2111130f4520SKenneth D. Merry 			 */
2112130f4520SKenneth D. Merry #ifdef NEEDTOPORT
2113130f4520SKenneth D. Merry 			if ((retval == 0)
2114130f4520SKenneth D. Merry 			 && (cdb->byte2 & SSS_ONOFFLINE))
2115130f4520SKenneth D. Merry 				retval = ctl_lun_offline(ctl_be_lun);
2116130f4520SKenneth D. Merry #endif
2117130f4520SKenneth D. Merry 		}
2118130f4520SKenneth D. Merry 
2119130f4520SKenneth D. Merry 		/*
2120130f4520SKenneth D. Merry 		 * In general, the above routines should not fail.  They
2121130f4520SKenneth D. Merry 		 * just set state for the LUN.  So we've got something
2122130f4520SKenneth D. Merry 		 * pretty wrong here if we can't start or stop the LUN.
2123130f4520SKenneth D. Merry 		 */
2124130f4520SKenneth D. Merry 		if (retval != 0) {
2125130f4520SKenneth D. Merry 			ctl_set_internal_failure(&io->scsiio,
2126130f4520SKenneth D. Merry 						 /*sks_valid*/ 1,
2127130f4520SKenneth D. Merry 						 /*retry_count*/ 0xf051);
2128130f4520SKenneth D. Merry 			retval = CTL_RETVAL_COMPLETE;
2129130f4520SKenneth D. Merry 		} else {
2130130f4520SKenneth D. Merry 			ctl_set_success(&io->scsiio);
2131130f4520SKenneth D. Merry 		}
2132130f4520SKenneth D. Merry 		ctl_config_write_done(io);
2133130f4520SKenneth D. Merry 		break;
2134130f4520SKenneth D. Merry 	}
2135130f4520SKenneth D. Merry 	default:
2136130f4520SKenneth D. Merry 		ctl_set_invalid_opcode(&io->scsiio);
2137130f4520SKenneth D. Merry 		ctl_config_write_done(io);
2138130f4520SKenneth D. Merry 		retval = CTL_RETVAL_COMPLETE;
2139130f4520SKenneth D. Merry 		break;
2140130f4520SKenneth D. Merry 	}
2141130f4520SKenneth D. Merry 
2142130f4520SKenneth D. Merry 	return (retval);
2143130f4520SKenneth D. Merry 
2144130f4520SKenneth D. Merry }
2145130f4520SKenneth D. Merry 
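/*
 * Configuration reads: this backend does not currently handle any, so the
 * stub simply reports success.
 */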
2146130f4520SKenneth D. Merry static int
2147130f4520SKenneth D. Merry ctl_be_block_config_read(union ctl_io *io)
2148130f4520SKenneth D. Merry {
2149130f4520SKenneth D. Merry 	return (0);
2150130f4520SKenneth D. Merry }
2151130f4520SKenneth D. Merry 
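/*
 * Emit backend-specific LUN status as XML fragments into the caller's sbuf
 * (consumed by user-space tools such as "ctladm devlist").  We report the
 * number of worker threads and, when the LUN has a backing file or device,
 * its path.  A hypothetical LUN backed by /dev/da0 might produce something
 * like:
 *
 *	<num_threads>14</num_threads><file>/dev/da0</file>
 *
 * (the values shown are illustrative only).
 */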
2152130f4520SKenneth D. Merry static int
2153130f4520SKenneth D. Merry ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
2154130f4520SKenneth D. Merry {
2155130f4520SKenneth D. Merry 	struct ctl_be_block_lun *lun;
2156130f4520SKenneth D. Merry 	int retval;
2157130f4520SKenneth D. Merry 
2158130f4520SKenneth D. Merry 	lun = (struct ctl_be_block_lun *)be_lun;
2159130f4520SKenneth D. Merry 	retval = 0;
2160130f4520SKenneth D. Merry 
2161130f4520SKenneth D. Merry 	retval = sbuf_printf(sb, "<num_threads>");
2162130f4520SKenneth D. Merry 
2163130f4520SKenneth D. Merry 	if (retval != 0)
2164130f4520SKenneth D. Merry 		goto bailout;
2165130f4520SKenneth D. Merry 
2166130f4520SKenneth D. Merry 	retval = sbuf_printf(sb, "%d", lun->num_threads);
2167130f4520SKenneth D. Merry 
2168130f4520SKenneth D. Merry 	if (retval != 0)
2169130f4520SKenneth D. Merry 		goto bailout;
2170130f4520SKenneth D. Merry 
2171130f4520SKenneth D. Merry 	retval = sbuf_printf(sb, "</num_threads>");
2172130f4520SKenneth D. Merry 
2173130f4520SKenneth D. Merry 	/*
2174130f4520SKenneth D. Merry 	 * For processor devices, we don't have a path variable.
2175130f4520SKenneth D. Merry 	 */
2176130f4520SKenneth D. Merry 	if ((retval != 0)
2177130f4520SKenneth D. Merry 	 || (lun->dev_path == NULL))
2178130f4520SKenneth D. Merry 		goto bailout;
2179130f4520SKenneth D. Merry 
2180130f4520SKenneth D. Merry 	retval = sbuf_printf(sb, "<file>");
2181130f4520SKenneth D. Merry 
2182130f4520SKenneth D. Merry 	if (retval != 0)
2183130f4520SKenneth D. Merry 		goto bailout;
2184130f4520SKenneth D. Merry 
2185130f4520SKenneth D. Merry 	retval = ctl_sbuf_printf_esc(sb, lun->dev_path);
2186130f4520SKenneth D. Merry 
2187130f4520SKenneth D. Merry 	if (retval != 0)
2188130f4520SKenneth D. Merry 		goto bailout;
2189130f4520SKenneth D. Merry 
2190130f4520SKenneth D. Merry 	retval = sbuf_printf(sb, "</file>\n");
2191130f4520SKenneth D. Merry 
2192130f4520SKenneth D. Merry bailout:
2193130f4520SKenneth D. Merry 
2194130f4520SKenneth D. Merry 	return (retval);
2195130f4520SKenneth D. Merry }
2196130f4520SKenneth D. Merry 
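/*
 * One-time backend initialization: set up the softc mutex and queues and
 * seed the free pool with an initial batch of 200 beio descriptors so the
 * I/O path does not have to start from an empty pool.
 */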
2197130f4520SKenneth D. Merry int
2198130f4520SKenneth D. Merry ctl_be_block_init(void)
2199130f4520SKenneth D. Merry {
2200130f4520SKenneth D. Merry 	struct ctl_be_block_softc *softc;
2201130f4520SKenneth D. Merry 	int retval;
2202130f4520SKenneth D. Merry 
2203130f4520SKenneth D. Merry 	softc = &backend_block_softc;
2204130f4520SKenneth D. Merry 	retval = 0;
2205130f4520SKenneth D. Merry 
2206130f4520SKenneth D. Merry 	mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
2207130f4520SKenneth D. Merry 	STAILQ_INIT(&softc->beio_free_queue);
2208130f4520SKenneth D. Merry 	STAILQ_INIT(&softc->disk_list);
2209130f4520SKenneth D. Merry 	STAILQ_INIT(&softc->lun_list);
2210130f4520SKenneth D. Merry 	ctl_grow_beio(softc, 200);
2211130f4520SKenneth D. Merry 
2212130f4520SKenneth D. Merry 	return (retval);
2213130f4520SKenneth D. Merry }
2214