xref: /freebsd/sys/cam/scsi/scsi_pass.c (revision a9934668aa1322c0754a300570e1b559e6bd7b04)
1898b0535SWarner Losh /*-
23393f8daSKenneth D. Merry  * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
32a888f93SKenneth D. Merry  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
476babe50SJustin T. Gibbs  * All rights reserved.
576babe50SJustin T. Gibbs  *
676babe50SJustin T. Gibbs  * Redistribution and use in source and binary forms, with or without
776babe50SJustin T. Gibbs  * modification, are permitted provided that the following conditions
876babe50SJustin T. Gibbs  * are met:
976babe50SJustin T. Gibbs  * 1. Redistributions of source code must retain the above copyright
1076babe50SJustin T. Gibbs  *    notice, this list of conditions, and the following disclaimer,
1176babe50SJustin T. Gibbs  *    without modification, immediately at the beginning of the file.
1276babe50SJustin T. Gibbs  * 2. The name of the author may not be used to endorse or promote products
1376babe50SJustin T. Gibbs  *    derived from this software without specific prior written permission.
1476babe50SJustin T. Gibbs  *
1576babe50SJustin T. Gibbs  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1676babe50SJustin T. Gibbs  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1776babe50SJustin T. Gibbs  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1876babe50SJustin T. Gibbs  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
1976babe50SJustin T. Gibbs  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2076babe50SJustin T. Gibbs  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2176babe50SJustin T. Gibbs  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2276babe50SJustin T. Gibbs  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2376babe50SJustin T. Gibbs  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2476babe50SJustin T. Gibbs  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2576babe50SJustin T. Gibbs  * SUCH DAMAGE.
2676babe50SJustin T. Gibbs  */
2776babe50SJustin T. Gibbs 
28ee709e70SDavid E. O'Brien #include <sys/cdefs.h>
29ee709e70SDavid E. O'Brien __FBSDID("$FreeBSD$");
30ee709e70SDavid E. O'Brien 
31*a9934668SKenneth D. Merry #include "opt_kdtrace.h"
32*a9934668SKenneth D. Merry 
3376babe50SJustin T. Gibbs #include <sys/param.h>
3476babe50SJustin T. Gibbs #include <sys/systm.h>
3576babe50SJustin T. Gibbs #include <sys/kernel.h>
36*a9934668SKenneth D. Merry #include <sys/conf.h>
3776babe50SJustin T. Gibbs #include <sys/types.h>
389626b608SPoul-Henning Kamp #include <sys/bio.h>
39*a9934668SKenneth D. Merry #include <sys/bus.h>
4076babe50SJustin T. Gibbs #include <sys/devicestat.h>
41*a9934668SKenneth D. Merry #include <sys/errno.h>
42*a9934668SKenneth D. Merry #include <sys/fcntl.h>
43*a9934668SKenneth D. Merry #include <sys/malloc.h>
44f7312ca2SRobert Watson #include <sys/proc.h>
45*a9934668SKenneth D. Merry #include <sys/poll.h>
46*a9934668SKenneth D. Merry #include <sys/selinfo.h>
47*a9934668SKenneth D. Merry #include <sys/sdt.h>
48416494d7SJustin T. Gibbs #include <sys/taskqueue.h>
49*a9934668SKenneth D. Merry #include <vm/uma.h>
50*a9934668SKenneth D. Merry #include <vm/vm.h>
51*a9934668SKenneth D. Merry #include <vm/vm_extern.h>
52*a9934668SKenneth D. Merry 
53*a9934668SKenneth D. Merry #include <machine/bus.h>
5476babe50SJustin T. Gibbs 
5576babe50SJustin T. Gibbs #include <cam/cam.h>
5676babe50SJustin T. Gibbs #include <cam/cam_ccb.h>
5776babe50SJustin T. Gibbs #include <cam/cam_periph.h>
583393f8daSKenneth D. Merry #include <cam/cam_queue.h>
59*a9934668SKenneth D. Merry #include <cam/cam_xpt.h>
6076babe50SJustin T. Gibbs #include <cam/cam_xpt_periph.h>
6176babe50SJustin T. Gibbs #include <cam/cam_debug.h>
6225a2902cSScott Long #include <cam/cam_compat.h>
63*a9934668SKenneth D. Merry #include <cam/cam_xpt_periph.h>
6476babe50SJustin T. Gibbs 
6576babe50SJustin T. Gibbs #include <cam/scsi/scsi_all.h>
6676babe50SJustin T. Gibbs #include <cam/scsi/scsi_pass.h>
6776babe50SJustin T. Gibbs 
6876babe50SJustin T. Gibbs typedef enum {
6976babe50SJustin T. Gibbs 	PASS_FLAG_OPEN			= 0x01,
7076babe50SJustin T. Gibbs 	PASS_FLAG_LOCKED		= 0x02,
71ea37f519SKenneth D. Merry 	PASS_FLAG_INVALID		= 0x04,
72*a9934668SKenneth D. Merry 	PASS_FLAG_INITIAL_PHYSPATH	= 0x08,
73*a9934668SKenneth D. Merry 	PASS_FLAG_ZONE_INPROG		= 0x10,
74*a9934668SKenneth D. Merry 	PASS_FLAG_ZONE_VALID		= 0x20,
75*a9934668SKenneth D. Merry 	PASS_FLAG_UNMAPPED_CAPABLE	= 0x40,
76*a9934668SKenneth D. Merry 	PASS_FLAG_ABANDONED_REF_SET	= 0x80
7776babe50SJustin T. Gibbs } pass_flags;
7876babe50SJustin T. Gibbs 
7976babe50SJustin T. Gibbs typedef enum {
8076babe50SJustin T. Gibbs 	PASS_STATE_NORMAL
8176babe50SJustin T. Gibbs } pass_state;
8276babe50SJustin T. Gibbs 
8376babe50SJustin T. Gibbs typedef enum {
84*a9934668SKenneth D. Merry 	PASS_CCB_BUFFER_IO,
85*a9934668SKenneth D. Merry 	PASS_CCB_QUEUED_IO
8676babe50SJustin T. Gibbs } pass_ccb_types;
8776babe50SJustin T. Gibbs 
8876babe50SJustin T. Gibbs #define ccb_type	ppriv_field0
89*a9934668SKenneth D. Merry #define ccb_ioreq	ppriv_ptr1
90*a9934668SKenneth D. Merry 
91*a9934668SKenneth D. Merry /*
92*a9934668SKenneth D. Merry  * The maximum number of memory segments we preallocate.
93*a9934668SKenneth D. Merry  */
94*a9934668SKenneth D. Merry #define	PASS_MAX_SEGS	16
95*a9934668SKenneth D. Merry 
96*a9934668SKenneth D. Merry typedef enum {
97*a9934668SKenneth D. Merry 	PASS_IO_NONE		= 0x00,
98*a9934668SKenneth D. Merry 	PASS_IO_USER_SEG_MALLOC	= 0x01,
99*a9934668SKenneth D. Merry 	PASS_IO_KERN_SEG_MALLOC	= 0x02,
100*a9934668SKenneth D. Merry 	PASS_IO_ABANDONED	= 0x04
101*a9934668SKenneth D. Merry } pass_io_flags;
102*a9934668SKenneth D. Merry 
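/*
 * Per-request state for the queued (asynchronous) pass(4) interface.
 * 'ccb' holds the kernel copy of the user's CCB and 'user_ccb_ptr'
 * remembers where in userland status must eventually be returned;
 * 'alloced_ccb' points at the CCB the XPT hands us in passstart().  The
 * segment arrays and 'mapinfo' describe the data buffers for the
 * transfer: up to PASS_MAX_SEGS S/G entries fit in the inline arrays,
 * with the PASS_IO_*_SEG_MALLOC flags recording when a larger list was
 * allocated separately.
 */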
103*a9934668SKenneth D. Merry struct pass_io_req {
104*a9934668SKenneth D. Merry 	union ccb			 ccb;
105*a9934668SKenneth D. Merry 	union ccb			*alloced_ccb;
106*a9934668SKenneth D. Merry 	union ccb			*user_ccb_ptr;
107*a9934668SKenneth D. Merry 	camq_entry			 user_periph_links;
108*a9934668SKenneth D. Merry 	ccb_ppriv_area			 user_periph_priv;
109*a9934668SKenneth D. Merry 	struct cam_periph_map_info	 mapinfo;
110*a9934668SKenneth D. Merry 	pass_io_flags			 flags;
111*a9934668SKenneth D. Merry 	ccb_flags			 data_flags;
112*a9934668SKenneth D. Merry 	int				 num_user_segs;
113*a9934668SKenneth D. Merry 	bus_dma_segment_t		 user_segs[PASS_MAX_SEGS];
114*a9934668SKenneth D. Merry 	int				 num_kern_segs;
115*a9934668SKenneth D. Merry 	bus_dma_segment_t		 kern_segs[PASS_MAX_SEGS];
116*a9934668SKenneth D. Merry 	bus_dma_segment_t		*user_segptr;
117*a9934668SKenneth D. Merry 	bus_dma_segment_t		*kern_segptr;
118*a9934668SKenneth D. Merry 	int				 num_bufs;
119*a9934668SKenneth D. Merry 	uint32_t			 dirs[CAM_PERIPH_MAXMAPS];
120*a9934668SKenneth D. Merry 	uint32_t			 lengths[CAM_PERIPH_MAXMAPS];
121*a9934668SKenneth D. Merry 	uint8_t				*user_bufs[CAM_PERIPH_MAXMAPS];
122*a9934668SKenneth D. Merry 	uint8_t				*kern_bufs[CAM_PERIPH_MAXMAPS];
123*a9934668SKenneth D. Merry 	struct bintime			 start_time;
124*a9934668SKenneth D. Merry 	TAILQ_ENTRY(pass_io_req)	 links;
125*a9934668SKenneth D. Merry };
12676babe50SJustin T. Gibbs 
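/*
 * Queued I/O requests move through the queues below as follows: a request
 * sits on incoming_queue after the user submits it, moves to active_queue
 * once passstart() has merged it into an allocated CCB and dispatched it,
 * and lands on done_queue when passdone() sees it complete, at which point
 * the user is woken via selwakeuppri()/KNOTE_LOCKED() to collect status.
 * Requests whose status can no longer be delivered (final close or device
 * departure) are parked on abandoned_queue and freed as they complete.
 */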
12776babe50SJustin T. Gibbs struct pass_softc {
12876babe50SJustin T. Gibbs 	pass_state		  state;
12976babe50SJustin T. Gibbs 	pass_flags		  flags;
13076babe50SJustin T. Gibbs 	u_int8_t		  pd_type;
13176babe50SJustin T. Gibbs 	union ccb		  saved_ccb;
13286d45c7fSKenneth D. Merry 	int			  open_count;
133de239312SAlexander Motin 	u_int		 	  maxio;
134a9d2245eSPoul-Henning Kamp 	struct devstat		 *device_stats;
13589c9c53dSPoul-Henning Kamp 	struct cdev		 *dev;
136416494d7SJustin T. Gibbs 	struct cdev		 *alias_dev;
137416494d7SJustin T. Gibbs 	struct task		  add_physpath_task;
138*a9934668SKenneth D. Merry 	struct task		  shutdown_kqueue_task;
139*a9934668SKenneth D. Merry 	struct selinfo		  read_select;
140*a9934668SKenneth D. Merry 	TAILQ_HEAD(, pass_io_req) incoming_queue;
141*a9934668SKenneth D. Merry 	TAILQ_HEAD(, pass_io_req) active_queue;
142*a9934668SKenneth D. Merry 	TAILQ_HEAD(, pass_io_req) abandoned_queue;
143*a9934668SKenneth D. Merry 	TAILQ_HEAD(, pass_io_req) done_queue;
144*a9934668SKenneth D. Merry 	struct cam_periph	 *periph;
145*a9934668SKenneth D. Merry 	char			  zone_name[12];
146*a9934668SKenneth D. Merry 	char			  io_zone_name[12];
147*a9934668SKenneth D. Merry 	uma_zone_t		  pass_zone;
148*a9934668SKenneth D. Merry 	uma_zone_t		  pass_io_zone;
149*a9934668SKenneth D. Merry 	size_t			  io_zone_size;
15076babe50SJustin T. Gibbs };
15176babe50SJustin T. Gibbs 
15276babe50SJustin T. Gibbs static	d_open_t	passopen;
15376babe50SJustin T. Gibbs static	d_close_t	passclose;
15476babe50SJustin T. Gibbs static	d_ioctl_t	passioctl;
15525a2902cSScott Long static	d_ioctl_t	passdoioctl;
156*a9934668SKenneth D. Merry static	d_poll_t	passpoll;
157*a9934668SKenneth D. Merry static	d_kqfilter_t	passkqfilter;
158*a9934668SKenneth D. Merry static	void		passreadfiltdetach(struct knote *kn);
159*a9934668SKenneth D. Merry static	int		passreadfilt(struct knote *kn, long hint);
16076babe50SJustin T. Gibbs 
16176babe50SJustin T. Gibbs static	periph_init_t	passinit;
16276babe50SJustin T. Gibbs static	periph_ctor_t	passregister;
163ee9c90c7SKenneth D. Merry static	periph_oninv_t	passoninvalidate;
16476babe50SJustin T. Gibbs static	periph_dtor_t	passcleanup;
165*a9934668SKenneth D. Merry static	periph_start_t	passstart;
166*a9934668SKenneth D. Merry static	void		pass_shutdown_kqueue(void *context, int pending);
167416494d7SJustin T. Gibbs static	void		pass_add_physpath(void *context, int pending);
16876babe50SJustin T. Gibbs static	void		passasync(void *callback_arg, u_int32_t code,
16976babe50SJustin T. Gibbs 				  struct cam_path *path, void *arg);
170*a9934668SKenneth D. Merry static	void		passdone(struct cam_periph *periph,
171*a9934668SKenneth D. Merry 				 union ccb *done_ccb);
172*a9934668SKenneth D. Merry static	int		passcreatezone(struct cam_periph *periph);
173*a9934668SKenneth D. Merry static	void		passiocleanup(struct pass_softc *softc,
174*a9934668SKenneth D. Merry 				      struct pass_io_req *io_req);
175*a9934668SKenneth D. Merry static	int		passcopysglist(struct cam_periph *periph,
176*a9934668SKenneth D. Merry 				       struct pass_io_req *io_req,
177*a9934668SKenneth D. Merry 				       ccb_flags direction);
178*a9934668SKenneth D. Merry static	int		passmemsetup(struct cam_periph *periph,
179*a9934668SKenneth D. Merry 				     struct pass_io_req *io_req);
180*a9934668SKenneth D. Merry static	int		passmemdone(struct cam_periph *periph,
181*a9934668SKenneth D. Merry 				    struct pass_io_req *io_req);
18276babe50SJustin T. Gibbs static	int		passerror(union ccb *ccb, u_int32_t cam_flags,
18376babe50SJustin T. Gibbs 				  u_int32_t sense_flags);
18476babe50SJustin T. Gibbs static 	int		passsendccb(struct cam_periph *periph, union ccb *ccb,
18576babe50SJustin T. Gibbs 				    union ccb *inccb);
18676babe50SJustin T. Gibbs 
18776babe50SJustin T. Gibbs static struct periph_driver passdriver =
18876babe50SJustin T. Gibbs {
18976babe50SJustin T. Gibbs 	passinit, "pass",
19076babe50SJustin T. Gibbs 	TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0
19176babe50SJustin T. Gibbs };
19276babe50SJustin T. Gibbs 
1930b7c27b9SPeter Wemm PERIPHDRIVER_DECLARE(pass, passdriver);
19476babe50SJustin T. Gibbs 
1954e2f199eSPoul-Henning Kamp static struct cdevsw pass_cdevsw = {
196dc08ffecSPoul-Henning Kamp 	.d_version =	D_VERSION,
197c552ebe1SKenneth D. Merry 	.d_flags =	D_TRACKCLOSE,
1987ac40f5fSPoul-Henning Kamp 	.d_open =	passopen,
1997ac40f5fSPoul-Henning Kamp 	.d_close =	passclose,
2007ac40f5fSPoul-Henning Kamp 	.d_ioctl =	passioctl,
201*a9934668SKenneth D. Merry 	.d_poll = 	passpoll,
202*a9934668SKenneth D. Merry 	.d_kqfilter = 	passkqfilter,
2037ac40f5fSPoul-Henning Kamp 	.d_name =	"pass",
20476babe50SJustin T. Gibbs };
20576babe50SJustin T. Gibbs 
206*a9934668SKenneth D. Merry static struct filterops passread_filtops = {
207*a9934668SKenneth D. Merry 	.f_isfd	=	1,
208*a9934668SKenneth D. Merry 	.f_detach =	passreadfiltdetach,
209*a9934668SKenneth D. Merry 	.f_event =	passreadfilt
210*a9934668SKenneth D. Merry };
211*a9934668SKenneth D. Merry 
212*a9934668SKenneth D. Merry static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers");
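
/*
 * For reference, a minimal userland consumer of the synchronous pass(4)
 * interface looks roughly like the sketch below.  It uses libcam (cam(3)),
 * which wraps the CAMIOCOMMAND ioctl serviced by passioctl() and
 * passsendccb() in this driver.  Illustrative sketch only: error handling
 * is omitted and "/dev/pass0" is just an example device node.
 *
 *	#include <fcntl.h>
 *	#include <camlib.h>
 *	#include <cam/scsi/scsi_all.h>
 *	#include <cam/scsi/scsi_message.h>
 *
 *	struct cam_device *dev = cam_open_device("/dev/pass0", O_RDWR);
 *	union ccb *ccb = cam_getccb(dev);
 *	uint8_t inq_buf[SHORT_INQUIRY_LENGTH];
 *
 *	scsi_inquiry(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG, inq_buf,
 *	    sizeof(inq_buf), 0, 0, SSD_FULL_SIZE, 5000);
 *	if (cam_send_ccb(dev, ccb) == 0 &&
 *	    (ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
 *		... inq_buf now holds the INQUIRY data ...
 *	cam_freeccb(ccb);
 *	cam_close_device(dev);
 */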
213*a9934668SKenneth D. Merry 
21476babe50SJustin T. Gibbs static void
21576babe50SJustin T. Gibbs passinit(void)
21676babe50SJustin T. Gibbs {
21776babe50SJustin T. Gibbs 	cam_status status;
21876babe50SJustin T. Gibbs 
21976babe50SJustin T. Gibbs 	/*
22076babe50SJustin T. Gibbs 	 * Install a global async callback.  This callback will
22176babe50SJustin T. Gibbs 	 * receive async callbacks like "new device found".
22276babe50SJustin T. Gibbs 	 */
22385d92640SScott Long 	status = xpt_register_async(AC_FOUND_DEVICE, passasync, NULL, NULL);
22476babe50SJustin T. Gibbs 
22576babe50SJustin T. Gibbs 	if (status != CAM_REQ_CMP) {
22676babe50SJustin T. Gibbs 		printf("pass: Failed to attach master async callback "
22776babe50SJustin T. Gibbs 		       "due to status 0x%x!\n", status);
22876babe50SJustin T. Gibbs 	}
22976babe50SJustin T. Gibbs 
23076babe50SJustin T. Gibbs }
23176babe50SJustin T. Gibbs 
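/*
 * Called from passdevgonecb() once the devfs node is gone and no more
 * status can be returned to userland: flush the done and incoming queues,
 * and move anything still active onto the abandoned queue so it can be
 * reaped by passdone() as it completes.
 */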
23276babe50SJustin T. Gibbs static void
233*a9934668SKenneth D. Merry passrejectios(struct cam_periph *periph)
234*a9934668SKenneth D. Merry {
235*a9934668SKenneth D. Merry 	struct pass_io_req *io_req, *io_req2;
236*a9934668SKenneth D. Merry 	struct pass_softc *softc;
237*a9934668SKenneth D. Merry 
238*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
239*a9934668SKenneth D. Merry 
240*a9934668SKenneth D. Merry 	/*
241*a9934668SKenneth D. Merry 	 * The user can no longer get status for I/O on the done queue, so
242*a9934668SKenneth D. Merry 	 * clean up all outstanding I/O on the done queue.
243*a9934668SKenneth D. Merry 	 */
244*a9934668SKenneth D. Merry 	TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
245*a9934668SKenneth D. Merry 		TAILQ_REMOVE(&softc->done_queue, io_req, links);
246*a9934668SKenneth D. Merry 		passiocleanup(softc, io_req);
247*a9934668SKenneth D. Merry 		uma_zfree(softc->pass_zone, io_req);
248*a9934668SKenneth D. Merry 	}
249*a9934668SKenneth D. Merry 
250*a9934668SKenneth D. Merry 	/*
251*a9934668SKenneth D. Merry 	 * The underlying device is gone, so we can't issue these I/Os.
252*a9934668SKenneth D. Merry 	 * The devfs node has been shut down, so we can't return status to
253*a9934668SKenneth D. Merry 	 * the user.  Free any I/O left on the incoming queue.
254*a9934668SKenneth D. Merry 	 */
255*a9934668SKenneth D. Merry 	TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) {
256*a9934668SKenneth D. Merry 		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
257*a9934668SKenneth D. Merry 		passiocleanup(softc, io_req);
258*a9934668SKenneth D. Merry 		uma_zfree(softc->pass_zone, io_req);
259*a9934668SKenneth D. Merry 	}
260*a9934668SKenneth D. Merry 
261*a9934668SKenneth D. Merry 	/*
262*a9934668SKenneth D. Merry 	 * Normally we would put I/Os on the abandoned queue and acquire a
263*a9934668SKenneth D. Merry 	 * reference when we saw the final close.  But, the device went
264*a9934668SKenneth D. Merry 	 * away and devfs may have moved everything off to deadfs by the
265*a9934668SKenneth D. Merry 	 * time the I/O done callback is called; as a result, we won't see
266*a9934668SKenneth D. Merry 	 * any more closes.  So, if we have any active I/Os, we need to put
267*a9934668SKenneth D. Merry 	 * them on the abandoned queue.  When the abandoned queue is empty,
268*a9934668SKenneth D. Merry 	 * we'll release the remaining reference (see below) to the peripheral.
269*a9934668SKenneth D. Merry 	 */
270*a9934668SKenneth D. Merry 	TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) {
271*a9934668SKenneth D. Merry 		TAILQ_REMOVE(&softc->active_queue, io_req, links);
272*a9934668SKenneth D. Merry 		io_req->flags |= PASS_IO_ABANDONED;
273*a9934668SKenneth D. Merry 		TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links);
274*a9934668SKenneth D. Merry 	}
275*a9934668SKenneth D. Merry 
276*a9934668SKenneth D. Merry 	/*
277*a9934668SKenneth D. Merry 	 * If we put any I/O on the abandoned queue, acquire a reference.
278*a9934668SKenneth D. Merry 	 */
279*a9934668SKenneth D. Merry 	if ((!TAILQ_EMPTY(&softc->abandoned_queue))
280*a9934668SKenneth D. Merry 	 && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) {
281*a9934668SKenneth D. Merry 		cam_periph_doacquire(periph);
282*a9934668SKenneth D. Merry 		softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
283*a9934668SKenneth D. Merry 	}
284*a9934668SKenneth D. Merry }
285*a9934668SKenneth D. Merry 
286*a9934668SKenneth D. Merry static void
287ea37f519SKenneth D. Merry passdevgonecb(void *arg)
288ea37f519SKenneth D. Merry {
289ea37f519SKenneth D. Merry 	struct cam_periph *periph;
290227d67aaSAlexander Motin 	struct mtx *mtx;
29186d45c7fSKenneth D. Merry 	struct pass_softc *softc;
29286d45c7fSKenneth D. Merry 	int i;
293ea37f519SKenneth D. Merry 
294ea37f519SKenneth D. Merry 	periph = (struct cam_periph *)arg;
295227d67aaSAlexander Motin 	mtx = cam_periph_mtx(periph);
296227d67aaSAlexander Motin 	mtx_lock(mtx);
297ea37f519SKenneth D. Merry 
298227d67aaSAlexander Motin 	softc = (struct pass_softc *)periph->softc;
29986d45c7fSKenneth D. Merry 	KASSERT(softc->open_count >= 0, ("Negative open count %d",
30086d45c7fSKenneth D. Merry 		softc->open_count));
30186d45c7fSKenneth D. Merry 
30286d45c7fSKenneth D. Merry 	/*
30386d45c7fSKenneth D. Merry 	 * When we get this callback, we will get no more close calls from
30486d45c7fSKenneth D. Merry 	 * devfs.  So if we have any dangling opens, we need to release the
30586d45c7fSKenneth D. Merry 	 * reference held for that particular context.
30686d45c7fSKenneth D. Merry 	 */
30786d45c7fSKenneth D. Merry 	for (i = 0; i < softc->open_count; i++)
30886d45c7fSKenneth D. Merry 		cam_periph_release_locked(periph);
30986d45c7fSKenneth D. Merry 
31086d45c7fSKenneth D. Merry 	softc->open_count = 0;
31186d45c7fSKenneth D. Merry 
31286d45c7fSKenneth D. Merry 	/*
31386d45c7fSKenneth D. Merry 	 * Release the reference held for the device node, it is gone now.
314*a9934668SKenneth D. Merry 	 * Accordingly, inform all queued I/Os of their fate.
31586d45c7fSKenneth D. Merry 	 */
31686d45c7fSKenneth D. Merry 	cam_periph_release_locked(periph);
317*a9934668SKenneth D. Merry 	passrejectios(periph);
31886d45c7fSKenneth D. Merry 
31986d45c7fSKenneth D. Merry 	/*
320*a9934668SKenneth D. Merry 	 * We reference the SIM lock directly here, instead of using
32186d45c7fSKenneth D. Merry 	 * cam_periph_unlock().  The reason is that the final call to
32286d45c7fSKenneth D. Merry 	 * cam_periph_release_locked() above could result in the periph
32386d45c7fSKenneth D. Merry 	 * getting freed.  If that is the case, dereferencing the periph
32486d45c7fSKenneth D. Merry 	 * with a cam_periph_unlock() call would cause a page fault.
32586d45c7fSKenneth D. Merry 	 */
326227d67aaSAlexander Motin 	mtx_unlock(mtx);
327*a9934668SKenneth D. Merry 
328*a9934668SKenneth D. Merry 	/*
329*a9934668SKenneth D. Merry 	 * We have to remove our kqueue context from a thread because it
330*a9934668SKenneth D. Merry 	 * may sleep.  It would be nice if we could get a callback from
331*a9934668SKenneth D. Merry 	 * kqueue when it is done cleaning up resources.
332*a9934668SKenneth D. Merry 	 */
333*a9934668SKenneth D. Merry 	taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task);
334ea37f519SKenneth D. Merry }
335ea37f519SKenneth D. Merry 
336ea37f519SKenneth D. Merry static void
337ee9c90c7SKenneth D. Merry passoninvalidate(struct cam_periph *periph)
338ee9c90c7SKenneth D. Merry {
339ee9c90c7SKenneth D. Merry 	struct pass_softc *softc;
340ee9c90c7SKenneth D. Merry 
341ee9c90c7SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
342ee9c90c7SKenneth D. Merry 
343ee9c90c7SKenneth D. Merry 	/*
344ee9c90c7SKenneth D. Merry 	 * De-register any async callbacks.
345ee9c90c7SKenneth D. Merry 	 */
34685d92640SScott Long 	xpt_register_async(0, passasync, periph, periph->path);
347ee9c90c7SKenneth D. Merry 
348ee9c90c7SKenneth D. Merry 	softc->flags |= PASS_FLAG_INVALID;
349ee9c90c7SKenneth D. Merry 
350ee9c90c7SKenneth D. Merry 	/*
351ea37f519SKenneth D. Merry 	 * Tell devfs this device has gone away, and ask for a callback
352ea37f519SKenneth D. Merry 	 * when it has cleaned up its state.
353ea37f519SKenneth D. Merry 	 */
354ea37f519SKenneth D. Merry 	destroy_dev_sched_cb(softc->dev, passdevgonecb, periph);
355ee9c90c7SKenneth D. Merry }
356ee9c90c7SKenneth D. Merry 
357ee9c90c7SKenneth D. Merry static void
35876babe50SJustin T. Gibbs passcleanup(struct cam_periph *periph)
35976babe50SJustin T. Gibbs {
360ee9c90c7SKenneth D. Merry 	struct pass_softc *softc;
361ee9c90c7SKenneth D. Merry 
362ee9c90c7SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
363ee9c90c7SKenneth D. Merry 
364*a9934668SKenneth D. Merry 	cam_periph_assert(periph, MA_OWNED);
365*a9934668SKenneth D. Merry 	KASSERT(TAILQ_EMPTY(&softc->active_queue),
366*a9934668SKenneth D. Merry 		("%s called when there are commands on the active queue!\n",
367*a9934668SKenneth D. Merry 		__func__));
368*a9934668SKenneth D. Merry 	KASSERT(TAILQ_EMPTY(&softc->abandoned_queue),
369*a9934668SKenneth D. Merry 		("%s called when there are commands on the abandoned queue!\n",
370*a9934668SKenneth D. Merry 		__func__));
371*a9934668SKenneth D. Merry 	KASSERT(TAILQ_EMPTY(&softc->incoming_queue),
372*a9934668SKenneth D. Merry 		("%s called when there are commands on the incoming queue!\n",
373*a9934668SKenneth D. Merry 		__func__));
374*a9934668SKenneth D. Merry 	KASSERT(TAILQ_EMPTY(&softc->done_queue),
375*a9934668SKenneth D. Merry 		("%s called when there are commands on the done queue!\n",
376*a9934668SKenneth D. Merry 		__func__));
377*a9934668SKenneth D. Merry 
3785f3fed85SEdward Tomasz Napierala 	devstat_remove_entry(softc->device_stats);
379416494d7SJustin T. Gibbs 
3805f3fed85SEdward Tomasz Napierala 	cam_periph_unlock(periph);
381*a9934668SKenneth D. Merry 
382*a9934668SKenneth D. Merry 	/*
383*a9934668SKenneth D. Merry 	 * We call taskqueue_drain() for the physpath task to make sure it
384*a9934668SKenneth D. Merry 	 * is complete.  We drop the lock because this can potentially
385*a9934668SKenneth D. Merry 	 * sleep.  XXX KDM that is bad.  Need a way to get a callback when
386*a9934668SKenneth D. Merry 	 * a taskqueue is drained.
387*a9934668SKenneth D. Merry 	 *
388*a9934668SKenneth D. Merry  	 * Note that we don't drain the kqueue shutdown task queue.  This
389*a9934668SKenneth D. Merry 	 * is because we hold a reference on the periph for kqueue, and
390*a9934668SKenneth D. Merry 	 * release that reference from the kqueue shutdown task queue.  So
391*a9934668SKenneth D. Merry 	 * we cannot come into this routine unless we've released that
392*a9934668SKenneth D. Merry 	 * reference.  Also, because that could be the last reference, we
393*a9934668SKenneth D. Merry 	 * could be called from the cam_periph_release() call in
394*a9934668SKenneth D. Merry 	 * pass_shutdown_kqueue().  In that case, the taskqueue_drain()
395*a9934668SKenneth D. Merry 	 * would deadlock.  It would be preferable if we had a way to
396*a9934668SKenneth D. Merry 	 * get a callback when a taskqueue is done.
397*a9934668SKenneth D. Merry 	 */
398416494d7SJustin T. Gibbs 	taskqueue_drain(taskqueue_thread, &softc->add_physpath_task);
399416494d7SJustin T. Gibbs 
4005f3fed85SEdward Tomasz Napierala 	cam_periph_lock(periph);
401416494d7SJustin T. Gibbs 
402ee9c90c7SKenneth D. Merry 	free(softc, M_DEVBUF);
40376babe50SJustin T. Gibbs }
40476babe50SJustin T. Gibbs 
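/*
 * Task handler, run from taskqueue_thread: tear down the knote list
 * backing kqueue notification for this periph and drop the reference
 * passregister() took on kqueue's behalf.  This runs from a task
 * (enqueued in passdevgonecb()) rather than inline because the teardown
 * may sleep.
 */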
40576babe50SJustin T. Gibbs static void
406*a9934668SKenneth D. Merry pass_shutdown_kqueue(void *context, int pending)
407*a9934668SKenneth D. Merry {
408*a9934668SKenneth D. Merry 	struct cam_periph *periph;
409*a9934668SKenneth D. Merry 	struct pass_softc *softc;
410*a9934668SKenneth D. Merry 
411*a9934668SKenneth D. Merry 	periph = context;
412*a9934668SKenneth D. Merry 	softc = periph->softc;
413*a9934668SKenneth D. Merry 
414*a9934668SKenneth D. Merry 	knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0);
415*a9934668SKenneth D. Merry 	knlist_destroy(&softc->read_select.si_note);
416*a9934668SKenneth D. Merry 
417*a9934668SKenneth D. Merry 	/*
418*a9934668SKenneth D. Merry 	 * Release the reference we held for kqueue.
419*a9934668SKenneth D. Merry 	 */
420*a9934668SKenneth D. Merry 	cam_periph_release(periph);
421*a9934668SKenneth D. Merry }
422*a9934668SKenneth D. Merry 
423*a9934668SKenneth D. Merry static void
424416494d7SJustin T. Gibbs pass_add_physpath(void *context, int pending)
425416494d7SJustin T. Gibbs {
426416494d7SJustin T. Gibbs 	struct cam_periph *periph;
427416494d7SJustin T. Gibbs 	struct pass_softc *softc;
428*a9934668SKenneth D. Merry 	struct mtx *mtx;
429416494d7SJustin T. Gibbs 	char *physpath;
430416494d7SJustin T. Gibbs 
431416494d7SJustin T. Gibbs 	/*
432416494d7SJustin T. Gibbs 	 * If we have one, create a devfs alias for our
433416494d7SJustin T. Gibbs 	 * physical path.
434416494d7SJustin T. Gibbs 	 */
435416494d7SJustin T. Gibbs 	periph = context;
436416494d7SJustin T. Gibbs 	softc = periph->softc;
4376884b662SAlexander Motin 	physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK);
438*a9934668SKenneth D. Merry 	mtx = cam_periph_mtx(periph);
439*a9934668SKenneth D. Merry 	mtx_lock(mtx);
440*a9934668SKenneth D. Merry 
441*a9934668SKenneth D. Merry 	if (periph->flags & CAM_PERIPH_INVALID)
4426884b662SAlexander Motin 		goto out;
443*a9934668SKenneth D. Merry 
444416494d7SJustin T. Gibbs 	if (xpt_getattr(physpath, MAXPATHLEN,
445416494d7SJustin T. Gibbs 			"GEOM::physpath", periph->path) == 0
446416494d7SJustin T. Gibbs 	 && strlen(physpath) != 0) {
447416494d7SJustin T. Gibbs 
448*a9934668SKenneth D. Merry 		mtx_unlock(mtx);
449416494d7SJustin T. Gibbs 		make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev,
450416494d7SJustin T. Gibbs 					softc->dev, softc->alias_dev, physpath);
451*a9934668SKenneth D. Merry 		mtx_lock(mtx);
452416494d7SJustin T. Gibbs 	}
453ea37f519SKenneth D. Merry 
454*a9934668SKenneth D. Merry out:
455ea37f519SKenneth D. Merry 	/*
456ea37f519SKenneth D. Merry 	 * Now that we've made our alias, we no longer have to have a
457ea37f519SKenneth D. Merry 	 * reference to the device.
458ea37f519SKenneth D. Merry 	 */
459*a9934668SKenneth D. Merry 	if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0)
460ea37f519SKenneth D. Merry 		softc->flags |= PASS_FLAG_INITIAL_PHYSPATH;
4616884b662SAlexander Motin 
462*a9934668SKenneth D. Merry 	/*
463*a9934668SKenneth D. Merry 	 * We always acquire a reference to the periph before queueing this
464*a9934668SKenneth D. Merry 	 * task queue function, so it won't go away before we run.
465*a9934668SKenneth D. Merry 	 */
466*a9934668SKenneth D. Merry 	while (pending-- > 0)
467*a9934668SKenneth D. Merry 		cam_periph_release_locked(periph);
468*a9934668SKenneth D. Merry 	mtx_unlock(mtx);
469*a9934668SKenneth D. Merry 
4706884b662SAlexander Motin 	free(physpath, M_DEVBUF);
471416494d7SJustin T. Gibbs }
472416494d7SJustin T. Gibbs 
473416494d7SJustin T. Gibbs static void
47476babe50SJustin T. Gibbs passasync(void *callback_arg, u_int32_t code,
47576babe50SJustin T. Gibbs 	  struct cam_path *path, void *arg)
47676babe50SJustin T. Gibbs {
47776babe50SJustin T. Gibbs 	struct cam_periph *periph;
47876babe50SJustin T. Gibbs 
47976babe50SJustin T. Gibbs 	periph = (struct cam_periph *)callback_arg;
48076babe50SJustin T. Gibbs 
48176babe50SJustin T. Gibbs 	switch (code) {
48276babe50SJustin T. Gibbs 	case AC_FOUND_DEVICE:
48376babe50SJustin T. Gibbs 	{
48476babe50SJustin T. Gibbs 		struct ccb_getdev *cgd;
48576babe50SJustin T. Gibbs 		cam_status status;
48676babe50SJustin T. Gibbs 
48776babe50SJustin T. Gibbs 		cgd = (struct ccb_getdev *)arg;
488c5ff3b2fSMatt Jacob 		if (cgd == NULL)
489c5ff3b2fSMatt Jacob 			break;
49076babe50SJustin T. Gibbs 
49176babe50SJustin T. Gibbs 		/*
49276babe50SJustin T. Gibbs 		 * Allocate a peripheral instance for
49376babe50SJustin T. Gibbs 		 * this device and start the probe
49476babe50SJustin T. Gibbs 		 * process.
49576babe50SJustin T. Gibbs 		 */
496ee9c90c7SKenneth D. Merry 		status = cam_periph_alloc(passregister, passoninvalidate,
497*a9934668SKenneth D. Merry 					  passcleanup, passstart, "pass",
498227d67aaSAlexander Motin 					  CAM_PERIPH_BIO, path,
499ee9c90c7SKenneth D. Merry 					  passasync, AC_FOUND_DEVICE, cgd);
50076babe50SJustin T. Gibbs 
50176babe50SJustin T. Gibbs 		if (status != CAM_REQ_CMP
5023393f8daSKenneth D. Merry 		 && status != CAM_REQ_INPROG) {
5033393f8daSKenneth D. Merry 			const struct cam_status_entry *entry;
5043393f8daSKenneth D. Merry 
5053393f8daSKenneth D. Merry 			entry = cam_fetch_status_entry(status);
5063393f8daSKenneth D. Merry 
50776babe50SJustin T. Gibbs 			printf("passasync: Unable to attach new device "
5083393f8daSKenneth D. Merry 			       "due to status %#x: %s\n", status, entry ?
5093393f8daSKenneth D. Merry 			       entry->status_text : "Unknown");
5103393f8daSKenneth D. Merry 		}
51176babe50SJustin T. Gibbs 
51276babe50SJustin T. Gibbs 		break;
51376babe50SJustin T. Gibbs 	}
514416494d7SJustin T. Gibbs 	case AC_ADVINFO_CHANGED:
515416494d7SJustin T. Gibbs 	{
516416494d7SJustin T. Gibbs 		uintptr_t buftype;
517416494d7SJustin T. Gibbs 
518416494d7SJustin T. Gibbs 		buftype = (uintptr_t)arg;
519416494d7SJustin T. Gibbs 		if (buftype == CDAI_TYPE_PHYS_PATH) {
520416494d7SJustin T. Gibbs 			struct pass_softc *softc;
521*a9934668SKenneth D. Merry 			cam_status status;
522416494d7SJustin T. Gibbs 
523416494d7SJustin T. Gibbs 			softc = (struct pass_softc *)periph->softc;
524*a9934668SKenneth D. Merry 			/*
525*a9934668SKenneth D. Merry 			 * Acquire a reference to the periph before we
526*a9934668SKenneth D. Merry 			 * start the taskqueue, so that we don't run into
527*a9934668SKenneth D. Merry 			 * a situation where the periph goes away before
528*a9934668SKenneth D. Merry 			 * the task queue has a chance to run.
529*a9934668SKenneth D. Merry 			 */
530*a9934668SKenneth D. Merry 			status = cam_periph_acquire(periph);
531*a9934668SKenneth D. Merry 			if (status != CAM_REQ_CMP)
532*a9934668SKenneth D. Merry 				break;
533*a9934668SKenneth D. Merry 
534416494d7SJustin T. Gibbs 			taskqueue_enqueue(taskqueue_thread,
535416494d7SJustin T. Gibbs 					  &softc->add_physpath_task);
536416494d7SJustin T. Gibbs 		}
537416494d7SJustin T. Gibbs 		break;
538416494d7SJustin T. Gibbs 	}
53976babe50SJustin T. Gibbs 	default:
540516871c6SJustin T. Gibbs 		cam_periph_async(periph, code, path, arg);
54176babe50SJustin T. Gibbs 		break;
54276babe50SJustin T. Gibbs 	}
54376babe50SJustin T. Gibbs }
54476babe50SJustin T. Gibbs 
54576babe50SJustin T. Gibbs static cam_status
54676babe50SJustin T. Gibbs passregister(struct cam_periph *periph, void *arg)
54776babe50SJustin T. Gibbs {
54876babe50SJustin T. Gibbs 	struct pass_softc *softc;
54976babe50SJustin T. Gibbs 	struct ccb_getdev *cgd;
550b8b6b5d3SAlexander Motin 	struct ccb_pathinq cpi;
5513393f8daSKenneth D. Merry 	int    no_tags;
55276babe50SJustin T. Gibbs 
55376babe50SJustin T. Gibbs 	cgd = (struct ccb_getdev *)arg;
55476babe50SJustin T. Gibbs 	if (cgd == NULL) {
555ea37f519SKenneth D. Merry 		printf("%s: no getdev CCB, can't register device\n", __func__);
55676babe50SJustin T. Gibbs 		return(CAM_REQ_CMP_ERR);
55776babe50SJustin T. Gibbs 	}
55876babe50SJustin T. Gibbs 
55976babe50SJustin T. Gibbs 	softc = (struct pass_softc *)malloc(sizeof(*softc),
56076babe50SJustin T. Gibbs 					    M_DEVBUF, M_NOWAIT);
56176babe50SJustin T. Gibbs 
56276babe50SJustin T. Gibbs 	if (softc == NULL) {
563ea37f519SKenneth D. Merry 		printf("%s: Unable to probe new device. "
564ea37f519SKenneth D. Merry 		       "Unable to allocate softc\n", __func__);
56576babe50SJustin T. Gibbs 		return(CAM_REQ_CMP_ERR);
56676babe50SJustin T. Gibbs 	}
56776babe50SJustin T. Gibbs 
56876babe50SJustin T. Gibbs 	bzero(softc, sizeof(*softc));
56976babe50SJustin T. Gibbs 	softc->state = PASS_STATE_NORMAL;
570b8b6b5d3SAlexander Motin 	if (cgd->protocol == PROTO_SCSI || cgd->protocol == PROTO_ATAPI)
57110b6172aSMatt Jacob 		softc->pd_type = SID_TYPE(&cgd->inq_data);
572b8b6b5d3SAlexander Motin 	else if (cgd->protocol == PROTO_SATAPM)
573b8b6b5d3SAlexander Motin 		softc->pd_type = T_ENCLOSURE;
574b8b6b5d3SAlexander Motin 	else
575b8b6b5d3SAlexander Motin 		softc->pd_type = T_DIRECT;
57676babe50SJustin T. Gibbs 
57776babe50SJustin T. Gibbs 	periph->softc = softc;
578*a9934668SKenneth D. Merry 	softc->periph = periph;
579*a9934668SKenneth D. Merry 	TAILQ_INIT(&softc->incoming_queue);
580*a9934668SKenneth D. Merry 	TAILQ_INIT(&softc->active_queue);
581*a9934668SKenneth D. Merry 	TAILQ_INIT(&softc->abandoned_queue);
582*a9934668SKenneth D. Merry 	TAILQ_INIT(&softc->done_queue);
583*a9934668SKenneth D. Merry 	snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d",
584*a9934668SKenneth D. Merry 		 periph->periph_name, periph->unit_number);
585*a9934668SKenneth D. Merry 	snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO",
586*a9934668SKenneth D. Merry 		 periph->periph_name, periph->unit_number);
587*a9934668SKenneth D. Merry 	softc->io_zone_size = MAXPHYS;
588*a9934668SKenneth D. Merry 	knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph));
5893393f8daSKenneth D. Merry 
590b8b6b5d3SAlexander Motin 	bzero(&cpi, sizeof(cpi));
591b8b6b5d3SAlexander Motin 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
592b8b6b5d3SAlexander Motin 	cpi.ccb_h.func_code = XPT_PATH_INQ;
593b8b6b5d3SAlexander Motin 	xpt_action((union ccb *)&cpi);
594b8b6b5d3SAlexander Motin 
595de239312SAlexander Motin 	if (cpi.maxio == 0)
596de239312SAlexander Motin 		softc->maxio = DFLTPHYS;	/* traditional default */
597de239312SAlexander Motin 	else if (cpi.maxio > MAXPHYS)
598de239312SAlexander Motin 		softc->maxio = MAXPHYS;		/* for safety */
599de239312SAlexander Motin 	else
600de239312SAlexander Motin 		softc->maxio = cpi.maxio;	/* real value */
601de239312SAlexander Motin 
602*a9934668SKenneth D. Merry 	if (cpi.hba_misc & PIM_UNMAPPED)
603*a9934668SKenneth D. Merry 		softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE;
604*a9934668SKenneth D. Merry 
60576babe50SJustin T. Gibbs 	/*
60676babe50SJustin T. Gibbs 	 * We pass in 0 for a blocksize, since we don't
60776babe50SJustin T. Gibbs 	 * know what the blocksize of this device is, if
60876babe50SJustin T. Gibbs 	 * it even has a blocksize.
60976babe50SJustin T. Gibbs 	 */
610edec59d9SAlexander Motin 	cam_periph_unlock(periph);
6113393f8daSKenneth D. Merry 	no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0;
612c81d2c74SMatt Jacob 	softc->device_stats = devstat_new_entry("pass",
613d3ce8327SEd Schouten 			  periph->unit_number, 0,
6143393f8daSKenneth D. Merry 			  DEVSTAT_NO_BLOCKSIZE
6153393f8daSKenneth D. Merry 			  | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0),
61610b6172aSMatt Jacob 			  softc->pd_type |
617b8b6b5d3SAlexander Motin 			  XPORT_DEVSTAT_TYPE(cpi.transport) |
6182a888f93SKenneth D. Merry 			  DEVSTAT_TYPE_PASS,
6192a888f93SKenneth D. Merry 			  DEVSTAT_PRIORITY_PASS);
62073d26919SKenneth D. Merry 
621ea37f519SKenneth D. Merry 	/*
622*a9934668SKenneth D. Merry 	 * Initialize the taskqueue handler for shutting down kqueue.
623*a9934668SKenneth D. Merry 	 */
624*a9934668SKenneth D. Merry 	TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0,
625*a9934668SKenneth D. Merry 		  pass_shutdown_kqueue, periph);
626*a9934668SKenneth D. Merry 
627*a9934668SKenneth D. Merry 	/*
628*a9934668SKenneth D. Merry 	 * Acquire a reference to the periph that we can release once we've
629*a9934668SKenneth D. Merry 	 * cleaned up the kqueue.
630*a9934668SKenneth D. Merry 	 */
631*a9934668SKenneth D. Merry 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
632*a9934668SKenneth D. Merry 		xpt_print(periph->path, "%s: lost periph during "
633*a9934668SKenneth D. Merry 			  "registration!\n", __func__);
634*a9934668SKenneth D. Merry 		cam_periph_lock(periph);
635*a9934668SKenneth D. Merry 		return (CAM_REQ_CMP_ERR);
636*a9934668SKenneth D. Merry 	}
637*a9934668SKenneth D. Merry 
638*a9934668SKenneth D. Merry 	/*
639ea37f519SKenneth D. Merry 	 * Acquire a reference to the periph before we create the devfs
640ea37f519SKenneth D. Merry 	 * instance for it.  We'll release this reference once the devfs
641ea37f519SKenneth D. Merry 	 * instance has been freed.
642ea37f519SKenneth D. Merry 	 */
643ea37f519SKenneth D. Merry 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
644ea37f519SKenneth D. Merry 		xpt_print(periph->path, "%s: lost periph during "
645ea37f519SKenneth D. Merry 			  "registration!\n", __func__);
64686d45c7fSKenneth D. Merry 		cam_periph_lock(periph);
647ea37f519SKenneth D. Merry 		return (CAM_REQ_CMP_ERR);
648ea37f519SKenneth D. Merry 	}
649ea37f519SKenneth D. Merry 
65073d26919SKenneth D. Merry 	/* Register the device */
651d3ce8327SEd Schouten 	softc->dev = make_dev(&pass_cdevsw, periph->unit_number,
652c81d2c74SMatt Jacob 			      UID_ROOT, GID_OPERATOR, 0600, "%s%d",
653c81d2c74SMatt Jacob 			      periph->periph_name, periph->unit_number);
654ea37f519SKenneth D. Merry 
655ea37f519SKenneth D. Merry 	/*
656*a9934668SKenneth D. Merry 	 * Hold a reference to the periph before we create the physical
657*a9934668SKenneth D. Merry 	 * path alias so it can't go away.
658ea37f519SKenneth D. Merry 	 */
659*a9934668SKenneth D. Merry 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
660*a9934668SKenneth D. Merry 		xpt_print(periph->path, "%s: lost periph during "
661*a9934668SKenneth D. Merry 			  "registration!\n", __func__);
662*a9934668SKenneth D. Merry 		cam_periph_lock(periph);
663*a9934668SKenneth D. Merry 		return (CAM_REQ_CMP_ERR);
664*a9934668SKenneth D. Merry 	}
665ea37f519SKenneth D. Merry 
666edec59d9SAlexander Motin 	cam_periph_lock(periph);
667e2a5fdf9SNate Lawson 	softc->dev->si_drv1 = periph;
66873d26919SKenneth D. Merry 
669416494d7SJustin T. Gibbs 	TASK_INIT(&softc->add_physpath_task, /*priority*/0,
670416494d7SJustin T. Gibbs 		  pass_add_physpath, periph);
671416494d7SJustin T. Gibbs 
67276babe50SJustin T. Gibbs 	/*
673416494d7SJustin T. Gibbs 	 * See if physical path information is already available.
67476babe50SJustin T. Gibbs 	 */
675416494d7SJustin T. Gibbs 	taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task);
676416494d7SJustin T. Gibbs 
677416494d7SJustin T. Gibbs 	/*
678416494d7SJustin T. Gibbs 	 * Add an async callback so that we get notified if
679416494d7SJustin T. Gibbs 	 * this device goes away or its physical path
680416494d7SJustin T. Gibbs 	 * (stored in the advanced info data of the EDT) has
681416494d7SJustin T. Gibbs 	 * changed.
682416494d7SJustin T. Gibbs 	 */
683416494d7SJustin T. Gibbs 	xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED,
684416494d7SJustin T. Gibbs 			   passasync, periph, periph->path);
68576babe50SJustin T. Gibbs 
68676babe50SJustin T. Gibbs 	if (bootverbose)
68776babe50SJustin T. Gibbs 		xpt_announce_periph(periph, NULL);
68876babe50SJustin T. Gibbs 
68976babe50SJustin T. Gibbs 	return(CAM_REQ_CMP);
69076babe50SJustin T. Gibbs }
69176babe50SJustin T. Gibbs 
69276babe50SJustin T. Gibbs static int
69389c9c53dSPoul-Henning Kamp passopen(struct cdev *dev, int flags, int fmt, struct thread *td)
69476babe50SJustin T. Gibbs {
69576babe50SJustin T. Gibbs 	struct cam_periph *periph;
69676babe50SJustin T. Gibbs 	struct pass_softc *softc;
697e2a5fdf9SNate Lawson 	int error;
69876babe50SJustin T. Gibbs 
699e2a5fdf9SNate Lawson 	periph = (struct cam_periph *)dev->si_drv1;
7002b83592fSScott Long 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
70176babe50SJustin T. Gibbs 		return (ENXIO);
70276babe50SJustin T. Gibbs 
7032b83592fSScott Long 	cam_periph_lock(periph);
7042b83592fSScott Long 
70576babe50SJustin T. Gibbs 	softc = (struct pass_softc *)periph->softc;
70676babe50SJustin T. Gibbs 
707ee9c90c7SKenneth D. Merry 	if (softc->flags & PASS_FLAG_INVALID) {
708c552ebe1SKenneth D. Merry 		cam_periph_release_locked(periph);
7092b83592fSScott Long 		cam_periph_unlock(periph);
71076babe50SJustin T. Gibbs 		return(ENXIO);
711ee9c90c7SKenneth D. Merry 	}
71222b9c86cSKenneth D. Merry 
71322b9c86cSKenneth D. Merry 	/*
714f5ef42beSRobert Watson 	 * Don't allow access when we're running at a high securelevel.
71522b9c86cSKenneth D. Merry 	 */
716a854ed98SJohn Baldwin 	error = securelevel_gt(td->td_ucred, 1);
717f7312ca2SRobert Watson 	if (error) {
718c552ebe1SKenneth D. Merry 		cam_periph_release_locked(periph);
7192b83592fSScott Long 		cam_periph_unlock(periph);
720f7312ca2SRobert Watson 		return(error);
72122b9c86cSKenneth D. Merry 	}
72276babe50SJustin T. Gibbs 
72376babe50SJustin T. Gibbs 	/*
72466a0780eSKenneth D. Merry 	 * Only allow read-write access.
72566a0780eSKenneth D. Merry 	 */
72622b9c86cSKenneth D. Merry 	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) {
727c552ebe1SKenneth D. Merry 		cam_periph_release_locked(periph);
7282b83592fSScott Long 		cam_periph_unlock(periph);
72966a0780eSKenneth D. Merry 		return(EPERM);
73022b9c86cSKenneth D. Merry 	}
73166a0780eSKenneth D. Merry 
73266a0780eSKenneth D. Merry 	/*
73376babe50SJustin T. Gibbs 	 * We don't allow nonblocking access.
73476babe50SJustin T. Gibbs 	 */
73576babe50SJustin T. Gibbs 	if ((flags & O_NONBLOCK) != 0) {
736f0d9af51SMatt Jacob 		xpt_print(periph->path, "can't do nonblocking access\n");
737c552ebe1SKenneth D. Merry 		cam_periph_release_locked(periph);
7382b83592fSScott Long 		cam_periph_unlock(periph);
73922b9c86cSKenneth D. Merry 		return(EINVAL);
74076babe50SJustin T. Gibbs 	}
74176babe50SJustin T. Gibbs 
74286d45c7fSKenneth D. Merry 	softc->open_count++;
74386d45c7fSKenneth D. Merry 
744835187bfSScott Long 	cam_periph_unlock(periph);
74576babe50SJustin T. Gibbs 
74676babe50SJustin T. Gibbs 	return (error);
74776babe50SJustin T. Gibbs }
74876babe50SJustin T. Gibbs 
74976babe50SJustin T. Gibbs static int
75089c9c53dSPoul-Henning Kamp passclose(struct cdev *dev, int flag, int fmt, struct thread *td)
75176babe50SJustin T. Gibbs {
75276babe50SJustin T. Gibbs 	struct 	cam_periph *periph;
75386d45c7fSKenneth D. Merry 	struct  pass_softc *softc;
754227d67aaSAlexander Motin 	struct mtx *mtx;
75576babe50SJustin T. Gibbs 
756e2a5fdf9SNate Lawson 	periph = (struct cam_periph *)dev->si_drv1;
75776babe50SJustin T. Gibbs 	if (periph == NULL)
75876babe50SJustin T. Gibbs 		return (ENXIO);
759227d67aaSAlexander Motin 	mtx = cam_periph_mtx(periph);
760227d67aaSAlexander Motin 	mtx_lock(mtx);
76176babe50SJustin T. Gibbs 
76286d45c7fSKenneth D. Merry 	softc = periph->softc;
76386d45c7fSKenneth D. Merry 	softc->open_count--;
76486d45c7fSKenneth D. Merry 
765*a9934668SKenneth D. Merry 	if (softc->open_count == 0) {
766*a9934668SKenneth D. Merry 		struct pass_io_req *io_req, *io_req2;
767*a9934668SKenneth D. Merry 		int need_unlock;
768*a9934668SKenneth D. Merry 
769*a9934668SKenneth D. Merry 		need_unlock = 0;
770*a9934668SKenneth D. Merry 
771*a9934668SKenneth D. Merry 		TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
772*a9934668SKenneth D. Merry 			TAILQ_REMOVE(&softc->done_queue, io_req, links);
773*a9934668SKenneth D. Merry 			passiocleanup(softc, io_req);
774*a9934668SKenneth D. Merry 			uma_zfree(softc->pass_zone, io_req);
775*a9934668SKenneth D. Merry 		}
776*a9934668SKenneth D. Merry 
777*a9934668SKenneth D. Merry 		TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links,
778*a9934668SKenneth D. Merry 				   io_req2) {
779*a9934668SKenneth D. Merry 			TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
780*a9934668SKenneth D. Merry 			passiocleanup(softc, io_req);
781*a9934668SKenneth D. Merry 			uma_zfree(softc->pass_zone, io_req);
782*a9934668SKenneth D. Merry 		}
783*a9934668SKenneth D. Merry 
784*a9934668SKenneth D. Merry 		/*
785*a9934668SKenneth D. Merry 		 * If there are any active I/Os, we need to forcibly acquire a
786*a9934668SKenneth D. Merry 		 * reference to the peripheral so that we don't go away
787*a9934668SKenneth D. Merry 		 * before they complete.  We'll release the reference when
788*a9934668SKenneth D. Merry 		 * the abandoned queue is empty.
789*a9934668SKenneth D. Merry 		 */
790*a9934668SKenneth D. Merry 		io_req = TAILQ_FIRST(&softc->active_queue);
791*a9934668SKenneth D. Merry 		if ((io_req != NULL)
792*a9934668SKenneth D. Merry 		 && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) {
793*a9934668SKenneth D. Merry 			cam_periph_doacquire(periph);
794*a9934668SKenneth D. Merry 			softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
795*a9934668SKenneth D. Merry 		}
796*a9934668SKenneth D. Merry 
797*a9934668SKenneth D. Merry 		/*
798*a9934668SKenneth D. Merry 		 * Since the I/O in the active queue is not under our
799*a9934668SKenneth D. Merry 		 * control, just set a flag so that we can clean it up when
800*a9934668SKenneth D. Merry 		 * it completes and put it on the abandoned queue.  This
801*a9934668SKenneth D. Merry 		 * will prevent our sending spurious completions in the
802*a9934668SKenneth D. Merry 		 * event that the device is opened again before these I/Os
803*a9934668SKenneth D. Merry 		 * complete.
804*a9934668SKenneth D. Merry 		 */
805*a9934668SKenneth D. Merry 		TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links,
806*a9934668SKenneth D. Merry 				   io_req2) {
807*a9934668SKenneth D. Merry 			TAILQ_REMOVE(&softc->active_queue, io_req, links);
808*a9934668SKenneth D. Merry 			io_req->flags |= PASS_IO_ABANDONED;
809*a9934668SKenneth D. Merry 			TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req,
810*a9934668SKenneth D. Merry 					  links);
811*a9934668SKenneth D. Merry 		}
812*a9934668SKenneth D. Merry 	}
813*a9934668SKenneth D. Merry 
81486d45c7fSKenneth D. Merry 	cam_periph_release_locked(periph);
81586d45c7fSKenneth D. Merry 
81686d45c7fSKenneth D. Merry 	/*
817227d67aaSAlexander Motin 	 * We reference the lock directly here, instead of using
81886d45c7fSKenneth D. Merry 	 * cam_periph_unlock().  The reason is that the call to
81986d45c7fSKenneth D. Merry 	 * cam_periph_release_locked() above could result in the periph
82086d45c7fSKenneth D. Merry 	 * getting freed.  If that is the case, dereferencing the periph
82186d45c7fSKenneth D. Merry 	 * with a cam_periph_unlock() call would cause a page fault.
82286d45c7fSKenneth D. Merry 	 *
82386d45c7fSKenneth D. Merry 	 * cam_periph_release() avoids this problem using the same method,
82486d45c7fSKenneth D. Merry 	 * but we're manually acquiring and dropping the lock here to
82586d45c7fSKenneth D. Merry 	 * protect the open count and avoid another lock acquisition and
82686d45c7fSKenneth D. Merry 	 * release.
82786d45c7fSKenneth D. Merry 	 */
828227d67aaSAlexander Motin 	mtx_unlock(mtx);
82976babe50SJustin T. Gibbs 
83076babe50SJustin T. Gibbs 	return (0);
83176babe50SJustin T. Gibbs }
83276babe50SJustin T. Gibbs 
833*a9934668SKenneth D. Merry 
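/*
 * Periph start routine, invoked by the XPT with a newly allocated CCB once
 * xpt_schedule() has been called: take the next request off the incoming
 * queue, merge the user's CCB into the allocated CCB, dispatch it, and
 * reschedule ourselves if more requests are waiting.
 */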
834*a9934668SKenneth D. Merry static void
835*a9934668SKenneth D. Merry passstart(struct cam_periph *periph, union ccb *start_ccb)
836*a9934668SKenneth D. Merry {
837*a9934668SKenneth D. Merry 	struct pass_softc *softc;
838*a9934668SKenneth D. Merry 
839*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
840*a9934668SKenneth D. Merry 
841*a9934668SKenneth D. Merry 	switch (softc->state) {
842*a9934668SKenneth D. Merry 	case PASS_STATE_NORMAL: {
843*a9934668SKenneth D. Merry 		struct pass_io_req *io_req;
844*a9934668SKenneth D. Merry 
845*a9934668SKenneth D. Merry 		/*
846*a9934668SKenneth D. Merry 		 * Check for any queued I/O requests that require an
847*a9934668SKenneth D. Merry 		 * allocated slot.
848*a9934668SKenneth D. Merry 		 */
849*a9934668SKenneth D. Merry 		io_req = TAILQ_FIRST(&softc->incoming_queue);
850*a9934668SKenneth D. Merry 		if (io_req == NULL) {
851*a9934668SKenneth D. Merry 			xpt_release_ccb(start_ccb);
852*a9934668SKenneth D. Merry 			break;
853*a9934668SKenneth D. Merry 		}
854*a9934668SKenneth D. Merry 		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
855*a9934668SKenneth D. Merry 		TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
856*a9934668SKenneth D. Merry 		/*
857*a9934668SKenneth D. Merry 		 * Merge the user's CCB into the allocated CCB.
858*a9934668SKenneth D. Merry 		 */
859*a9934668SKenneth D. Merry 		xpt_merge_ccb(start_ccb, &io_req->ccb);
860*a9934668SKenneth D. Merry 		start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO;
861*a9934668SKenneth D. Merry 		start_ccb->ccb_h.ccb_ioreq = io_req;
862*a9934668SKenneth D. Merry 		start_ccb->ccb_h.cbfcnp = passdone;
863*a9934668SKenneth D. Merry 		io_req->alloced_ccb = start_ccb;
864*a9934668SKenneth D. Merry 		binuptime(&io_req->start_time);
865*a9934668SKenneth D. Merry 		devstat_start_transaction(softc->device_stats,
866*a9934668SKenneth D. Merry 					  &io_req->start_time);
867*a9934668SKenneth D. Merry 
868*a9934668SKenneth D. Merry 		xpt_action(start_ccb);
869*a9934668SKenneth D. Merry 
870*a9934668SKenneth D. Merry 		/*
871*a9934668SKenneth D. Merry 		 * If we have any more I/O waiting, schedule ourselves again.
872*a9934668SKenneth D. Merry 		 */
873*a9934668SKenneth D. Merry 		if (!TAILQ_EMPTY(&softc->incoming_queue))
874*a9934668SKenneth D. Merry 			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
875*a9934668SKenneth D. Merry 		break;
876*a9934668SKenneth D. Merry 	}
877*a9934668SKenneth D. Merry 	default:
878*a9934668SKenneth D. Merry 		break;
879*a9934668SKenneth D. Merry 	}
880*a9934668SKenneth D. Merry }
881*a9934668SKenneth D. Merry 
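/*
 * Completion callback for CCBs dispatched by passstart().  Run error
 * recovery if the user asked for it, copy the completed CCB back into the
 * request, record the transfer with devstat(9), and then either put the
 * request on the done queue and wake any select/poll/kqueue waiters, or
 * free it immediately if it was abandoned.
 */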
882*a9934668SKenneth D. Merry static void
883*a9934668SKenneth D. Merry passdone(struct cam_periph *periph, union ccb *done_ccb)
884*a9934668SKenneth D. Merry {
885*a9934668SKenneth D. Merry 	struct pass_softc *softc;
886*a9934668SKenneth D. Merry 	struct ccb_scsiio *csio;
887*a9934668SKenneth D. Merry 
888*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
889*a9934668SKenneth D. Merry 
890*a9934668SKenneth D. Merry 	cam_periph_assert(periph, MA_OWNED);
891*a9934668SKenneth D. Merry 
892*a9934668SKenneth D. Merry 	csio = &done_ccb->csio;
893*a9934668SKenneth D. Merry 	switch (csio->ccb_h.ccb_type) {
894*a9934668SKenneth D. Merry 	case PASS_CCB_QUEUED_IO: {
895*a9934668SKenneth D. Merry 		struct pass_io_req *io_req;
896*a9934668SKenneth D. Merry 
897*a9934668SKenneth D. Merry 		io_req = done_ccb->ccb_h.ccb_ioreq;
898*a9934668SKenneth D. Merry #if 0
899*a9934668SKenneth D. Merry 		xpt_print(periph->path, "%s: called for user CCB %p\n",
900*a9934668SKenneth D. Merry 			  __func__, io_req->user_ccb_ptr);
901*a9934668SKenneth D. Merry #endif
902*a9934668SKenneth D. Merry 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
903*a9934668SKenneth D. Merry 		 && (done_ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER)
904*a9934668SKenneth D. Merry 		 && ((io_req->flags & PASS_IO_ABANDONED) == 0)) {
905*a9934668SKenneth D. Merry 			int error;
906*a9934668SKenneth D. Merry 
907*a9934668SKenneth D. Merry 			error = passerror(done_ccb, CAM_RETRY_SELTO,
908*a9934668SKenneth D. Merry 					  SF_RETRY_UA | SF_NO_PRINT);
909*a9934668SKenneth D. Merry 
910*a9934668SKenneth D. Merry 			if (error == ERESTART) {
911*a9934668SKenneth D. Merry 				/*
912*a9934668SKenneth D. Merry 				 * A retry was scheduled, so
913*a9934668SKenneth D. Merry  				 * just return.
914*a9934668SKenneth D. Merry 				 */
915*a9934668SKenneth D. Merry 				return;
916*a9934668SKenneth D. Merry 			}
917*a9934668SKenneth D. Merry 		}
918*a9934668SKenneth D. Merry 
919*a9934668SKenneth D. Merry 		/*
920*a9934668SKenneth D. Merry 		 * Copy the allocated CCB contents back to the malloced CCB
921*a9934668SKenneth D. Merry 		 * so we can give status back to the user when he requests it.
922*a9934668SKenneth D. Merry 		 */
923*a9934668SKenneth D. Merry 		bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb));
924*a9934668SKenneth D. Merry 
925*a9934668SKenneth D. Merry 		/*
926*a9934668SKenneth D. Merry 		 * Log data/transaction completion with devstat(9).
927*a9934668SKenneth D. Merry 		 */
928*a9934668SKenneth D. Merry 		switch (done_ccb->ccb_h.func_code) {
929*a9934668SKenneth D. Merry 		case XPT_SCSI_IO:
930*a9934668SKenneth D. Merry 			devstat_end_transaction(softc->device_stats,
931*a9934668SKenneth D. Merry 			    done_ccb->csio.dxfer_len - done_ccb->csio.resid,
932*a9934668SKenneth D. Merry 			    done_ccb->csio.tag_action & 0x3,
933*a9934668SKenneth D. Merry 			    ((done_ccb->ccb_h.flags & CAM_DIR_MASK) ==
934*a9934668SKenneth D. Merry 			    CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
935*a9934668SKenneth D. Merry 			    (done_ccb->ccb_h.flags & CAM_DIR_OUT) ?
936*a9934668SKenneth D. Merry 			    DEVSTAT_WRITE : DEVSTAT_READ, NULL,
937*a9934668SKenneth D. Merry 			    &io_req->start_time);
938*a9934668SKenneth D. Merry 			break;
939*a9934668SKenneth D. Merry 		case XPT_ATA_IO:
940*a9934668SKenneth D. Merry 			devstat_end_transaction(softc->device_stats,
941*a9934668SKenneth D. Merry 			    done_ccb->ataio.dxfer_len - done_ccb->ataio.resid,
942*a9934668SKenneth D. Merry 			    done_ccb->ataio.tag_action & 0x3,
943*a9934668SKenneth D. Merry 			    ((done_ccb->ccb_h.flags & CAM_DIR_MASK) ==
944*a9934668SKenneth D. Merry 			    CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
945*a9934668SKenneth D. Merry 			    (done_ccb->ccb_h.flags & CAM_DIR_OUT) ?
946*a9934668SKenneth D. Merry 			    DEVSTAT_WRITE : DEVSTAT_READ, NULL,
947*a9934668SKenneth D. Merry 			    &io_req->start_time);
948*a9934668SKenneth D. Merry 			break;
949*a9934668SKenneth D. Merry 		case XPT_SMP_IO:
950*a9934668SKenneth D. Merry 			/*
951*a9934668SKenneth D. Merry 			 * XXX KDM this isn't quite right, but there isn't
952*a9934668SKenneth D. Merry 			 * currently an easy way to represent a bidirectional
953*a9934668SKenneth D. Merry 			 * transfer in devstat.  The only way to do it
954*a9934668SKenneth D. Merry 			 * and have the byte counts come out right would
955*a9934668SKenneth D. Merry 			 * mean that we would have to record two
956*a9934668SKenneth D. Merry 			 * transactions, one for the request and one for the
957*a9934668SKenneth D. Merry 			 * response.  For now, so that we report something,
958*a9934668SKenneth D. Merry 			 * just treat the entire thing as a read.
959*a9934668SKenneth D. Merry 			 */
960*a9934668SKenneth D. Merry 			devstat_end_transaction(softc->device_stats,
961*a9934668SKenneth D. Merry 			    done_ccb->smpio.smp_request_len +
962*a9934668SKenneth D. Merry 			    done_ccb->smpio.smp_response_len,
963*a9934668SKenneth D. Merry 			    DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL,
964*a9934668SKenneth D. Merry 			    &io_req->start_time);
965*a9934668SKenneth D. Merry 			break;
966*a9934668SKenneth D. Merry 		default:
967*a9934668SKenneth D. Merry 			devstat_end_transaction(softc->device_stats, 0,
968*a9934668SKenneth D. Merry 			    DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL,
969*a9934668SKenneth D. Merry 			    &io_req->start_time);
970*a9934668SKenneth D. Merry 			break;
971*a9934668SKenneth D. Merry 		}
972*a9934668SKenneth D. Merry 
973*a9934668SKenneth D. Merry 		/*
974*a9934668SKenneth D. Merry 		 * In the normal case, take the completed I/O off of the
975*a9934668SKenneth D. Merry 		 * active queue and put it on the done queue.  Notify the
976*a9934668SKenneth D. Merry 		 * user that we have a completed I/O.
977*a9934668SKenneth D. Merry 		 */
978*a9934668SKenneth D. Merry 		if ((io_req->flags & PASS_IO_ABANDONED) == 0) {
979*a9934668SKenneth D. Merry 			TAILQ_REMOVE(&softc->active_queue, io_req, links);
980*a9934668SKenneth D. Merry 			TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
981*a9934668SKenneth D. Merry 			selwakeuppri(&softc->read_select, PRIBIO);
982*a9934668SKenneth D. Merry 			KNOTE_LOCKED(&softc->read_select.si_note, 0);
983*a9934668SKenneth D. Merry 		} else {
984*a9934668SKenneth D. Merry 			/*
985*a9934668SKenneth D. Merry 			 * In the case of an abandoned I/O (final close
986*a9934668SKenneth D. Merry 			 * without fetching the I/O), take it off of the
987*a9934668SKenneth D. Merry 			 * abandoned queue and free it.
988*a9934668SKenneth D. Merry 			 */
989*a9934668SKenneth D. Merry 			TAILQ_REMOVE(&softc->abandoned_queue, io_req, links);
990*a9934668SKenneth D. Merry 			passiocleanup(softc, io_req);
991*a9934668SKenneth D. Merry 			uma_zfree(softc->pass_zone, io_req);
992*a9934668SKenneth D. Merry 
993*a9934668SKenneth D. Merry 			/*
994*a9934668SKenneth D. Merry 			 * Release the done_ccb here, since we may wind up
995*a9934668SKenneth D. Merry 			 * freeing the peripheral when we decrement the
996*a9934668SKenneth D. Merry 			 * reference count below.
997*a9934668SKenneth D. Merry 			 */
998*a9934668SKenneth D. Merry 			xpt_release_ccb(done_ccb);
999*a9934668SKenneth D. Merry 
1000*a9934668SKenneth D. Merry 			/*
1001*a9934668SKenneth D. Merry 			 * If the abandoned queue is empty, we can release
1002*a9934668SKenneth D. Merry 			 * our reference to the periph since we won't have
1003*a9934668SKenneth D. Merry 			 * any more completions coming.
1004*a9934668SKenneth D. Merry 			 */
1005*a9934668SKenneth D. Merry 			if ((TAILQ_EMPTY(&softc->abandoned_queue))
1006*a9934668SKenneth D. Merry 			 && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) {
1007*a9934668SKenneth D. Merry 				softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET;
1008*a9934668SKenneth D. Merry 				cam_periph_release_locked(periph);
1009*a9934668SKenneth D. Merry 			}
1010*a9934668SKenneth D. Merry 
1011*a9934668SKenneth D. Merry 			/*
1012*a9934668SKenneth D. Merry 			 * We have already released the CCB, so we can
1013*a9934668SKenneth D. Merry 			 * return.
1014*a9934668SKenneth D. Merry 			 */
1015*a9934668SKenneth D. Merry 			return;
1016*a9934668SKenneth D. Merry 		}
1017*a9934668SKenneth D. Merry 		break;
1018*a9934668SKenneth D. Merry 	}
1019*a9934668SKenneth D. Merry 	}
1020*a9934668SKenneth D. Merry 	xpt_release_ccb(done_ccb);
1021*a9934668SKenneth D. Merry }
1022*a9934668SKenneth D. Merry 
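/*
 * Create the UMA zones used by the asynchronous pass(4) interface: one zone
 * for struct pass_io_req and one for the data buffers backing S/G I/O.
 * Only the first caller actually creates the zones; concurrent callers sleep
 * until creation either succeeds or fails.  Called with the periph lock
 * held; the lock is dropped around the blocking uma_zcreate() calls.
 */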
1023*a9934668SKenneth D. Merry static int
1024*a9934668SKenneth D. Merry passcreatezone(struct cam_periph *periph)
1025*a9934668SKenneth D. Merry {
1026*a9934668SKenneth D. Merry 	struct pass_softc *softc;
1027*a9934668SKenneth D. Merry 	int error;
1028*a9934668SKenneth D. Merry 
1029*a9934668SKenneth D. Merry 	error = 0;
1030*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
1031*a9934668SKenneth D. Merry 
1032*a9934668SKenneth D. Merry 	cam_periph_assert(periph, MA_OWNED);
1033*a9934668SKenneth D. Merry 	KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0),
1034*a9934668SKenneth D. Merry 		("%s called when the pass(4) zone is valid!\n", __func__));
1035*a9934668SKenneth D. Merry 	KASSERT((softc->pass_zone == NULL),
1036*a9934668SKenneth D. Merry 		("%s called when the pass(4) zone is allocated!\n", __func__));
1037*a9934668SKenneth D. Merry 
1038*a9934668SKenneth D. Merry 	if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) {
1039*a9934668SKenneth D. Merry 
1040*a9934668SKenneth D. Merry 		/*
1041*a9934668SKenneth D. Merry 		 * We're the first context through, so we need to create
1042*a9934668SKenneth D. Merry 		 * the pass(4) UMA zone for I/O requests.
1043*a9934668SKenneth D. Merry 		 */
1044*a9934668SKenneth D. Merry 		softc->flags |= PASS_FLAG_ZONE_INPROG;
1045*a9934668SKenneth D. Merry 
1046*a9934668SKenneth D. Merry 		/*
1047*a9934668SKenneth D. Merry 		 * uma_zcreate() does a blocking (M_WAITOK) allocation,
1048*a9934668SKenneth D. Merry 		 * so we cannot hold a mutex while we call it.
1049*a9934668SKenneth D. Merry 		 */
1050*a9934668SKenneth D. Merry 		cam_periph_unlock(periph);
1051*a9934668SKenneth D. Merry 
1052*a9934668SKenneth D. Merry 		softc->pass_zone = uma_zcreate(softc->zone_name,
1053*a9934668SKenneth D. Merry 		    sizeof(struct pass_io_req), NULL, NULL, NULL, NULL,
1054*a9934668SKenneth D. Merry 		    /*align*/ 0, /*flags*/ 0);
1055*a9934668SKenneth D. Merry 
1056*a9934668SKenneth D. Merry 		softc->pass_io_zone = uma_zcreate(softc->io_zone_name,
1057*a9934668SKenneth D. Merry 		    softc->io_zone_size, NULL, NULL, NULL, NULL,
1058*a9934668SKenneth D. Merry 		    /*align*/ 0, /*flags*/ 0);
1059*a9934668SKenneth D. Merry 
1060*a9934668SKenneth D. Merry 		cam_periph_lock(periph);
1061*a9934668SKenneth D. Merry 
1062*a9934668SKenneth D. Merry 		if ((softc->pass_zone == NULL)
1063*a9934668SKenneth D. Merry 		 || (softc->pass_io_zone == NULL)) {
1064*a9934668SKenneth D. Merry 			if (softc->pass_zone == NULL)
1065*a9934668SKenneth D. Merry 				xpt_print(periph->path, "unable to allocate "
1066*a9934668SKenneth D. Merry 				    "IO Req UMA zone\n");
1067*a9934668SKenneth D. Merry 			else
1068*a9934668SKenneth D. Merry 				xpt_print(periph->path, "unable to allocate "
1069*a9934668SKenneth D. Merry 				    "IO UMA zone\n");
1070*a9934668SKenneth D. Merry 			softc->flags &= ~PASS_FLAG_ZONE_INPROG;
1071*a9934668SKenneth D. Merry 			goto bailout;
1072*a9934668SKenneth D. Merry 		}
1073*a9934668SKenneth D. Merry 
1074*a9934668SKenneth D. Merry 		/*
1075*a9934668SKenneth D. Merry 		 * Set the flags appropriately and notify any other waiters.
1076*a9934668SKenneth D. Merry 		 */
1077*a9934668SKenneth D. Merry 		softc->flags &= ~PASS_FLAG_ZONE_INPROG;
1078*a9934668SKenneth D. Merry 		softc->flags |= PASS_FLAG_ZONE_VALID;
1079*a9934668SKenneth D. Merry 		wakeup(&softc->pass_zone);
1080*a9934668SKenneth D. Merry 	} else {
1081*a9934668SKenneth D. Merry 		/*
1082*a9934668SKenneth D. Merry 		 * In this case, the UMA zone has not yet been created, but
1083*a9934668SKenneth D. Merry 		 * another context is in the process of creating it.  We
1084*a9934668SKenneth D. Merry 		 * need to sleep until the creation is either done or has
1085*a9934668SKenneth D. Merry 		 * failed.
1086*a9934668SKenneth D. Merry 		 */
1087*a9934668SKenneth D. Merry 		while ((softc->flags & PASS_FLAG_ZONE_INPROG)
1088*a9934668SKenneth D. Merry 		    && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) {
1089*a9934668SKenneth D. Merry 			error = msleep(&softc->pass_zone,
1090*a9934668SKenneth D. Merry 				       cam_periph_mtx(periph), PRIBIO,
1091*a9934668SKenneth D. Merry 				       "paszon", 0);
1092*a9934668SKenneth D. Merry 			if (error != 0)
1093*a9934668SKenneth D. Merry 				goto bailout;
1094*a9934668SKenneth D. Merry 		}
1095*a9934668SKenneth D. Merry 		/*
1096*a9934668SKenneth D. Merry 		 * If the zone creation failed, no luck for the user.
1097*a9934668SKenneth D. Merry 		 */
1098*a9934668SKenneth D. Merry 		if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
1099*a9934668SKenneth D. Merry 			error = ENOMEM;
1100*a9934668SKenneth D. Merry 			goto bailout;
1101*a9934668SKenneth D. Merry 		}
1102*a9934668SKenneth D. Merry 	}
1103*a9934668SKenneth D. Merry bailout:
1104*a9934668SKenneth D. Merry 	return (error);
1105*a9934668SKenneth D. Merry }
1106*a9934668SKenneth D. Merry 
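/*
 * Free any kernel buffers and scatter/gather lists allocated for an I/O
 * request and restore the CCB data pointers to the user's original values.
 */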
1107*a9934668SKenneth D. Merry static void
1108*a9934668SKenneth D. Merry passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req)
1109*a9934668SKenneth D. Merry {
1110*a9934668SKenneth D. Merry 	union ccb *ccb;
1111*a9934668SKenneth D. Merry 	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
1112*a9934668SKenneth D. Merry 	int i, numbufs;
1113*a9934668SKenneth D. Merry 
1114*a9934668SKenneth D. Merry 	ccb = &io_req->ccb;
1115*a9934668SKenneth D. Merry 
1116*a9934668SKenneth D. Merry 	switch (ccb->ccb_h.func_code) {
1117*a9934668SKenneth D. Merry 	case XPT_DEV_MATCH:
1118*a9934668SKenneth D. Merry 		numbufs = min(io_req->num_bufs, 2);
1119*a9934668SKenneth D. Merry 
1120*a9934668SKenneth D. Merry 		if (numbufs == 1) {
1121*a9934668SKenneth D. Merry 			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
1122*a9934668SKenneth D. Merry 		} else {
1123*a9934668SKenneth D. Merry 			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
1124*a9934668SKenneth D. Merry 			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
1125*a9934668SKenneth D. Merry 		}
1126*a9934668SKenneth D. Merry 		break;
1127*a9934668SKenneth D. Merry 	case XPT_SCSI_IO:
1128*a9934668SKenneth D. Merry 	case XPT_CONT_TARGET_IO:
1129*a9934668SKenneth D. Merry 		data_ptrs[0] = &ccb->csio.data_ptr;
1130*a9934668SKenneth D. Merry 		numbufs = min(io_req->num_bufs, 1);
1131*a9934668SKenneth D. Merry 		break;
1132*a9934668SKenneth D. Merry 	case XPT_ATA_IO:
1133*a9934668SKenneth D. Merry 		data_ptrs[0] = &ccb->ataio.data_ptr;
1134*a9934668SKenneth D. Merry 		numbufs = min(io_req->num_bufs, 1);
1135*a9934668SKenneth D. Merry 		break;
1136*a9934668SKenneth D. Merry 	case XPT_SMP_IO:
1137*a9934668SKenneth D. Merry 		numbufs = min(io_req->num_bufs, 2);
1138*a9934668SKenneth D. Merry 		data_ptrs[0] = &ccb->smpio.smp_request;
1139*a9934668SKenneth D. Merry 		data_ptrs[1] = &ccb->smpio.smp_response;
1140*a9934668SKenneth D. Merry 		break;
1141*a9934668SKenneth D. Merry 	case XPT_DEV_ADVINFO:
1142*a9934668SKenneth D. Merry 		numbufs = min(io_req->num_bufs, 1);
1143*a9934668SKenneth D. Merry 		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
1144*a9934668SKenneth D. Merry 		break;
1145*a9934668SKenneth D. Merry 	default:
1146*a9934668SKenneth D. Merry 		/* No data buffers to clean up for other CCB types. */
1147*a9934668SKenneth D. Merry 		return;
1148*a9934668SKenneth D. Merry 		break; /* NOTREACHED */
1149*a9934668SKenneth D. Merry 	}
1150*a9934668SKenneth D. Merry 
1151*a9934668SKenneth D. Merry 	if (io_req->flags & PASS_IO_USER_SEG_MALLOC) {
1152*a9934668SKenneth D. Merry 		free(io_req->user_segptr, M_SCSIPASS);
1153*a9934668SKenneth D. Merry 		io_req->user_segptr = NULL;
1154*a9934668SKenneth D. Merry 	}
1155*a9934668SKenneth D. Merry 
1156*a9934668SKenneth D. Merry 	/*
1157*a9934668SKenneth D. Merry 	 * We only want to free memory we malloced.
1158*a9934668SKenneth D. Merry 	 */
1159*a9934668SKenneth D. Merry 	if (io_req->data_flags == CAM_DATA_VADDR) {
1160*a9934668SKenneth D. Merry 		for (i = 0; i < io_req->num_bufs; i++) {
1161*a9934668SKenneth D. Merry 			if (io_req->kern_bufs[i] == NULL)
1162*a9934668SKenneth D. Merry 				continue;
1163*a9934668SKenneth D. Merry 
1164*a9934668SKenneth D. Merry 			free(io_req->kern_bufs[i], M_SCSIPASS);
1165*a9934668SKenneth D. Merry 			io_req->kern_bufs[i] = NULL;
1166*a9934668SKenneth D. Merry 		}
1167*a9934668SKenneth D. Merry 	} else if (io_req->data_flags == CAM_DATA_SG) {
1168*a9934668SKenneth D. Merry 		for (i = 0; i < io_req->num_kern_segs; i++) {
1169*a9934668SKenneth D. Merry 			if ((uint8_t *)(uintptr_t)
1170*a9934668SKenneth D. Merry 			    io_req->kern_segptr[i].ds_addr == NULL)
1171*a9934668SKenneth D. Merry 				continue;
1172*a9934668SKenneth D. Merry 
1173*a9934668SKenneth D. Merry 			uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t)
1174*a9934668SKenneth D. Merry 			    io_req->kern_segptr[i].ds_addr);
1175*a9934668SKenneth D. Merry 			io_req->kern_segptr[i].ds_addr = 0;
1176*a9934668SKenneth D. Merry 		}
1177*a9934668SKenneth D. Merry 	}
1178*a9934668SKenneth D. Merry 
1179*a9934668SKenneth D. Merry 	if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) {
1180*a9934668SKenneth D. Merry 		free(io_req->kern_segptr, M_SCSIPASS);
1181*a9934668SKenneth D. Merry 		io_req->kern_segptr = NULL;
1182*a9934668SKenneth D. Merry 	}
1183*a9934668SKenneth D. Merry 
1184*a9934668SKenneth D. Merry 	if (io_req->data_flags != CAM_DATA_PADDR) {
1185*a9934668SKenneth D. Merry 		for (i = 0; i < numbufs; i++) {
1186*a9934668SKenneth D. Merry 			/*
1187*a9934668SKenneth D. Merry 			 * Restore the user's buffer pointers to their
1188*a9934668SKenneth D. Merry 			 * previous values.
1189*a9934668SKenneth D. Merry 			 */
1190*a9934668SKenneth D. Merry 			if (io_req->user_bufs[i] != NULL)
1191*a9934668SKenneth D. Merry 				*data_ptrs[i] = io_req->user_bufs[i];
1192*a9934668SKenneth D. Merry 		}
1193*a9934668SKenneth D. Merry 	}
1194*a9934668SKenneth D. Merry 
1195*a9934668SKenneth D. Merry }
1196*a9934668SKenneth D. Merry 
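/*
 * Copy data between the user's S/G list and the kernel's S/G list for a
 * CAM_DATA_SG request.  The two lists may have different segment sizes, so
 * both are walked in parallel and the copy is done in chunks bounded by
 * whichever segment has less data remaining.
 */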
1197*a9934668SKenneth D. Merry static int
1198*a9934668SKenneth D. Merry passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req,
1199*a9934668SKenneth D. Merry 	       ccb_flags direction)
1200*a9934668SKenneth D. Merry {
1201*a9934668SKenneth D. Merry 	bus_size_t kern_watermark, user_watermark, len_copied, len_to_copy;
1202*a9934668SKenneth D. Merry 	bus_dma_segment_t *user_sglist, *kern_sglist;
1203*a9934668SKenneth D. Merry 	int i, j, error;
1204*a9934668SKenneth D. Merry 
1205*a9934668SKenneth D. Merry 	error = 0;
1206*a9934668SKenneth D. Merry 	kern_watermark = 0;
1207*a9934668SKenneth D. Merry 	user_watermark = 0;
1208*a9934668SKenneth D. Merry 	len_to_copy = 0;
1209*a9934668SKenneth D. Merry 	len_copied = 0;
1210*a9934668SKenneth D. Merry 	user_sglist = io_req->user_segptr;
1211*a9934668SKenneth D. Merry 	kern_sglist = io_req->kern_segptr;
1212*a9934668SKenneth D. Merry 
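	/*
	 * Walk both S/G lists in parallel.  user_watermark and
	 * kern_watermark track how far into the current user and kernel
	 * segment we have copied; each resets to zero when its segment is
	 * exhausted and the corresponding index advances.
	 */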
1213*a9934668SKenneth D. Merry 	for (i = 0, j = 0; i < io_req->num_user_segs &&
1214*a9934668SKenneth D. Merry 	     j < io_req->num_kern_segs;) {
1215*a9934668SKenneth D. Merry 		uint8_t *user_ptr, *kern_ptr;
1216*a9934668SKenneth D. Merry 
1217*a9934668SKenneth D. Merry 		len_to_copy = min(user_sglist[i].ds_len - user_watermark,
1218*a9934668SKenneth D. Merry 		    kern_sglist[j].ds_len - kern_watermark);
1219*a9934668SKenneth D. Merry 
1220*a9934668SKenneth D. Merry 		user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr;
1221*a9934668SKenneth D. Merry 		user_ptr = user_ptr + user_watermark;
1222*a9934668SKenneth D. Merry 		kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr;
1223*a9934668SKenneth D. Merry 		kern_ptr = kern_ptr + kern_watermark;
1224*a9934668SKenneth D. Merry 
1225*a9934668SKenneth D. Merry 		user_watermark += len_to_copy;
1226*a9934668SKenneth D. Merry 		kern_watermark += len_to_copy;
1227*a9934668SKenneth D. Merry 
1228*a9934668SKenneth D. Merry 		if (!useracc(user_ptr, len_to_copy,
1229*a9934668SKenneth D. Merry 		    (direction == CAM_DIR_IN) ? VM_PROT_WRITE : VM_PROT_READ)) {
1230*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: unable to access user "
1231*a9934668SKenneth D. Merry 				  "S/G list element %p len %zu\n", __func__,
1232*a9934668SKenneth D. Merry 				  user_ptr, len_to_copy);
1233*a9934668SKenneth D. Merry 			error = EFAULT;
1234*a9934668SKenneth D. Merry 			goto bailout;
1235*a9934668SKenneth D. Merry 		}
1236*a9934668SKenneth D. Merry 
1237*a9934668SKenneth D. Merry 		if (direction == CAM_DIR_IN) {
1238*a9934668SKenneth D. Merry 			error = copyout(kern_ptr, user_ptr, len_to_copy);
1239*a9934668SKenneth D. Merry 			if (error != 0) {
1240*a9934668SKenneth D. Merry 				xpt_print(periph->path, "%s: copyout of %ju "
1241*a9934668SKenneth D. Merry 					  "bytes from %p to %p failed with "
1242*a9934668SKenneth D. Merry 					  "error %d\n", __func__,
1243*a9934668SKenneth D. Merry 					  (uintmax_t)len_to_copy, kern_ptr, user_ptr, error);
1244*a9934668SKenneth D. Merry 				goto bailout;
1245*a9934668SKenneth D. Merry 			}
1246*a9934668SKenneth D. Merry 		} else {
1247*a9934668SKenneth D. Merry 			error = copyin(user_ptr, kern_ptr, len_to_copy);
1248*a9934668SKenneth D. Merry 			if (error != 0) {
1249*a9934668SKenneth D. Merry 				xpt_print(periph->path, "%s: copyin of %ju "
1250*a9934668SKenneth D. Merry 					  "bytes from %p to %p failed with "
1251*a9934668SKenneth D. Merry 					  "error %d\n", __func__,
1252*a9934668SKenneth D. Merry 					  (uintmax_t)len_to_copy, user_ptr, kern_ptr, error);
1253*a9934668SKenneth D. Merry 				goto bailout;
1254*a9934668SKenneth D. Merry 			}
1255*a9934668SKenneth D. Merry 		}
1256*a9934668SKenneth D. Merry 
1257*a9934668SKenneth D. Merry 		len_copied += len_to_copy;
1258*a9934668SKenneth D. Merry 
1259*a9934668SKenneth D. Merry 		if (user_sglist[i].ds_len == user_watermark) {
1260*a9934668SKenneth D. Merry 			i++;
1261*a9934668SKenneth D. Merry 			user_watermark = 0;
1262*a9934668SKenneth D. Merry 		}
1263*a9934668SKenneth D. Merry 
1264*a9934668SKenneth D. Merry 		if (kern_sglist[j].ds_len == kern_watermark) {
1265*a9934668SKenneth D. Merry 			j++;
1266*a9934668SKenneth D. Merry 			kern_watermark = 0;
1267*a9934668SKenneth D. Merry 		}
1268*a9934668SKenneth D. Merry 	}
1269*a9934668SKenneth D. Merry 
1270*a9934668SKenneth D. Merry bailout:
1271*a9934668SKenneth D. Merry 
1272*a9934668SKenneth D. Merry 	return (error);
1273*a9934668SKenneth D. Merry }
1274*a9934668SKenneth D. Merry 
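/*
 * Prepare the data buffers for an asynchronously queued CCB.  Depending on
 * the data transfer type, this validates the user's addresses, allocates
 * kernel bounce buffers or a kernel S/G list, copies in any data headed to
 * the device, and points the CCB at the kernel copies.  The user's original
 * pointers are saved so they can be restored in passiocleanup().
 */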
1275*a9934668SKenneth D. Merry static int
1276*a9934668SKenneth D. Merry passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req)
1277*a9934668SKenneth D. Merry {
1278*a9934668SKenneth D. Merry 	union ccb *ccb;
1279*a9934668SKenneth D. Merry 	struct pass_softc *softc;
1280*a9934668SKenneth D. Merry 	int numbufs, i;
1281*a9934668SKenneth D. Merry 	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
1282*a9934668SKenneth D. Merry 	uint32_t lengths[CAM_PERIPH_MAXMAPS];
1283*a9934668SKenneth D. Merry 	uint32_t dirs[CAM_PERIPH_MAXMAPS];
1284*a9934668SKenneth D. Merry 	uint32_t num_segs;
1285*a9934668SKenneth D. Merry 	uint16_t *seg_cnt_ptr;
1286*a9934668SKenneth D. Merry 	size_t maxmap;
1287*a9934668SKenneth D. Merry 	int error;
1288*a9934668SKenneth D. Merry 
1289*a9934668SKenneth D. Merry 	cam_periph_assert(periph, MA_NOTOWNED);
1290*a9934668SKenneth D. Merry 
1291*a9934668SKenneth D. Merry 	softc = periph->softc;
1292*a9934668SKenneth D. Merry 
1293*a9934668SKenneth D. Merry 	error = 0;
1294*a9934668SKenneth D. Merry 	ccb = &io_req->ccb;
1295*a9934668SKenneth D. Merry 	maxmap = 0;
1296*a9934668SKenneth D. Merry 	num_segs = 0;
1297*a9934668SKenneth D. Merry 	seg_cnt_ptr = NULL;
1298*a9934668SKenneth D. Merry 
1299*a9934668SKenneth D. Merry 	switch(ccb->ccb_h.func_code) {
1300*a9934668SKenneth D. Merry 	case XPT_DEV_MATCH:
1301*a9934668SKenneth D. Merry 		if (ccb->cdm.match_buf_len == 0) {
1302*a9934668SKenneth D. Merry 			printf("%s: invalid match buffer length 0\n", __func__);
1303*a9934668SKenneth D. Merry 			return(EINVAL);
1304*a9934668SKenneth D. Merry 		}
1305*a9934668SKenneth D. Merry 		if (ccb->cdm.pattern_buf_len > 0) {
1306*a9934668SKenneth D. Merry 			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
1307*a9934668SKenneth D. Merry 			lengths[0] = ccb->cdm.pattern_buf_len;
1308*a9934668SKenneth D. Merry 			dirs[0] = CAM_DIR_OUT;
1309*a9934668SKenneth D. Merry 			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
1310*a9934668SKenneth D. Merry 			lengths[1] = ccb->cdm.match_buf_len;
1311*a9934668SKenneth D. Merry 			dirs[1] = CAM_DIR_IN;
1312*a9934668SKenneth D. Merry 			numbufs = 2;
1313*a9934668SKenneth D. Merry 		} else {
1314*a9934668SKenneth D. Merry 			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
1315*a9934668SKenneth D. Merry 			lengths[0] = ccb->cdm.match_buf_len;
1316*a9934668SKenneth D. Merry 			dirs[0] = CAM_DIR_IN;
1317*a9934668SKenneth D. Merry 			numbufs = 1;
1318*a9934668SKenneth D. Merry 		}
1319*a9934668SKenneth D. Merry 		io_req->data_flags = CAM_DATA_VADDR;
1320*a9934668SKenneth D. Merry 		break;
1321*a9934668SKenneth D. Merry 	case XPT_SCSI_IO:
1322*a9934668SKenneth D. Merry 	case XPT_CONT_TARGET_IO:
1323*a9934668SKenneth D. Merry 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
1324*a9934668SKenneth D. Merry 			return(0);
1325*a9934668SKenneth D. Merry 
1326*a9934668SKenneth D. Merry 		/*
1327*a9934668SKenneth D. Merry 		 * The user shouldn't be able to supply a bio.
1328*a9934668SKenneth D. Merry 		 */
1329*a9934668SKenneth D. Merry 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
1330*a9934668SKenneth D. Merry 			return (EINVAL);
1331*a9934668SKenneth D. Merry 
1332*a9934668SKenneth D. Merry 		io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK;
1333*a9934668SKenneth D. Merry 
1334*a9934668SKenneth D. Merry 		data_ptrs[0] = &ccb->csio.data_ptr;
1335*a9934668SKenneth D. Merry 		lengths[0] = ccb->csio.dxfer_len;
1336*a9934668SKenneth D. Merry 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1337*a9934668SKenneth D. Merry 		num_segs = ccb->csio.sglist_cnt;
1338*a9934668SKenneth D. Merry 		seg_cnt_ptr = &ccb->csio.sglist_cnt;
1339*a9934668SKenneth D. Merry 		numbufs = 1;
1340*a9934668SKenneth D. Merry 		maxmap = softc->maxio;
1341*a9934668SKenneth D. Merry 		break;
1342*a9934668SKenneth D. Merry 	case XPT_ATA_IO:
1343*a9934668SKenneth D. Merry 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
1344*a9934668SKenneth D. Merry 			return(0);
1345*a9934668SKenneth D. Merry 
1346*a9934668SKenneth D. Merry 		/*
1347*a9934668SKenneth D. Merry 		 * We only support a single virtual address for ATA I/O.
1348*a9934668SKenneth D. Merry 		 */
1349*a9934668SKenneth D. Merry 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
1350*a9934668SKenneth D. Merry 			return (EINVAL);
1351*a9934668SKenneth D. Merry 
1352*a9934668SKenneth D. Merry 		io_req->data_flags = CAM_DATA_VADDR;
1353*a9934668SKenneth D. Merry 
1354*a9934668SKenneth D. Merry 		data_ptrs[0] = &ccb->ataio.data_ptr;
1355*a9934668SKenneth D. Merry 		lengths[0] = ccb->ataio.dxfer_len;
1356*a9934668SKenneth D. Merry 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1357*a9934668SKenneth D. Merry 		numbufs = 1;
1358*a9934668SKenneth D. Merry 		maxmap = softc->maxio;
1359*a9934668SKenneth D. Merry 		break;
1360*a9934668SKenneth D. Merry 	case XPT_SMP_IO:
1361*a9934668SKenneth D. Merry 		io_req->data_flags = CAM_DATA_VADDR;
1362*a9934668SKenneth D. Merry 
1363*a9934668SKenneth D. Merry 		data_ptrs[0] = &ccb->smpio.smp_request;
1364*a9934668SKenneth D. Merry 		lengths[0] = ccb->smpio.smp_request_len;
1365*a9934668SKenneth D. Merry 		dirs[0] = CAM_DIR_OUT;
1366*a9934668SKenneth D. Merry 		data_ptrs[1] = &ccb->smpio.smp_response;
1367*a9934668SKenneth D. Merry 		lengths[1] = ccb->smpio.smp_response_len;
1368*a9934668SKenneth D. Merry 		dirs[1] = CAM_DIR_IN;
1369*a9934668SKenneth D. Merry 		numbufs = 2;
1370*a9934668SKenneth D. Merry 		maxmap = softc->maxio;
1371*a9934668SKenneth D. Merry 		break;
1372*a9934668SKenneth D. Merry 	case XPT_DEV_ADVINFO:
1373*a9934668SKenneth D. Merry 		if (ccb->cdai.bufsiz == 0)
1374*a9934668SKenneth D. Merry 			return (0);
1375*a9934668SKenneth D. Merry 
1376*a9934668SKenneth D. Merry 		io_req->data_flags = CAM_DATA_VADDR;
1377*a9934668SKenneth D. Merry 
1378*a9934668SKenneth D. Merry 		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
1379*a9934668SKenneth D. Merry 		lengths[0] = ccb->cdai.bufsiz;
1380*a9934668SKenneth D. Merry 		dirs[0] = CAM_DIR_IN;
1381*a9934668SKenneth D. Merry 		numbufs = 1;
1382*a9934668SKenneth D. Merry 		break;
1383*a9934668SKenneth D. Merry 	default:
1384*a9934668SKenneth D. Merry 		return(EINVAL);
1385*a9934668SKenneth D. Merry 		break; /* NOTREACHED */
1386*a9934668SKenneth D. Merry 	}
1387*a9934668SKenneth D. Merry 
1388*a9934668SKenneth D. Merry 	io_req->num_bufs = numbufs;
1389*a9934668SKenneth D. Merry 
1390*a9934668SKenneth D. Merry 	/*
1391*a9934668SKenneth D. Merry 	 * If there is a maximum, check to make sure that the user's
1392*a9934668SKenneth D. Merry 	 * request fits within the limit.  In general, we should only have
1393*a9934668SKenneth D. Merry 	 * a maximum length for requests that go to hardware.  Otherwise it
1394*a9934668SKenneth D. Merry 	 * is whatever we're able to malloc.
1395*a9934668SKenneth D. Merry 	 */
1396*a9934668SKenneth D. Merry 	for (i = 0; i < numbufs; i++) {
1397*a9934668SKenneth D. Merry 		io_req->user_bufs[i] = *data_ptrs[i];
1398*a9934668SKenneth D. Merry 		io_req->dirs[i] = dirs[i];
1399*a9934668SKenneth D. Merry 		io_req->lengths[i] = lengths[i];
1400*a9934668SKenneth D. Merry 
1401*a9934668SKenneth D. Merry 		if (maxmap == 0)
1402*a9934668SKenneth D. Merry 			continue;
1403*a9934668SKenneth D. Merry 
1404*a9934668SKenneth D. Merry 		if (lengths[i] <= maxmap)
1405*a9934668SKenneth D. Merry 			continue;
1406*a9934668SKenneth D. Merry 
1407*a9934668SKenneth D. Merry 		xpt_print(periph->path, "%s: data length %u > max allowed %zu "
1408*a9934668SKenneth D. Merry 			  "bytes\n", __func__, lengths[i], maxmap);
1409*a9934668SKenneth D. Merry 		error = EINVAL;
1410*a9934668SKenneth D. Merry 		goto bailout;
1411*a9934668SKenneth D. Merry 	}
1412*a9934668SKenneth D. Merry 
1413*a9934668SKenneth D. Merry 	switch (io_req->data_flags) {
1414*a9934668SKenneth D. Merry 	case CAM_DATA_VADDR:
1415*a9934668SKenneth D. Merry 		/* Map or copy the buffer into kernel address space */
1416*a9934668SKenneth D. Merry 		for (i = 0; i < numbufs; i++) {
1417*a9934668SKenneth D. Merry 			uint8_t *tmp_buf;
1418*a9934668SKenneth D. Merry 
1419*a9934668SKenneth D. Merry 			/*
1420*a9934668SKenneth D. Merry 			 * If for some reason no length is specified, we
1421*a9934668SKenneth D. Merry 			 * don't need to allocate anything.
1422*a9934668SKenneth D. Merry 			 */
1423*a9934668SKenneth D. Merry 			if (io_req->lengths[i] == 0)
1424*a9934668SKenneth D. Merry 				continue;
1425*a9934668SKenneth D. Merry 
1426*a9934668SKenneth D. Merry 			/*
1427*a9934668SKenneth D. Merry 			 * Make sure that the user's buffer is accessible
1428*a9934668SKenneth D. Merry 			 * to the calling process.
1429*a9934668SKenneth D. Merry 			 */
1430*a9934668SKenneth D. Merry 			if (!useracc(io_req->user_bufs[i], io_req->lengths[i],
1431*a9934668SKenneth D. Merry 			    (io_req->dirs[i] == CAM_DIR_IN) ? VM_PROT_WRITE :
1432*a9934668SKenneth D. Merry 			     VM_PROT_READ)) {
1433*a9934668SKenneth D. Merry 				xpt_print(periph->path, "%s: user address %p "
1434*a9934668SKenneth D. Merry 				    "length %u is not accessible\n", __func__,
1435*a9934668SKenneth D. Merry 				    io_req->user_bufs[i], io_req->lengths[i]);
1436*a9934668SKenneth D. Merry 				error = EFAULT;
1437*a9934668SKenneth D. Merry 				goto bailout;
1438*a9934668SKenneth D. Merry 			}
1439*a9934668SKenneth D. Merry 
1440*a9934668SKenneth D. Merry 			tmp_buf = malloc(lengths[i], M_SCSIPASS,
1441*a9934668SKenneth D. Merry 					 M_WAITOK | M_ZERO);
1442*a9934668SKenneth D. Merry 			io_req->kern_bufs[i] = tmp_buf;
1443*a9934668SKenneth D. Merry 			*data_ptrs[i] = tmp_buf;
1444*a9934668SKenneth D. Merry 
1445*a9934668SKenneth D. Merry #if 0
1446*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: malloced %p len %u, user "
1447*a9934668SKenneth D. Merry 				  "buffer %p, operation: %s\n", __func__,
1448*a9934668SKenneth D. Merry 				  tmp_buf, lengths[i], io_req->user_bufs[i],
1449*a9934668SKenneth D. Merry 				  (dirs[i] == CAM_DIR_IN) ? "read" : "write");
1450*a9934668SKenneth D. Merry #endif
1451*a9934668SKenneth D. Merry 			/*
1452*a9934668SKenneth D. Merry 			 * We only need to copy in if the user is writing.
1453*a9934668SKenneth D. Merry 			 */
1454*a9934668SKenneth D. Merry 			if (dirs[i] != CAM_DIR_OUT)
1455*a9934668SKenneth D. Merry 				continue;
1456*a9934668SKenneth D. Merry 
1457*a9934668SKenneth D. Merry 			error = copyin(io_req->user_bufs[i],
1458*a9934668SKenneth D. Merry 				       io_req->kern_bufs[i], lengths[i]);
1459*a9934668SKenneth D. Merry 			if (error != 0) {
1460*a9934668SKenneth D. Merry 				xpt_print(periph->path, "%s: copy of user "
1461*a9934668SKenneth D. Merry 					  "buffer from %p to %p failed with "
1462*a9934668SKenneth D. Merry 					  "error %d\n", __func__,
1463*a9934668SKenneth D. Merry 					  io_req->user_bufs[i],
1464*a9934668SKenneth D. Merry 					  io_req->kern_bufs[i], error);
1465*a9934668SKenneth D. Merry 				goto bailout;
1466*a9934668SKenneth D. Merry 			}
1467*a9934668SKenneth D. Merry 		}
1468*a9934668SKenneth D. Merry 		break;
1469*a9934668SKenneth D. Merry 	case CAM_DATA_PADDR:
1470*a9934668SKenneth D. Merry 		/* Pass down the pointer as-is */
1471*a9934668SKenneth D. Merry 		break;
1472*a9934668SKenneth D. Merry 	case CAM_DATA_SG: {
1473*a9934668SKenneth D. Merry 		size_t sg_length, size_to_go, alloc_size;
1474*a9934668SKenneth D. Merry 		uint32_t num_segs_needed;
1475*a9934668SKenneth D. Merry 
1476*a9934668SKenneth D. Merry 		/*
1477*a9934668SKenneth D. Merry 		 * Copy the user S/G list in, and then copy in the
1478*a9934668SKenneth D. Merry 		 * individual segments.
1479*a9934668SKenneth D. Merry 		 */
1480*a9934668SKenneth D. Merry 		/*
1481*a9934668SKenneth D. Merry 		 * We shouldn't see this, but check just in case.
1482*a9934668SKenneth D. Merry 		 */
1483*a9934668SKenneth D. Merry 		if (numbufs != 1) {
1484*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: cannot currently handle "
1485*a9934668SKenneth D. Merry 				  "more than one S/G list per CCB\n", __func__);
1486*a9934668SKenneth D. Merry 			error = EINVAL;
1487*a9934668SKenneth D. Merry 			goto bailout;
1488*a9934668SKenneth D. Merry 		}
1489*a9934668SKenneth D. Merry 
1490*a9934668SKenneth D. Merry 		/*
1491*a9934668SKenneth D. Merry 		 * We have to have at least one segment.
1492*a9934668SKenneth D. Merry 		 */
1493*a9934668SKenneth D. Merry 		if (num_segs == 0) {
1494*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: CAM_DATA_SG flag set, "
1495*a9934668SKenneth D. Merry 				  "but sglist_cnt=0!\n", __func__);
1496*a9934668SKenneth D. Merry 			error = EINVAL;
1497*a9934668SKenneth D. Merry 			goto bailout;
1498*a9934668SKenneth D. Merry 		}
1499*a9934668SKenneth D. Merry 
1500*a9934668SKenneth D. Merry 		/*
1501*a9934668SKenneth D. Merry 		 * Make sure the user specified the total length and didn't
1502*a9934668SKenneth D. Merry 		 * just leave it to us to decode the S/G list.
1503*a9934668SKenneth D. Merry 		 */
1504*a9934668SKenneth D. Merry 		if (lengths[0] == 0) {
1505*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: no dxfer_len specified, "
1506*a9934668SKenneth D. Merry 				  "but CAM_DATA_SG flag is set!\n", __func__);
1507*a9934668SKenneth D. Merry 			error = EINVAL;
1508*a9934668SKenneth D. Merry 			goto bailout;
1509*a9934668SKenneth D. Merry 		}
1510*a9934668SKenneth D. Merry 
1511*a9934668SKenneth D. Merry 		/*
1512*a9934668SKenneth D. Merry 		 * We allocate buffers in io_zone_size increments for an
1513*a9934668SKenneth D. Merry 		 * S/G list.  This will generally be MAXPHYS.
1514*a9934668SKenneth D. Merry 		 */
1515*a9934668SKenneth D. Merry 		if (lengths[0] <= softc->io_zone_size)
1516*a9934668SKenneth D. Merry 			num_segs_needed = 1;
1517*a9934668SKenneth D. Merry 		else {
1518*a9934668SKenneth D. Merry 			num_segs_needed = lengths[0] / softc->io_zone_size;
1519*a9934668SKenneth D. Merry 			if ((lengths[0] % softc->io_zone_size) != 0)
1520*a9934668SKenneth D. Merry 				num_segs_needed++;
1521*a9934668SKenneth D. Merry 		}
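		/*
		 * For example, assuming an io_zone_size of 128KB (MAXPHYS),
		 * a 300KB transfer would need three kernel segments: two
		 * full 128KB buffers plus one partial buffer for the rest.
		 */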
1522*a9934668SKenneth D. Merry 
1523*a9934668SKenneth D. Merry 		/* Figure out the size of the S/G list */
1524*a9934668SKenneth D. Merry 		sg_length = num_segs * sizeof(bus_dma_segment_t);
1525*a9934668SKenneth D. Merry 		io_req->num_user_segs = num_segs;
1526*a9934668SKenneth D. Merry 		io_req->num_kern_segs = num_segs_needed;
1527*a9934668SKenneth D. Merry 
1528*a9934668SKenneth D. Merry 		/* Save the user's S/G list pointer for later restoration */
1529*a9934668SKenneth D. Merry 		io_req->user_bufs[0] = *data_ptrs[0];
1530*a9934668SKenneth D. Merry 
1531*a9934668SKenneth D. Merry 		/*
1532*a9934668SKenneth D. Merry 		 * If the user's S/G list has more segments than the
1533*a9934668SKenneth D. Merry 		 * statically allocated array can hold, allocate a larger one.
1534*a9934668SKenneth D. Merry 		 */
1535*a9934668SKenneth D. Merry 		if (num_segs > PASS_MAX_SEGS) {
1536*a9934668SKenneth D. Merry 			io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
1537*a9934668SKenneth D. Merry 			    num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
1538*a9934668SKenneth D. Merry 			io_req->flags |= PASS_IO_USER_SEG_MALLOC;
1539*a9934668SKenneth D. Merry 		} else
1540*a9934668SKenneth D. Merry 			io_req->user_segptr = io_req->user_segs;
1541*a9934668SKenneth D. Merry 
1542*a9934668SKenneth D. Merry 		if (!useracc(*data_ptrs[0], sg_length, VM_PROT_READ)) {
1543*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: unable to access user "
1544*a9934668SKenneth D. Merry 				  "S/G list at %p\n", __func__, *data_ptrs[0]);
1545*a9934668SKenneth D. Merry 			error = EFAULT;
1546*a9934668SKenneth D. Merry 			goto bailout;
1547*a9934668SKenneth D. Merry 		}
1548*a9934668SKenneth D. Merry 
1549*a9934668SKenneth D. Merry 		error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
1550*a9934668SKenneth D. Merry 		if (error != 0) {
1551*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: copy of user S/G list "
1552*a9934668SKenneth D. Merry 				  "from %p to %p failed with error %d\n",
1553*a9934668SKenneth D. Merry 				  __func__, *data_ptrs[0], io_req->user_segptr,
1554*a9934668SKenneth D. Merry 				  error);
1555*a9934668SKenneth D. Merry 			goto bailout;
1556*a9934668SKenneth D. Merry 		}
1557*a9934668SKenneth D. Merry 
1558*a9934668SKenneth D. Merry 		if (num_segs_needed > PASS_MAX_SEGS) {
1559*a9934668SKenneth D. Merry 			io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) *
1560*a9934668SKenneth D. Merry 			    num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO);
1561*a9934668SKenneth D. Merry 			io_req->flags |= PASS_IO_KERN_SEG_MALLOC;
1562*a9934668SKenneth D. Merry 		} else {
1563*a9934668SKenneth D. Merry 			io_req->kern_segptr = io_req->kern_segs;
1564*a9934668SKenneth D. Merry 		}
1565*a9934668SKenneth D. Merry 
1566*a9934668SKenneth D. Merry 		/*
1567*a9934668SKenneth D. Merry 		 * Allocate the kernel S/G list.
1568*a9934668SKenneth D. Merry 		 */
1569*a9934668SKenneth D. Merry 		for (size_to_go = lengths[0], i = 0;
1570*a9934668SKenneth D. Merry 		     size_to_go > 0 && i < num_segs_needed;
1571*a9934668SKenneth D. Merry 		     i++, size_to_go -= alloc_size) {
1572*a9934668SKenneth D. Merry 			uint8_t *kern_ptr;
1573*a9934668SKenneth D. Merry 
1574*a9934668SKenneth D. Merry 			alloc_size = min(size_to_go, softc->io_zone_size);
1575*a9934668SKenneth D. Merry 			kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK);
1576*a9934668SKenneth D. Merry 			io_req->kern_segptr[i].ds_addr =
1577*a9934668SKenneth D. Merry 			    (bus_addr_t)(uintptr_t)kern_ptr;
1578*a9934668SKenneth D. Merry 			io_req->kern_segptr[i].ds_len = alloc_size;
1579*a9934668SKenneth D. Merry 		}
1580*a9934668SKenneth D. Merry 		if (size_to_go > 0) {
1581*a9934668SKenneth D. Merry 			printf("%s: size_to_go = %zu, software error!\n",
1582*a9934668SKenneth D. Merry 			       __func__, size_to_go);
1583*a9934668SKenneth D. Merry 			error = EINVAL;
1584*a9934668SKenneth D. Merry 			goto bailout;
1585*a9934668SKenneth D. Merry 		}
1586*a9934668SKenneth D. Merry 
1587*a9934668SKenneth D. Merry 		*data_ptrs[0] = (uint8_t *)io_req->kern_segptr;
1588*a9934668SKenneth D. Merry 		*seg_cnt_ptr = io_req->num_kern_segs;
1589*a9934668SKenneth D. Merry 
1590*a9934668SKenneth D. Merry 		/*
1591*a9934668SKenneth D. Merry 		 * We only need to copy data here if the user is writing.
1592*a9934668SKenneth D. Merry 		 */
1593*a9934668SKenneth D. Merry 		if (dirs[0] == CAM_DIR_OUT)
1594*a9934668SKenneth D. Merry 			error = passcopysglist(periph, io_req, dirs[0]);
1595*a9934668SKenneth D. Merry 		break;
1596*a9934668SKenneth D. Merry 	}
1597*a9934668SKenneth D. Merry 	case CAM_DATA_SG_PADDR: {
1598*a9934668SKenneth D. Merry 		size_t sg_length;
1599*a9934668SKenneth D. Merry 
1600*a9934668SKenneth D. Merry 		/*
1601*a9934668SKenneth D. Merry 		 * We shouldn't see this, but check just in case.
1602*a9934668SKenneth D. Merry 		 */
1603*a9934668SKenneth D. Merry 		if (numbufs != 1) {
1604*a9934668SKenneth D. Merry 			printf("%s: cannot currently handle more than one "
1605*a9934668SKenneth D. Merry 			       "S/G list per CCB\n", __func__);
1606*a9934668SKenneth D. Merry 			error = EINVAL;
1607*a9934668SKenneth D. Merry 			goto bailout;
1608*a9934668SKenneth D. Merry 		}
1609*a9934668SKenneth D. Merry 
1610*a9934668SKenneth D. Merry 		/*
1611*a9934668SKenneth D. Merry 		 * We have to have at least one segment.
1612*a9934668SKenneth D. Merry 		 */
1613*a9934668SKenneth D. Merry 		if (num_segs == 0) {
1614*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag "
1615*a9934668SKenneth D. Merry 				  "set, but sglist_cnt=0!\n", __func__);
1616*a9934668SKenneth D. Merry 			error = EINVAL;
1617*a9934668SKenneth D. Merry 			goto bailout;
1618*a9934668SKenneth D. Merry 		}
1619*a9934668SKenneth D. Merry 
1620*a9934668SKenneth D. Merry 		/*
1621*a9934668SKenneth D. Merry 		 * Make sure the user specified the total length and didn't
1622*a9934668SKenneth D. Merry 		 * just leave it to us to decode the S/G list.
1623*a9934668SKenneth D. Merry 		 */
1624*a9934668SKenneth D. Merry 		if (lengths[0] == 0) {
1625*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: no dxfer_len specified, "
1626*a9934668SKenneth D. Merry 				  "but CAM_DATA_SG_PADDR flag is set!\n", __func__);
1627*a9934668SKenneth D. Merry 			error = EINVAL;
1628*a9934668SKenneth D. Merry 			goto bailout;
1629*a9934668SKenneth D. Merry 		}
1630*a9934668SKenneth D. Merry 
1631*a9934668SKenneth D. Merry 		/* Figure out the size of the S/G list */
1632*a9934668SKenneth D. Merry 		sg_length = num_segs * sizeof(bus_dma_segment_t);
1633*a9934668SKenneth D. Merry 		io_req->num_user_segs = num_segs;
1634*a9934668SKenneth D. Merry 		io_req->num_kern_segs = io_req->num_user_segs;
1635*a9934668SKenneth D. Merry 
1636*a9934668SKenneth D. Merry 		/* Save the user's S/G list pointer for later restoration */
1637*a9934668SKenneth D. Merry 		io_req->user_bufs[0] = *data_ptrs[0];
1638*a9934668SKenneth D. Merry 
1639*a9934668SKenneth D. Merry 		if (num_segs > PASS_MAX_SEGS) {
1640*a9934668SKenneth D. Merry 			io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
1641*a9934668SKenneth D. Merry 			    num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
1642*a9934668SKenneth D. Merry 			io_req->flags |= PASS_IO_USER_SEG_MALLOC;
1643*a9934668SKenneth D. Merry 		} else
1644*a9934668SKenneth D. Merry 			io_req->user_segptr = io_req->user_segs;
1645*a9934668SKenneth D. Merry 
1646*a9934668SKenneth D. Merry 		io_req->kern_segptr = io_req->user_segptr;
1647*a9934668SKenneth D. Merry 
1648*a9934668SKenneth D. Merry 		error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
1649*a9934668SKenneth D. Merry 		if (error != 0) {
1650*a9934668SKenneth D. Merry 			xpt_print(periph->path, "%s: copy of user S/G list "
1651*a9934668SKenneth D. Merry 				  "from %p to %p failed with error %d\n",
1652*a9934668SKenneth D. Merry 				  __func__, *data_ptrs[0], io_req->user_segptr,
1653*a9934668SKenneth D. Merry 				  error);
1654*a9934668SKenneth D. Merry 			goto bailout;
1655*a9934668SKenneth D. Merry 		}
1656*a9934668SKenneth D. Merry 		break;
1657*a9934668SKenneth D. Merry 	}
1658*a9934668SKenneth D. Merry 	default:
1659*a9934668SKenneth D. Merry 	case CAM_DATA_BIO:
1660*a9934668SKenneth D. Merry 		/*
1661*a9934668SKenneth D. Merry 		 * A user shouldn't be attaching a bio to the CCB.  It
1662*a9934668SKenneth D. Merry 		 * isn't a user-accessible structure.
1663*a9934668SKenneth D. Merry 		 */
1664*a9934668SKenneth D. Merry 		error = EINVAL;
1665*a9934668SKenneth D. Merry 		break;
1666*a9934668SKenneth D. Merry 	}
1667*a9934668SKenneth D. Merry 
1668*a9934668SKenneth D. Merry bailout:
1669*a9934668SKenneth D. Merry 	if (error != 0)
1670*a9934668SKenneth D. Merry 		passiocleanup(softc, io_req);
1671*a9934668SKenneth D. Merry 
1672*a9934668SKenneth D. Merry 	return (error);
1673*a9934668SKenneth D. Merry }
1674*a9934668SKenneth D. Merry 
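/*
 * Complete the data phase of an asynchronously queued CCB: copy any data
 * read from the device back out to the user's buffers and then undo the
 * buffer substitution done in passmemsetup() via passiocleanup().
 */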
1675*a9934668SKenneth D. Merry static int
1676*a9934668SKenneth D. Merry passmemdone(struct cam_periph *periph, struct pass_io_req *io_req)
1677*a9934668SKenneth D. Merry {
1678*a9934668SKenneth D. Merry 	struct pass_softc *softc;
1679*a9934668SKenneth D. Merry 	union ccb *ccb;
1680*a9934668SKenneth D. Merry 	int error;
1681*a9934668SKenneth D. Merry 	int i;
1682*a9934668SKenneth D. Merry 
1683*a9934668SKenneth D. Merry 	error = 0;
1684*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
1685*a9934668SKenneth D. Merry 	ccb = &io_req->ccb;
1686*a9934668SKenneth D. Merry 
1687*a9934668SKenneth D. Merry 	switch (io_req->data_flags) {
1688*a9934668SKenneth D. Merry 	case CAM_DATA_VADDR:
1689*a9934668SKenneth D. Merry 		/*
1690*a9934668SKenneth D. Merry 		 * Copy back to the user buffer if this was a read.
1691*a9934668SKenneth D. Merry 		 */
1692*a9934668SKenneth D. Merry 		for (i = 0; i < io_req->num_bufs; i++) {
1693*a9934668SKenneth D. Merry 			if (io_req->dirs[i] != CAM_DIR_IN)
1694*a9934668SKenneth D. Merry 				continue;
1695*a9934668SKenneth D. Merry 
1696*a9934668SKenneth D. Merry 			error = copyout(io_req->kern_bufs[i],
1697*a9934668SKenneth D. Merry 			    io_req->user_bufs[i], io_req->lengths[i]);
1698*a9934668SKenneth D. Merry 			if (error != 0) {
1699*a9934668SKenneth D. Merry 				xpt_print(periph->path, "Unable to copy %u "
1700*a9934668SKenneth D. Merry 					  "bytes from %p to user address %p\n",
1701*a9934668SKenneth D. Merry 					  io_req->lengths[i],
1702*a9934668SKenneth D. Merry 					  io_req->kern_bufs[i],
1703*a9934668SKenneth D. Merry 					  io_req->user_bufs[i]);
1704*a9934668SKenneth D. Merry 				goto bailout;
1705*a9934668SKenneth D. Merry 			}
1706*a9934668SKenneth D. Merry 
1707*a9934668SKenneth D. Merry 		}
1708*a9934668SKenneth D. Merry 		break;
1709*a9934668SKenneth D. Merry 	case CAM_DATA_PADDR:
1710*a9934668SKenneth D. Merry 		/* Do nothing.  The pointer is a physical address already */
1711*a9934668SKenneth D. Merry 		break;
1712*a9934668SKenneth D. Merry 	case CAM_DATA_SG:
1713*a9934668SKenneth D. Merry 		/*
1714*a9934668SKenneth D. Merry 		 * Copy back to the user buffer if this was a read.
1715*a9934668SKenneth D. Merry 		 * Restore the user's S/G list buffer pointer.
1716*a9934668SKenneth D. Merry 		 */
1717*a9934668SKenneth D. Merry 		if (io_req->dirs[0] == CAM_DIR_IN)
1718*a9934668SKenneth D. Merry 			error = passcopysglist(periph, io_req, io_req->dirs[0]);
1719*a9934668SKenneth D. Merry 		break;
1720*a9934668SKenneth D. Merry 	case CAM_DATA_SG_PADDR:
1721*a9934668SKenneth D. Merry 		/*
1722*a9934668SKenneth D. Merry 		 * Restore the user's S/G list buffer pointer.  No need to
1723*a9934668SKenneth D. Merry 		 * copy.
1724*a9934668SKenneth D. Merry 		 */
1725*a9934668SKenneth D. Merry 		break;
1726*a9934668SKenneth D. Merry 	default:
1727*a9934668SKenneth D. Merry 	case CAM_DATA_BIO:
1728*a9934668SKenneth D. Merry 		error = EINVAL;
1729*a9934668SKenneth D. Merry 		break;
1730*a9934668SKenneth D. Merry 	}
1731*a9934668SKenneth D. Merry 
1732*a9934668SKenneth D. Merry bailout:
1733*a9934668SKenneth D. Merry 	/*
1734*a9934668SKenneth D. Merry 	 * Reset the user's pointers to their original values and free
1735*a9934668SKenneth D. Merry 	 * allocated memory.
1736*a9934668SKenneth D. Merry 	 */
1737*a9934668SKenneth D. Merry 	passiocleanup(softc, io_req);
1738*a9934668SKenneth D. Merry 
1739*a9934668SKenneth D. Merry 	return (error);
1740*a9934668SKenneth D. Merry }
1741*a9934668SKenneth D. Merry 
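/*
 * ioctl entry point.  Try the current interface first; if the command is
 * not recognized, fall back to the CAM compatibility shim so that binaries
 * built against older CAM ABI versions continue to work.
 */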
174276babe50SJustin T. Gibbs static int
174389c9c53dSPoul-Henning Kamp passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
174476babe50SJustin T. Gibbs {
174525a2902cSScott Long 	int error;
174625a2902cSScott Long 
174725a2902cSScott Long 	if ((error = passdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
1748f564de00SScott Long 		error = cam_compat_ioctl(dev, cmd, addr, flag, td, passdoioctl);
174925a2902cSScott Long 	}
175025a2902cSScott Long 	return (error);
175125a2902cSScott Long }
175225a2902cSScott Long 
175325a2902cSScott Long static int
175425a2902cSScott Long passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
175525a2902cSScott Long {
175676babe50SJustin T. Gibbs 	struct	cam_periph *periph;
1757*a9934668SKenneth D. Merry 	struct	pass_softc *softc;
175876babe50SJustin T. Gibbs 	int	error;
17598cff7eb8SAlexander Motin 	uint32_t priority;
176076babe50SJustin T. Gibbs 
1761e2a5fdf9SNate Lawson 	periph = (struct cam_periph *)dev->si_drv1;
176276babe50SJustin T. Gibbs 	if (periph == NULL)
176376babe50SJustin T. Gibbs 		return(ENXIO);
176476babe50SJustin T. Gibbs 
17652b83592fSScott Long 	cam_periph_lock(periph);
1766*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
176776babe50SJustin T. Gibbs 
176876babe50SJustin T. Gibbs 	error = 0;
176976babe50SJustin T. Gibbs 
177076babe50SJustin T. Gibbs 	switch (cmd) {
177176babe50SJustin T. Gibbs 
177276babe50SJustin T. Gibbs 	case CAMIOCOMMAND:
177376babe50SJustin T. Gibbs 	{
177476babe50SJustin T. Gibbs 		union ccb *inccb;
177576babe50SJustin T. Gibbs 		union ccb *ccb;
17769deea857SKenneth D. Merry 		int ccb_malloced;
177776babe50SJustin T. Gibbs 
177876babe50SJustin T. Gibbs 		inccb = (union ccb *)addr;
17799deea857SKenneth D. Merry 
17809deea857SKenneth D. Merry 		/*
17819deea857SKenneth D. Merry 		 * Some CCB types, like scan bus and scan lun can only go
17829deea857SKenneth D. Merry 		 * through the transport layer device.
17839deea857SKenneth D. Merry 		 */
17849deea857SKenneth D. Merry 		if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
1785f0d9af51SMatt Jacob 			xpt_print(periph->path, "CCB function code %#x is "
1786f0d9af51SMatt Jacob 			    "restricted to the XPT device\n",
1787f0d9af51SMatt Jacob 			    inccb->ccb_h.func_code);
17889deea857SKenneth D. Merry 			error = ENODEV;
17899deea857SKenneth D. Merry 			break;
17909deea857SKenneth D. Merry 		}
17919deea857SKenneth D. Merry 
17928cff7eb8SAlexander Motin 		/* Compatibility for RL/priority-unaware code. */
17938cff7eb8SAlexander Motin 		priority = inccb->ccb_h.pinfo.priority;
1794cccf4220SAlexander Motin 		if (priority <= CAM_PRIORITY_OOB)
1795cccf4220SAlexander Motin 		    priority += CAM_PRIORITY_OOB + 1;
17968cff7eb8SAlexander Motin 
17979deea857SKenneth D. Merry 		/*
17989deea857SKenneth D. Merry 		 * Non-immediate CCBs need a CCB from the per-device pool
17999deea857SKenneth D. Merry 		 * of CCBs, which is scheduled by the transport layer.
18009deea857SKenneth D. Merry 		 * Immediate CCBs and user-supplied CCBs should just be
18019deea857SKenneth D. Merry 		 * malloced.
18029deea857SKenneth D. Merry 		 */
18039deea857SKenneth D. Merry 		if ((inccb->ccb_h.func_code & XPT_FC_QUEUED)
18049deea857SKenneth D. Merry 		 && ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) {
18058cff7eb8SAlexander Motin 			ccb = cam_periph_getccb(periph, priority);
18069deea857SKenneth D. Merry 			ccb_malloced = 0;
18079deea857SKenneth D. Merry 		} else {
18088008a935SScott Long 			ccb = xpt_alloc_ccb_nowait();
18099deea857SKenneth D. Merry 
18109deea857SKenneth D. Merry 			if (ccb != NULL)
18119deea857SKenneth D. Merry 				xpt_setup_ccb(&ccb->ccb_h, periph->path,
18128cff7eb8SAlexander Motin 					      priority);
18139deea857SKenneth D. Merry 			ccb_malloced = 1;
18149deea857SKenneth D. Merry 		}
18159deea857SKenneth D. Merry 
18169deea857SKenneth D. Merry 		if (ccb == NULL) {
1817f0d9af51SMatt Jacob 			xpt_print(periph->path, "unable to allocate CCB\n");
18189deea857SKenneth D. Merry 			error = ENOMEM;
18199deea857SKenneth D. Merry 			break;
18209deea857SKenneth D. Merry 		}
182176babe50SJustin T. Gibbs 
182276babe50SJustin T. Gibbs 		error = passsendccb(periph, ccb, inccb);
182376babe50SJustin T. Gibbs 
18249deea857SKenneth D. Merry 		if (ccb_malloced)
18259deea857SKenneth D. Merry 			xpt_free_ccb(ccb);
18269deea857SKenneth D. Merry 		else
182776babe50SJustin T. Gibbs 			xpt_release_ccb(ccb);
182876babe50SJustin T. Gibbs 
182976babe50SJustin T. Gibbs 		break;
183076babe50SJustin T. Gibbs 	}
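	/*
	 * CAMIOQUEUE and CAMIOGET below implement the asynchronous pass(4)
	 * interface.  A rough sketch of how a userland consumer might use
	 * it (illustrative only; "fd" is an open pass(4) device node and
	 * the CCB setup is abbreviated):
	 *
	 *	union ccb *ccb = malloc(sizeof(*ccb));
	 *	bzero(ccb, sizeof(*ccb));
	 *	... fill in the CCB, e.g. cam_fill_csio() for a SCSI command ...
	 *	ioctl(fd, CAMIOQUEUE, &ccb);	queue the request
	 *	... wait for completion via select(), poll() or kqueue() ...
	 *	ioctl(fd, CAMIOGET, &ccb);	fetch a completed CCB
	 */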
1831*a9934668SKenneth D. Merry 	case CAMIOQUEUE:
1832*a9934668SKenneth D. Merry 	{
1833*a9934668SKenneth D. Merry 		struct pass_io_req *io_req;
1834*a9934668SKenneth D. Merry 		union ccb **user_ccb, *ccb;
1835*a9934668SKenneth D. Merry 		xpt_opcode fc;
1836*a9934668SKenneth D. Merry 
1837*a9934668SKenneth D. Merry 		if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
1838*a9934668SKenneth D. Merry 			error = passcreatezone(periph);
1839*a9934668SKenneth D. Merry 			if (error != 0)
1840*a9934668SKenneth D. Merry 				goto bailout;
1841*a9934668SKenneth D. Merry 		}
1842*a9934668SKenneth D. Merry 
1843*a9934668SKenneth D. Merry 		/*
1844*a9934668SKenneth D. Merry 		 * We're going to do a blocking allocation for this I/O
1845*a9934668SKenneth D. Merry 		 * request, so we have to drop the lock.
1846*a9934668SKenneth D. Merry 		 */
1847*a9934668SKenneth D. Merry 		cam_periph_unlock(periph);
1848*a9934668SKenneth D. Merry 
1849*a9934668SKenneth D. Merry 		io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO);
1850*a9934668SKenneth D. Merry 		ccb = &io_req->ccb;
1851*a9934668SKenneth D. Merry 		user_ccb = (union ccb **)addr;
1852*a9934668SKenneth D. Merry 
1853*a9934668SKenneth D. Merry 		/*
1854*a9934668SKenneth D. Merry 		 * Unlike the CAMIOCOMMAND ioctl above, we only have a
1855*a9934668SKenneth D. Merry 		 * pointer to the user's CCB, so we have to copy the whole
1856*a9934668SKenneth D. Merry 		 * thing in to a buffer we have allocated (above) instead
1857*a9934668SKenneth D. Merry 		 * of allowing the ioctl code to malloc a buffer and copy
1858*a9934668SKenneth D. Merry 		 * it in.
1859*a9934668SKenneth D. Merry 		 *
1860*a9934668SKenneth D. Merry 		 * This is an advantage for this asynchronous interface,
1861*a9934668SKenneth D. Merry 		 * since we don't want the memory to get freed while the
1862*a9934668SKenneth D. Merry 		 * CCB is outstanding.
1863*a9934668SKenneth D. Merry 		 */
1864*a9934668SKenneth D. Merry #if 0
1865*a9934668SKenneth D. Merry 		xpt_print(periph->path, "Copying user CCB %p to "
1866*a9934668SKenneth D. Merry 			  "kernel address %p\n", *user_ccb, ccb);
1867*a9934668SKenneth D. Merry #endif
1868*a9934668SKenneth D. Merry 		error = copyin(*user_ccb, ccb, sizeof(*ccb));
1869*a9934668SKenneth D. Merry 		if (error != 0) {
1870*a9934668SKenneth D. Merry 			xpt_print(periph->path, "Copy of user CCB %p to "
1871*a9934668SKenneth D. Merry 				  "kernel address %p failed with error %d\n",
1872*a9934668SKenneth D. Merry 				  *user_ccb, ccb, error);
1873*a9934668SKenneth D. Merry 			uma_zfree(softc->pass_zone, io_req);
1874*a9934668SKenneth D. Merry 			cam_periph_lock(periph);
1875*a9934668SKenneth D. Merry 			break;
1876*a9934668SKenneth D. Merry 		}
1877*a9934668SKenneth D. Merry 
1878*a9934668SKenneth D. Merry 		/*
1879*a9934668SKenneth D. Merry 		 * Some CCB types, like scan bus and scan lun can only go
1880*a9934668SKenneth D. Merry 		 * through the transport layer device.
1881*a9934668SKenneth D. Merry 		 */
1882*a9934668SKenneth D. Merry 		if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
1883*a9934668SKenneth D. Merry 			xpt_print(periph->path, "CCB function code %#x is "
1884*a9934668SKenneth D. Merry 			    "restricted to the XPT device\n",
1885*a9934668SKenneth D. Merry 			    ccb->ccb_h.func_code);
1886*a9934668SKenneth D. Merry 			uma_zfree(softc->pass_zone, io_req);
1887*a9934668SKenneth D. Merry 			cam_periph_lock(periph);
1888*a9934668SKenneth D. Merry 			error = ENODEV;
1889*a9934668SKenneth D. Merry 			break;
1890*a9934668SKenneth D. Merry 		}
1891*a9934668SKenneth D. Merry 
1892*a9934668SKenneth D. Merry 		/*
1893*a9934668SKenneth D. Merry 		 * Save the user's CCB pointer as well as his linked list
1894*a9934668SKenneth D. Merry 		 * pointers and peripheral private area so that we can
1895*a9934668SKenneth D. Merry 		 * restore these later.
1896*a9934668SKenneth D. Merry 		 */
1897*a9934668SKenneth D. Merry 		io_req->user_ccb_ptr = *user_ccb;
1898*a9934668SKenneth D. Merry 		io_req->user_periph_links = ccb->ccb_h.periph_links;
1899*a9934668SKenneth D. Merry 		io_req->user_periph_priv = ccb->ccb_h.periph_priv;
1900*a9934668SKenneth D. Merry 
1901*a9934668SKenneth D. Merry 		/*
1902*a9934668SKenneth D. Merry 		 * Now that we've saved the user's values, we can set our
1903*a9934668SKenneth D. Merry 		 * own peripheral private entry.
1904*a9934668SKenneth D. Merry 		 */
1905*a9934668SKenneth D. Merry 		ccb->ccb_h.ccb_ioreq = io_req;
1906*a9934668SKenneth D. Merry 
1907*a9934668SKenneth D. Merry 		/* Compatibility for RL/priority-unaware code. */
1908*a9934668SKenneth D. Merry 		priority = ccb->ccb_h.pinfo.priority;
1909*a9934668SKenneth D. Merry 		if (priority <= CAM_PRIORITY_OOB)
1910*a9934668SKenneth D. Merry 		    priority += CAM_PRIORITY_OOB + 1;
1911*a9934668SKenneth D. Merry 
1912*a9934668SKenneth D. Merry 		/*
1913*a9934668SKenneth D. Merry 		 * Setup fields in the CCB like the path and the priority.
1914*a9934668SKenneth D. Merry 		 * The path in particular cannot be done in userland, since
1915*a9934668SKenneth D. Merry 		 * it is a pointer to a kernel data structure.
1916*a9934668SKenneth D. Merry 		 */
1917*a9934668SKenneth D. Merry 		xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority,
1918*a9934668SKenneth D. Merry 				    ccb->ccb_h.flags);
1919*a9934668SKenneth D. Merry 
1920*a9934668SKenneth D. Merry 		/*
1921*a9934668SKenneth D. Merry 		 * Setup our done routine.  There is no way for the user to
1922*a9934668SKenneth D. Merry 		 * have a valid pointer here.
1923*a9934668SKenneth D. Merry 		 */
1924*a9934668SKenneth D. Merry 		ccb->ccb_h.cbfcnp = passdone;
1925*a9934668SKenneth D. Merry 
1926*a9934668SKenneth D. Merry 		fc = ccb->ccb_h.func_code;
1927*a9934668SKenneth D. Merry 		/*
1928*a9934668SKenneth D. Merry 		 * If this function code has memory that can be mapped in
1929*a9934668SKenneth D. Merry 		 * or out, we need to call passmemsetup().
1930*a9934668SKenneth D. Merry 		 */
1931*a9934668SKenneth D. Merry 		if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO)
1932*a9934668SKenneth D. Merry 		 || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH)
1933*a9934668SKenneth D. Merry 		 || (fc == XPT_DEV_ADVINFO)) {
1934*a9934668SKenneth D. Merry 			error = passmemsetup(periph, io_req);
1935*a9934668SKenneth D. Merry 			if (error != 0) {
1936*a9934668SKenneth D. Merry 				uma_zfree(softc->pass_zone, io_req);
1937*a9934668SKenneth D. Merry 				cam_periph_lock(periph);
1938*a9934668SKenneth D. Merry 				break;
1939*a9934668SKenneth D. Merry 			}
1940*a9934668SKenneth D. Merry 		} else
1941*a9934668SKenneth D. Merry 			io_req->mapinfo.num_bufs_used = 0;
1942*a9934668SKenneth D. Merry 
1943*a9934668SKenneth D. Merry 		cam_periph_lock(periph);
1944*a9934668SKenneth D. Merry 
1945*a9934668SKenneth D. Merry 		/*
1946*a9934668SKenneth D. Merry 		 * Everything goes on the incoming queue initially.
1947*a9934668SKenneth D. Merry 		 */
1948*a9934668SKenneth D. Merry 		TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links);
1949*a9934668SKenneth D. Merry 
1950*a9934668SKenneth D. Merry 		/*
1951*a9934668SKenneth D. Merry 		 * If the CCB is queued, and is not a user CCB, then
1952*a9934668SKenneth D. Merry 		 * we need to allocate a slot for it.  Call xpt_schedule()
1953*a9934668SKenneth D. Merry 		 * so that our start routine will get called when a CCB is
1954*a9934668SKenneth D. Merry 		 * available.
1955*a9934668SKenneth D. Merry 		 */
1956*a9934668SKenneth D. Merry 		if ((fc & XPT_FC_QUEUED)
1957*a9934668SKenneth D. Merry 		 && ((fc & XPT_FC_USER_CCB) == 0)) {
1958*a9934668SKenneth D. Merry 			xpt_schedule(periph, priority);
1959*a9934668SKenneth D. Merry 			break;
1960*a9934668SKenneth D. Merry 		}
1961*a9934668SKenneth D. Merry 
1962*a9934668SKenneth D. Merry 		/*
1963*a9934668SKenneth D. Merry 		 * At this point, the CCB in question is either an
1964*a9934668SKenneth D. Merry 		 * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB
1965*a9934668SKenneth D. Merry 		 * and therefore should be malloced, not allocated via a slot.
1966*a9934668SKenneth D. Merry 		 * Remove the CCB from the incoming queue and add it to the
1967*a9934668SKenneth D. Merry 		 * active queue.
1968*a9934668SKenneth D. Merry 		 */
1969*a9934668SKenneth D. Merry 		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
1970*a9934668SKenneth D. Merry 		TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
1971*a9934668SKenneth D. Merry 
1972*a9934668SKenneth D. Merry 		xpt_action(ccb);
1973*a9934668SKenneth D. Merry 
1974*a9934668SKenneth D. Merry 		/*
1975*a9934668SKenneth D. Merry 		 * If this is not a queued CCB (i.e. it is an immediate CCB),
1976*a9934668SKenneth D. Merry 		 * then it is already done.  We need to put it on the done
1977*a9934668SKenneth D. Merry 		 * queue for the user to fetch.
1978*a9934668SKenneth D. Merry 		 */
1979*a9934668SKenneth D. Merry 		if ((fc & XPT_FC_QUEUED) == 0) {
1980*a9934668SKenneth D. Merry 			TAILQ_REMOVE(&softc->active_queue, io_req, links);
1981*a9934668SKenneth D. Merry 			TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
1982*a9934668SKenneth D. Merry 		}
1983*a9934668SKenneth D. Merry 		break;
1984*a9934668SKenneth D. Merry 	}
1985*a9934668SKenneth D. Merry 	case CAMIOGET:
1986*a9934668SKenneth D. Merry 	{
1987*a9934668SKenneth D. Merry 		union ccb **user_ccb;
1988*a9934668SKenneth D. Merry 		struct pass_io_req *io_req;
1989*a9934668SKenneth D. Merry 		int old_error;
1990*a9934668SKenneth D. Merry 
1991*a9934668SKenneth D. Merry 		user_ccb = (union ccb **)addr;
1992*a9934668SKenneth D. Merry 		old_error = 0;
1993*a9934668SKenneth D. Merry 
1994*a9934668SKenneth D. Merry 		io_req = TAILQ_FIRST(&softc->done_queue);
1995*a9934668SKenneth D. Merry 		if (io_req == NULL) {
1996*a9934668SKenneth D. Merry 			error = ENOENT;
1997*a9934668SKenneth D. Merry 			break;
1998*a9934668SKenneth D. Merry 		}
1999*a9934668SKenneth D. Merry 
2000*a9934668SKenneth D. Merry 		/*
2001*a9934668SKenneth D. Merry 		 * Remove the I/O from the done queue.
2002*a9934668SKenneth D. Merry 		 */
2003*a9934668SKenneth D. Merry 		TAILQ_REMOVE(&softc->done_queue, io_req, links);
2004*a9934668SKenneth D. Merry 
2005*a9934668SKenneth D. Merry 		/*
2006*a9934668SKenneth D. Merry 		 * We have to drop the lock during the copyout because the
2007*a9934668SKenneth D. Merry 		 * copyout can result in VM faults that require sleeping.
2008*a9934668SKenneth D. Merry 		 */
2009*a9934668SKenneth D. Merry 		cam_periph_unlock(periph);
2010*a9934668SKenneth D. Merry 
2011*a9934668SKenneth D. Merry 		/*
2012*a9934668SKenneth D. Merry 		 * Do any needed copies (e.g. for reads) and revert the
2013*a9934668SKenneth D. Merry 		 * pointers in the CCB back to the user's pointers.
2014*a9934668SKenneth D. Merry 		 */
2015*a9934668SKenneth D. Merry 		error = passmemdone(periph, io_req);
2016*a9934668SKenneth D. Merry 
2017*a9934668SKenneth D. Merry 		old_error = error;
2018*a9934668SKenneth D. Merry 
2019*a9934668SKenneth D. Merry 		io_req->ccb.ccb_h.periph_links = io_req->user_periph_links;
2020*a9934668SKenneth D. Merry 		io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv;
2021*a9934668SKenneth D. Merry 
2022*a9934668SKenneth D. Merry #if 0
2023*a9934668SKenneth D. Merry 		xpt_print(periph->path, "Copying to user CCB %p from "
2024*a9934668SKenneth D. Merry 			  "kernel address %p\n", *user_ccb, &io_req->ccb);
2025*a9934668SKenneth D. Merry #endif
2026*a9934668SKenneth D. Merry 
2027*a9934668SKenneth D. Merry 		error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb));
2028*a9934668SKenneth D. Merry 		if (error != 0) {
2029*a9934668SKenneth D. Merry 			xpt_print(periph->path, "Copy to user CCB %p from "
2030*a9934668SKenneth D. Merry 				  "kernel address %p failed with error %d\n",
2031*a9934668SKenneth D. Merry 				  *user_ccb, &io_req->ccb, error);
2032*a9934668SKenneth D. Merry 		}
2033*a9934668SKenneth D. Merry 
2034*a9934668SKenneth D. Merry 		/*
2035*a9934668SKenneth D. Merry 		 * Prefer the first error we got back, and make sure we
2036*a9934668SKenneth D. Merry 		 * don't overwrite bad status with good.
2037*a9934668SKenneth D. Merry 		 */
2038*a9934668SKenneth D. Merry 		if (old_error != 0)
2039*a9934668SKenneth D. Merry 			error = old_error;
2040*a9934668SKenneth D. Merry 
2041*a9934668SKenneth D. Merry 		cam_periph_lock(periph);
2042*a9934668SKenneth D. Merry 
2043*a9934668SKenneth D. Merry 		/*
		 * If there was an error, we could re-queue the I/O and try
		 * again, but the error would almost certainly happen again.
		 * Free the request instead, so at least we don't leak memory.
2048*a9934668SKenneth D. Merry 		 */
2049*a9934668SKenneth D. Merry 		uma_zfree(softc->pass_zone, io_req);
2050*a9934668SKenneth D. Merry 		break;
2051*a9934668SKenneth D. Merry 	}
205276babe50SJustin T. Gibbs 	default:
205376babe50SJustin T. Gibbs 		error = cam_periph_ioctl(periph, cmd, addr, passerror);
205476babe50SJustin T. Gibbs 		break;
205576babe50SJustin T. Gibbs 	}
205676babe50SJustin T. Gibbs 
2057*a9934668SKenneth D. Merry bailout:
20582b83592fSScott Long 	cam_periph_unlock(periph);
2059*a9934668SKenneth D. Merry 
206076babe50SJustin T. Gibbs 	return(error);
206176babe50SJustin T. Gibbs }
206276babe50SJustin T. Gibbs 
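/*
 * Illustrative userland sketch (not part of the driver) of the asynchronous
 * CAMIOQUEUE/CAMIOGET flow handled above.  The descriptor "fd", the CCB
 * setup and the error handling are hypothetical; the pointer-argument
 * convention is the one used by consumers such as camdd(8), and the CCB is
 * assumed to have been filled out just as it would be for CAMIOCOMMAND.
 *
 *	#include <sys/ioctl.h>
 *	#include <err.h>
 *	#include <cam/cam_ccb.h>
 *	#include <cam/scsi/scsi_pass.h>
 *
 *	union ccb ccb;
 *
 *	// ... build the CCB (e.g. cam_fill_csio() for an XPT_SCSI_IO) ...
 *
 *	if (ioctl(fd, CAMIOQUEUE, &ccb) == -1)	// queue it; returns at once
 *		err(1, "CAMIOQUEUE");
 *
 *	// Once poll(2)/kevent(2) reports the descriptor readable, fetch the
 *	// completed CCB.  The driver treats the argument as a pointer to the
 *	// caller's CCB and uses it as the copyout destination; when nothing
 *	// is on the done queue, CAMIOGET fails with ENOENT.
 *	if (ioctl(fd, CAMIOGET, &ccb) == -1)
 *		err(1, "CAMIOGET");
 */
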
2063*a9934668SKenneth D. Merry static int
2064*a9934668SKenneth D. Merry passpoll(struct cdev *dev, int poll_events, struct thread *td)
2065*a9934668SKenneth D. Merry {
2066*a9934668SKenneth D. Merry 	struct cam_periph *periph;
2067*a9934668SKenneth D. Merry 	struct pass_softc *softc;
2068*a9934668SKenneth D. Merry 	int revents;
2069*a9934668SKenneth D. Merry 
2070*a9934668SKenneth D. Merry 	periph = (struct cam_periph *)dev->si_drv1;
2071*a9934668SKenneth D. Merry 	if (periph == NULL)
2072*a9934668SKenneth D. Merry 		return (ENXIO);
2073*a9934668SKenneth D. Merry 
2074*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
2075*a9934668SKenneth D. Merry 
2076*a9934668SKenneth D. Merry 	revents = poll_events & (POLLOUT | POLLWRNORM);
2077*a9934668SKenneth D. Merry 	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
2078*a9934668SKenneth D. Merry 		cam_periph_lock(periph);
2079*a9934668SKenneth D. Merry 
2080*a9934668SKenneth D. Merry 		if (!TAILQ_EMPTY(&softc->done_queue)) {
2081*a9934668SKenneth D. Merry 			revents |= poll_events & (POLLIN | POLLRDNORM);
2082*a9934668SKenneth D. Merry 		}
2083*a9934668SKenneth D. Merry 		cam_periph_unlock(periph);
2084*a9934668SKenneth D. Merry 		if (revents == 0)
2085*a9934668SKenneth D. Merry 			selrecord(td, &softc->read_select);
2086*a9934668SKenneth D. Merry 	}
2087*a9934668SKenneth D. Merry 
2088*a9934668SKenneth D. Merry 	return (revents);
2089*a9934668SKenneth D. Merry }
2090*a9934668SKenneth D. Merry 
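/*
 * Hypothetical userland counterpart to passpoll() above: wait until at
 * least one queued CCB has reached the done queue before trying CAMIOGET.
 * "fd" is assumed to be an open pass(4) descriptor with a CAMIOQUEUE
 * request outstanding.
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN) != 0) {
 *		// The done queue is non-empty, so a single-threaded caller
 *		// can expect CAMIOGET to succeed rather than return ENOENT.
 *	}
 */
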
2091*a9934668SKenneth D. Merry static int
2092*a9934668SKenneth D. Merry passkqfilter(struct cdev *dev, struct knote *kn)
2093*a9934668SKenneth D. Merry {
2094*a9934668SKenneth D. Merry 	struct cam_periph *periph;
2095*a9934668SKenneth D. Merry 	struct pass_softc *softc;
2096*a9934668SKenneth D. Merry 
2097*a9934668SKenneth D. Merry 	periph = (struct cam_periph *)dev->si_drv1;
2098*a9934668SKenneth D. Merry 	if (periph == NULL)
2099*a9934668SKenneth D. Merry 		return (ENXIO);
2100*a9934668SKenneth D. Merry 
2101*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
2102*a9934668SKenneth D. Merry 
2103*a9934668SKenneth D. Merry 	kn->kn_hook = (caddr_t)periph;
2104*a9934668SKenneth D. Merry 	kn->kn_fop = &passread_filtops;
2105*a9934668SKenneth D. Merry 	knlist_add(&softc->read_select.si_note, kn, 0);
2106*a9934668SKenneth D. Merry 
2107*a9934668SKenneth D. Merry 	return (0);
2108*a9934668SKenneth D. Merry }
2109*a9934668SKenneth D. Merry 
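/*
 * Hypothetical kevent(2) sketch matching passkqfilter()/passreadfilt():
 * an EVFILT_READ note on the pass(4) descriptor fires once the done queue
 * becomes non-empty.  "fd" and the single-event usage are assumptions.
 *
 *	#include <sys/event.h>
 *	#include <err.h>
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)	// register
 *		err(1, "kevent register");
 *
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0) {	// wait
 *		// A completed CCB is ready to be collected with CAMIOGET.
 *	}
 */
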
2110*a9934668SKenneth D. Merry static void
2111*a9934668SKenneth D. Merry passreadfiltdetach(struct knote *kn)
2112*a9934668SKenneth D. Merry {
2113*a9934668SKenneth D. Merry 	struct cam_periph *periph;
2114*a9934668SKenneth D. Merry 	struct pass_softc *softc;
2115*a9934668SKenneth D. Merry 
2116*a9934668SKenneth D. Merry 	periph = (struct cam_periph *)kn->kn_hook;
2117*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
2118*a9934668SKenneth D. Merry 
2119*a9934668SKenneth D. Merry 	knlist_remove(&softc->read_select.si_note, kn, 0);
2120*a9934668SKenneth D. Merry }
2121*a9934668SKenneth D. Merry 
2122*a9934668SKenneth D. Merry static int
2123*a9934668SKenneth D. Merry passreadfilt(struct knote *kn, long hint)
2124*a9934668SKenneth D. Merry {
2125*a9934668SKenneth D. Merry 	struct cam_periph *periph;
2126*a9934668SKenneth D. Merry 	struct pass_softc *softc;
2127*a9934668SKenneth D. Merry 	int retval;
2128*a9934668SKenneth D. Merry 
2129*a9934668SKenneth D. Merry 	periph = (struct cam_periph *)kn->kn_hook;
2130*a9934668SKenneth D. Merry 	softc = (struct pass_softc *)periph->softc;
2131*a9934668SKenneth D. Merry 
2132*a9934668SKenneth D. Merry 	cam_periph_assert(periph, MA_OWNED);
2133*a9934668SKenneth D. Merry 
2134*a9934668SKenneth D. Merry 	if (TAILQ_EMPTY(&softc->done_queue))
2135*a9934668SKenneth D. Merry 		retval = 0;
2136*a9934668SKenneth D. Merry 	else
2137*a9934668SKenneth D. Merry 		retval = 1;
2138*a9934668SKenneth D. Merry 
2139*a9934668SKenneth D. Merry 	return (retval);
2140*a9934668SKenneth D. Merry }
2141*a9934668SKenneth D. Merry 
214276babe50SJustin T. Gibbs /*
214376babe50SJustin T. Gibbs  * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
214476babe50SJustin T. Gibbs  * should be the CCB that is copied in from the user.
214576babe50SJustin T. Gibbs  */
214676babe50SJustin T. Gibbs static int
214776babe50SJustin T. Gibbs passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
214876babe50SJustin T. Gibbs {
214976babe50SJustin T. Gibbs 	struct pass_softc *softc;
215076babe50SJustin T. Gibbs 	struct cam_periph_map_info mapinfo;
215195fbded6SScott Long 	xpt_opcode fc;
215295fbded6SScott Long 	int error;
215376babe50SJustin T. Gibbs 
215476babe50SJustin T. Gibbs 	softc = (struct pass_softc *)periph->softc;
215576babe50SJustin T. Gibbs 
215676babe50SJustin T. Gibbs 	/*
215776babe50SJustin T. Gibbs 	 * There are some fields in the CCB header that need to be
	 * preserved; the rest we take from the user's CCB.
215976babe50SJustin T. Gibbs 	 */
216076babe50SJustin T. Gibbs 	xpt_merge_ccb(ccb, inccb);
216176babe50SJustin T. Gibbs 
	/*
	 * There's no way for the user to supply a completion function, so
	 * point the CCB at the driver's own passdone() routine.
	 */
2164*a9934668SKenneth D. Merry 	ccb->ccb_h.cbfcnp = passdone;
2165*a9934668SKenneth D. Merry 
2166*a9934668SKenneth D. Merry 	/*
216795fbded6SScott Long 	 * Let cam_periph_mapmem do a sanity check on the data pointer format.
216895fbded6SScott Long 	 * Even if no data transfer is needed, it's a cheap check and it
216995fbded6SScott Long 	 * simplifies the code.
217076babe50SJustin T. Gibbs 	 */
217195fbded6SScott Long 	fc = ccb->ccb_h.func_code;
217295fbded6SScott Long 	if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO)
217395fbded6SScott Long 	 || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO)) {
217476babe50SJustin T. Gibbs 		bzero(&mapinfo, sizeof(mapinfo));
217576babe50SJustin T. Gibbs 
21762b83592fSScott Long 		/*
21772b83592fSScott Long 		 * cam_periph_mapmem calls into proc and vm functions that can
21782b83592fSScott Long 		 * sleep as well as trigger I/O, so we can't hold the lock.
21792b83592fSScott Long 		 * Dropping it here is reasonably safe.
21802b83592fSScott Long 		 */
21812b83592fSScott Long 		cam_periph_unlock(periph);
2182de239312SAlexander Motin 		error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
21832b83592fSScott Long 		cam_periph_lock(periph);
218476babe50SJustin T. Gibbs 
218576babe50SJustin T. Gibbs 		/*
		 * If cam_periph_mapmem() returned an error, we can't
		 * continue; hand the error back to the user.
218876babe50SJustin T. Gibbs 		 */
218976babe50SJustin T. Gibbs 		if (error)
219076babe50SJustin T. Gibbs 			return(error);
219195fbded6SScott Long 	} else
219295fbded6SScott Long 		/* Ensure that the unmap call later on is a no-op. */
219395fbded6SScott Long 		mapinfo.num_bufs_used = 0;
219476babe50SJustin T. Gibbs 
219576babe50SJustin T. Gibbs 	/*
	 * If the user asked for error recovery (CAM_PASS_ERR_RECOVER), honor
	 * that request; otherwise, leave any error recovery up to the user.
219976babe50SJustin T. Gibbs 	 */
22000191d9b3SAlexander Motin 	cam_periph_runccb(ccb, passerror, /* cam_flags */ CAM_RETRY_SELTO,
22010191d9b3SAlexander Motin 	    /* sense_flags */ ((ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
22020191d9b3SAlexander Motin 	     SF_RETRY_UA : SF_NO_RECOVERY) | SF_NO_PRINT,
2203a9d2245eSPoul-Henning Kamp 	    softc->device_stats);
220476babe50SJustin T. Gibbs 
220576babe50SJustin T. Gibbs 	cam_periph_unmapmem(ccb, &mapinfo);
220676babe50SJustin T. Gibbs 
220776babe50SJustin T. Gibbs 	ccb->ccb_h.cbfcnp = NULL;
220876babe50SJustin T. Gibbs 	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
220976babe50SJustin T. Gibbs 	bcopy(ccb, inccb, sizeof(union ccb));
221076babe50SJustin T. Gibbs 
221183c5d981SAlexander Motin 	return(0);
221276babe50SJustin T. Gibbs }
221376babe50SJustin T. Gibbs 
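/*
 * Hypothetical userland view of the error-recovery choice made above: a
 * caller that wants the kernel to retry unit attentions and similar
 * transient errors sets CAM_PASS_ERR_RECOVER before submitting the CCB.
 * cam_send_ccb(3) issues the synchronous CAMIOCOMMAND ioctl that ends up
 * in passsendccb(); "dev" and "ccb" are assumed to come from
 * cam_open_device(3) and cam_getccb(3).
 *
 *	ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
 *	if (cam_send_ccb(dev, ccb) < 0)
 *		err(1, "cam_send_ccb");
 */
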
221476babe50SJustin T. Gibbs static int
221576babe50SJustin T. Gibbs passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
221676babe50SJustin T. Gibbs {
221776babe50SJustin T. Gibbs 	struct cam_periph *periph;
221876babe50SJustin T. Gibbs 	struct pass_softc *softc;
221976babe50SJustin T. Gibbs 
222076babe50SJustin T. Gibbs 	periph = xpt_path_periph(ccb->ccb_h.path);
222176babe50SJustin T. Gibbs 	softc = (struct pass_softc *)periph->softc;
222276babe50SJustin T. Gibbs 
222376babe50SJustin T. Gibbs 	return(cam_periph_error(ccb, cam_flags, sense_flags,
222476babe50SJustin T. Gibbs 				 &softc->saved_ccb));
222576babe50SJustin T. Gibbs }
2226