xref: /freebsd/sys/cam/ctl/scsi_ctl.c (revision 076ad2f836d5f49dc1375f1677335a48fe0d4b82)
1 /*-
2  * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
3  * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    substantially similar to the "NO WARRANTY" disclaimer below
14  *    ("Disclaimer") and any redistribution must be conditioned upon
15  *    including a substantially similar Disclaimer requirement for further
16  *    binary redistribution.
17  *
18  * NO WARRANTY
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
22  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
27  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
28  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGES.
30  *
31  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
32  */
33 /*
34  * Peripheral driver interface between CAM and CTL (CAM Target Layer).
35  *
36  * Author: Ken Merry <ken@FreeBSD.org>
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #include <sys/param.h>
43 #include <sys/queue.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/condvar.h>
49 #include <sys/malloc.h>
50 #include <sys/bus.h>
51 #include <sys/endian.h>
52 #include <sys/sbuf.h>
53 #include <sys/sysctl.h>
54 #include <sys/types.h>
55 #include <sys/systm.h>
56 #include <sys/taskqueue.h>
57 #include <machine/bus.h>
58 
59 #include <cam/cam.h>
60 #include <cam/cam_ccb.h>
61 #include <cam/cam_periph.h>
62 #include <cam/cam_queue.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt.h>
67 
68 #include <cam/scsi/scsi_all.h>
69 #include <cam/scsi/scsi_message.h>
70 
71 #include <cam/ctl/ctl_io.h>
72 #include <cam/ctl/ctl.h>
73 #include <cam/ctl/ctl_frontend.h>
74 #include <cam/ctl/ctl_util.h>
75 #include <cam/ctl/ctl_error.h>
76 
/*
 * Per-bus (CAM path) state.  One of these is created for each
 * target-capable SIM that registers with CAM, and it doubles as the CTL
 * port for that bus.
 */
struct ctlfe_softc {
	struct ctl_port	port;		/* CTL frontend port for this bus */
	path_id_t	path_id;	/* CAM path id of the underlying bus */
	target_id_t	target_id;	/* initiator id reported by the path inquiry */
	uint32_t	hba_misc;	/* hba_misc flags copied from the path inquiry */
	u_int		maxio;		/* max I/O size the SIM supports (or DFLTPHYS) */
	struct cam_sim *sim;
	char		port_name[DEV_IDLEN];	/* e.g. "isp0"; used as the CTL port name */
	struct mtx	lun_softc_mtx;	/* protects lun_softc_list */
	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;	/* per-LUN periph softcs */
	STAILQ_ENTRY(ctlfe_softc) links;	/* entry on global ctlfe_softc_list */
};
89 
/* Global list of all per-bus softcs, protected by ctlfe_list_mtx. */
STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
struct mtx ctlfe_list_mtx;
static char ctlfe_mtx_desc[] = "ctlfelist";

/* Per-LUN flags. */
typedef enum {
	CTLFE_LUN_NONE		= 0x00,	/* normal (specific LUN) attachment */
	CTLFE_LUN_WILDCARD	= 0x01	/* wildcard (catch-all) LUN attachment */
} ctlfe_lun_flags;
98 
/*
 * Per-LUN state, hung off the "ctl" peripheral instance for that LUN.
 */
struct ctlfe_lun_softc {
	struct ctlfe_softc *parent_softc;	/* back pointer to the bus softc */
	struct cam_periph *periph;		/* associated CAM peripheral */
	ctlfe_lun_flags flags;
	int	 ctios_sent;		/* Number of active CTIOs */
	int	 refcount;		/* Number of active xpt_action() */
	int	 atios_alloced;		/* Number of ATIOs not freed */
	int	 inots_alloced;		/* Number of INOTs not freed */
	struct task	refdrain_task;	/* defers final release until refcount drains */
	TAILQ_HEAD(, ccb_hdr) work_queue;	/* ATIOs awaiting a CTIO from ctlfestart() */
	STAILQ_ENTRY(ctlfe_lun_softc) links;	/* entry on parent's lun_softc_list */
};
111 
/* Per-command flags. */
typedef enum {
	CTLFE_CMD_NONE		= 0x00,
	CTLFE_CMD_PIECEWISE	= 0x01	/* transfer split across multiple CTIOs */
} ctlfe_cmd_flags;

/*
 * Per-command bookkeeping, attached to each ctl_io via PRIV_INFO().
 * Tracks how far through the CTL S/G list we are when a transfer has to
 * be carried out piecewise (because of maxio or CTLFE_MAX_SEGS limits).
 */
struct ctlfe_cmd_info {
	int cur_transfer_index;		/* next CTL S/G entry to transfer */
	size_t cur_transfer_off;	/* byte offset within that entry */
	ctlfe_cmd_flags flags;
	/*
	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
	 * i386 and 512 bytes on amd64.
	 */
#define CTLFE_MAX_SEGS	32
	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];	/* S/G list handed to CAM */
};
129 
/*
 * When we register the adapter/bus, request that this many ctl_ios be
 * allocated.  This should be the maximum supported by the adapter, but we
 * currently don't have a way to get that back from the path inquiry.
 * XXX KDM add that to the path inquiry.
 */
#define	CTLFE_REQ_CTL_IO	4096
/*
 * Number of Accept Target I/O CCBs to allocate and queue down to the
 * adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_ATIO_PER_LUN	1024
/*
 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
 * allocate and queue down to the adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_IN_PER_LUN	1024

/*
 * Timeout (in seconds) on CTIO CCB doing DMA or sending status
 */
#define	CTLFE_TIMEOUT	5

/*
 * Turn this on to enable extra debugging prints.
 */
#if 0
#define	CTLFE_DEBUG
#endif

MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");

/* CCB private field holding the backing ctl_io (used on ATIOs and INOTs). */
#define	io_ptr		ppriv_ptr0

/* This is only used in the CTIO */
#define	ccb_atio	ppriv_ptr1

/* Accessors for the CTL-private pointers stashed in each ctl_io. */
#define PRIV_CCB(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
#define PRIV_INFO(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])
171 
/* Forward declarations for the periph methods and CTL port callbacks. */
static int		ctlfeinitialize(void);
static int		ctlfeshutdown(void);
static periph_init_t	ctlfeperiphinit;
static void		ctlfeasync(void *callback_arg, uint32_t code,
				   struct cam_path *path, void *arg);
static periph_ctor_t	ctlferegister;
static periph_oninv_t	ctlfeoninvalidate;
static periph_dtor_t	ctlfecleanup;
static periph_start_t	ctlfestart;
static void		ctlfedone(struct cam_periph *periph,
				  union ccb *done_ccb);

static void 		ctlfe_onoffline(void *arg, int online);
static void 		ctlfe_online(void *arg);
static void 		ctlfe_offline(void *arg);
static int 		ctlfe_lun_enable(void *arg, int lun_id);
static int 		ctlfe_lun_disable(void *arg, int lun_id);
static void		ctlfe_dump_sim(struct cam_sim *sim);
static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
static void 		ctlfe_datamove(union ctl_io *io);
static void 		ctlfe_done(union ctl_io *io);
static void 		ctlfe_dump(void);
static void		ctlfe_free_ccb(struct cam_periph *periph,
			    union ccb *ccb);
static void		ctlfe_requeue_ccb(struct cam_periph *periph,
			    union ccb *ccb, int unlock);

/* CAM peripheral driver registration for the "ctl" target periph. */
static struct periph_driver ctlfe_driver =
{
	ctlfeperiphinit, "ctl",
	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
	CAM_PERIPH_DRV_EARLY
};

/* CTL frontend registration: this module is the "camtgt" port type. */
static struct ctl_frontend ctlfe_frontend =
{
	.name = "camtgt",
	.init = ctlfeinitialize,
	.fe_dump = ctlfe_dump,
	.shutdown = ctlfeshutdown,
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);
214 
/*
 * CTL frontend shutdown method.
 *
 * Always fails with EBUSY: CAM currently provides no way to unregister a
 * peripheral driver, so once this frontend has registered the "ctl"
 * periph it cannot be torn down again.
 */
static int
ctlfeshutdown(void)
{

	/* Refuse: periph driver unregistration is unsupported in CAM. */
	return (EBUSY);
}
222 
/*
 * CTL frontend init method: set up the global bus-softc list and the
 * mutex that protects it, then register the "ctl" peripheral driver with
 * CAM.  The list and lock must exist before ctlfeasync() can run.
 */
static int
ctlfeinitialize(void)
{

	STAILQ_INIT(&ctlfe_softc_list);
	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
	periphdriver_register(&ctlfe_driver);
	return (0);
}
232 
/*
 * Peripheral driver init: hook into CAM's async notifications so we hear
 * about bus (path) arrival/departure and AC_CONTRACT events (initiator
 * arrival/departure).  Registration failure is only reported, not fatal.
 */
static void
ctlfeperiphinit(void)
{
	cam_status status;

	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
				    AC_CONTRACT, ctlfeasync, NULL, NULL);
	if (status != CAM_REQ_CMP) {
		printf("ctl: Failed to attach async callback due to CAM "
		       "status 0x%x!\n", status);
	}
}
245 
/*
 * Async callback registered in ctlfeperiphinit().  Creates/destroys a
 * bus softc (and its CTL port) as target-capable SIMs come and go, and
 * forwards initiator arrival/departure contract events to CTL.  May be
 * called from interrupt context, hence the M_NOWAIT allocations.
 */
static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ctlfe_softc *softc;

#ifdef CTLFEDEBUG
	printf("%s: entered\n", __func__);
#endif

	/* Look up an existing bus softc for this path, if any. */
	mtx_lock(&ctlfe_list_mtx);
	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
		if (softc->path_id == xpt_path_path_id(path))
			break;
	}
	mtx_unlock(&ctlfe_list_mtx);

	/*
	 * When a new path gets registered, and it is capable of target
	 * mode, go ahead and attach.  Later on, we may need to be more
	 * selective, but for now this will be sufficient.
	 */
	switch (code) {
	case AC_PATH_REGISTERED: {
		struct ctl_port *port;
		struct ccb_pathinq *cpi;
		int retval;

		cpi = (struct ccb_pathinq *)arg;

		/* Don't attach if it doesn't support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
#ifdef CTLFEDEBUG
			printf("%s: SIM %s%d doesn't support target mode\n",
			       __func__, cpi->dev_name, cpi->unit_number);
#endif
			break;
		}

		/* Already attached to this bus; nothing to do. */
		if (softc != NULL) {
#ifdef CTLFEDEBUG
			printf("%s: CTL port for CAM path %u already exists\n",
			       __func__, xpt_path_path_id(path));
#endif
			break;
		}

		/*
		 * We're in an interrupt context here, so we have to
		 * use M_NOWAIT.  Of course this means trouble if we
		 * can't allocate memory.
		 */
		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
		if (softc == NULL) {
			printf("%s: unable to malloc %zd bytes for softc\n",
			       __func__, sizeof(*softc));
			return;
		}

		/* Capture the bus parameters from the path inquiry. */
		softc->path_id = cpi->ccb_h.path_id;
		softc->target_id = cpi->initiator_id;
		softc->sim = xpt_path_sim(path);
		softc->hba_misc = cpi->hba_misc;
		if (cpi->maxio != 0)
			softc->maxio = cpi->maxio;
		else
			softc->maxio = DFLTPHYS;
		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
		STAILQ_INIT(&softc->lun_softc_list);

		port = &softc->port;
		port->frontend = &ctlfe_frontend;

		/*
		 * XXX KDM should we be more accurate here ?
		 */
		if (cpi->transport == XPORT_FC)
			port->port_type = CTL_PORT_FC;
		else if (cpi->transport == XPORT_SAS)
			port->port_type = CTL_PORT_SAS;
		else
			port->port_type = CTL_PORT_SCSI;

		/* XXX KDM what should the real number be here? */
		port->num_requested_ctl_io = CTLFE_REQ_CTL_IO;
		snprintf(softc->port_name, sizeof(softc->port_name),
			 "%s%d", cpi->dev_name, cpi->unit_number);
		/*
		 * XXX KDM it would be nice to allocate storage in the
		 * frontend structure itself.
		 */
		port->port_name = softc->port_name;
		port->physical_port = cpi->bus_id;
		port->virtual_port = 0;
		port->port_online = ctlfe_online;
		port->port_offline = ctlfe_offline;
		port->onoff_arg = softc;
		port->lun_enable = ctlfe_lun_enable;
		port->lun_disable = ctlfe_lun_disable;
		port->targ_lun_arg = softc;
		port->fe_datamove = ctlfe_datamove;
		port->fe_done = ctlfe_done;
		port->targ_port = -1;

		retval = ctl_port_register(port);
		if (retval != 0) {
			printf("%s: ctl_port_register() failed with "
			       "error %d!\n", __func__, retval);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
			break;
		} else {
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
			mtx_unlock(&ctlfe_list_mtx);
		}

		break;
	}
	case AC_PATH_DEREGISTERED: {

		if (softc != NULL) {
			/*
			 * XXX KDM are we certain at this point that there
			 * are no outstanding commands for this frontend?
			 */
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
			    links);
			mtx_unlock(&ctlfe_list_mtx);
			ctl_port_deregister(&softc->port);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
		}
		break;
	}
	case AC_CONTRACT: {
		struct ac_contract *ac;

		ac = (struct ac_contract *)arg;

		switch (ac->contract_number) {
		case AC_CONTRACT_DEV_CHG: {
			/* An initiator arrived on or left the bus. */
			struct ac_device_changed *dev_chg;
			int retval;

			dev_chg = (struct ac_device_changed *)ac->contract_data;

			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
			       __func__, dev_chg->wwpn, dev_chg->port,
			       xpt_path_path_id(path), dev_chg->target,
			       (dev_chg->arrived == 0) ?  "left" : "arrived");

			if (softc == NULL) {
				printf("%s: CTL port for CAM path %u not "
				       "found!\n", __func__,
				       xpt_path_path_id(path));
				break;
			}
			if (dev_chg->arrived != 0) {
				retval = ctl_add_initiator(&softc->port,
				    dev_chg->target, dev_chg->wwpn, NULL);
			} else {
				retval = ctl_remove_initiator(&softc->port,
				    dev_chg->target);
			}

			if (retval < 0) {
				printf("%s: could not %s port %d iid %u "
				       "WWPN %#jx!\n", __func__,
				       (dev_chg->arrived != 0) ? "add" :
				       "remove", softc->port.targ_port,
				       dev_chg->target,
				       (uintmax_t)dev_chg->wwpn);
			}
			break;
		}
		default:
			printf("%s: unsupported contract number %ju\n",
			       __func__, (uintmax_t)ac->contract_number);
			break;
		}
		break;
	}
	default:
		break;
	}
}
433 
434 static cam_status
435 ctlferegister(struct cam_periph *periph, void *arg)
436 {
437 	struct ctlfe_softc *bus_softc;
438 	struct ctlfe_lun_softc *softc;
439 	union ccb en_lun_ccb;
440 	cam_status status;
441 	int i;
442 
443 	softc = (struct ctlfe_lun_softc *)arg;
444 	bus_softc = softc->parent_softc;
445 
446 	TAILQ_INIT(&softc->work_queue);
447 	softc->periph = periph;
448 	periph->softc = softc;
449 
450 	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
451 	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
452 	en_lun_ccb.cel.grp6_len = 0;
453 	en_lun_ccb.cel.grp7_len = 0;
454 	en_lun_ccb.cel.enable = 1;
455 	xpt_action(&en_lun_ccb);
456 	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
457 	if (status != CAM_REQ_CMP) {
458 		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
459 			  __func__, en_lun_ccb.ccb_h.status);
460 		return (status);
461 	}
462 
463 	status = CAM_REQ_CMP;
464 
465 	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
466 		union ccb *new_ccb;
467 		union ctl_io *new_io;
468 		struct ctlfe_cmd_info *cmd_info;
469 
470 		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
471 					      M_ZERO|M_NOWAIT);
472 		if (new_ccb == NULL) {
473 			status = CAM_RESRC_UNAVAIL;
474 			break;
475 		}
476 		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
477 		if (new_io == NULL) {
478 			free(new_ccb, M_CTLFE);
479 			status = CAM_RESRC_UNAVAIL;
480 			break;
481 		}
482 		cmd_info = malloc(sizeof(*cmd_info), M_CTLFE,
483 		    M_ZERO | M_NOWAIT);
484 		if (cmd_info == NULL) {
485 			ctl_free_io(new_io);
486 			free(new_ccb, M_CTLFE);
487 			status = CAM_RESRC_UNAVAIL;
488 			break;
489 		}
490 		PRIV_INFO(new_io) = cmd_info;
491 		softc->atios_alloced++;
492 		new_ccb->ccb_h.io_ptr = new_io;
493 
494 		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
495 		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
496 		new_ccb->ccb_h.cbfcnp = ctlfedone;
497 		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
498 		xpt_action(new_ccb);
499 		status = new_ccb->ccb_h.status;
500 		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
501 			free(cmd_info, M_CTLFE);
502 			ctl_free_io(new_io);
503 			free(new_ccb, M_CTLFE);
504 			break;
505 		}
506 	}
507 
508 	status = cam_periph_acquire(periph);
509 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
510 		xpt_print(periph->path, "%s: could not acquire reference "
511 			  "count, status = %#x\n", __func__, status);
512 		return (status);
513 	}
514 
515 	if (i == 0) {
516 		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
517 			  "status 0x%x\n", __func__, status);
518 		return (CAM_REQ_CMP_ERR);
519 	}
520 
521 	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
522 		union ccb *new_ccb;
523 		union ctl_io *new_io;
524 
525 		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
526 					      M_ZERO|M_NOWAIT);
527 		if (new_ccb == NULL) {
528 			status = CAM_RESRC_UNAVAIL;
529 			break;
530 		}
531 		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
532 		if (new_io == NULL) {
533 			free(new_ccb, M_CTLFE);
534 			status = CAM_RESRC_UNAVAIL;
535 			break;
536 		}
537 		softc->inots_alloced++;
538 		new_ccb->ccb_h.io_ptr = new_io;
539 
540 		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
541 		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
542 		new_ccb->ccb_h.cbfcnp = ctlfedone;
543 		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
544 		xpt_action(new_ccb);
545 		status = new_ccb->ccb_h.status;
546 		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
547 			/*
548 			 * Note that we don't free the CCB here.  If the
549 			 * status is not CAM_REQ_INPROG, then we're
550 			 * probably talking to a SIM that says it is
551 			 * target-capable but doesn't support the
552 			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
553 			 * older API.  In that case, it'll call xpt_done()
554 			 * on the CCB, and we need to free it in our done
555 			 * routine as a result.
556 			 */
557 			break;
558 		}
559 	}
560 	if ((i == 0)
561 	 || (status != CAM_REQ_INPROG)) {
562 		xpt_print(periph->path, "%s: could not allocate immediate "
563 			  "notify CCBs, status 0x%x\n", __func__, status);
564 		return (CAM_REQ_CMP_ERR);
565 	}
566 	mtx_lock(&bus_softc->lun_softc_mtx);
567 	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
568 	mtx_unlock(&bus_softc->lun_softc_mtx);
569 	return (CAM_REQ_CMP);
570 }
571 
/*
 * Peripheral invalidation callback: disable target mode for this LUN in
 * the SIM and unlink the LUN softc from its parent bus softc.  The CCBs
 * still held by the SIM are expected to be returned to us afterwards and
 * freed as they complete.
 */
static void
ctlfeoninvalidate(struct cam_periph *periph)
{
	union ccb en_lun_ccb;
	cam_status status;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	/* Issue XPT_EN_LUN with enable=0 to turn the LUN off. */
	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
	en_lun_ccb.cel.grp6_len = 0;
	en_lun_ccb.cel.grp7_len = 0;
	en_lun_ccb.cel.enable = 0;
	xpt_action(&en_lun_ccb);
	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
			  __func__, en_lun_ccb.ccb_h.status);
		/*
		 * XXX KDM what do we do now?
		 */
	}

	bus_softc = softc->parent_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
}
602 
/*
 * Peripheral destructor, called when the last reference goes away.  By
 * now every CCB must have been returned and freed and every xpt_action()
 * call must have drained; the asserts verify that accounting before the
 * softc is freed.
 */
static void
ctlfecleanup(struct cam_periph *periph)
{
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0",
	    __func__, softc->ctios_sent));
	KASSERT(softc->refcount == 0, ("%s: refcount %d != 0",
	    __func__, softc->refcount));
	KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0",
	    __func__, softc->atios_alloced));
	KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0",
	    __func__, softc->inots_alloced));

	free(softc, M_CTLFE);
}
621 
/*
 * Fill in the data-transfer parameters of a CTIO for the given ctl_io:
 * direction, data pointer or S/G list, transfer length, and segment
 * count.  If the remaining data exceeds the SIM's maxio, or the CTL S/G
 * list needs more than CTLFE_MAX_SEGS CAM segments, only part of the
 * transfer is set up here: the resume point is recorded in the command's
 * ctlfe_cmd_info and CTLFE_CMD_PIECEWISE is flagged so the remainder can
 * be sent in a later CTIO.
 */
static void
ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
    u_int16_t *sglist_cnt)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ctl_sg_entry *ctl_sglist;
	bus_dma_segment_t *cam_sglist;
	size_t off;
	int i, idx;

	cmd_info = PRIV_INFO(io);
	bus_softc = softc->parent_softc;

	/*
	 * Set the direction, relative to the initiator.
	 */
	*flags &= ~CAM_DIR_MASK;
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		*flags |= CAM_DIR_IN;
	else
		*flags |= CAM_DIR_OUT;

	*flags &= ~CAM_DATA_MASK;
	/* Resume point recorded by a previous piecewise pass (0 initially). */
	idx = cmd_info->cur_transfer_index;
	off = cmd_info->cur_transfer_off;
	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
	if (io->scsiio.kern_sg_entries == 0) {	/* No S/G list. */

		/* One time shift for SRR offset. */
		off += io->scsiio.ext_data_filled;
		io->scsiio.ext_data_filled = 0;

		*data_ptr = io->scsiio.kern_data_ptr + off;
		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
			*dxfer_len = io->scsiio.kern_data_len - off;
		} else {
			/* Clamp to maxio and arrange to continue later. */
			*dxfer_len = bus_softc->maxio;
			cmd_info->cur_transfer_off += bus_softc->maxio;
			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
		}
		*sglist_cnt = 0;

		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_PADDR;
		else
			*flags |= CAM_DATA_VADDR;
	} else {	/* S/G list with physical or virtual pointers. */
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;

		/* One time shift for SRR offset. */
		while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
			io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
			idx++;
			off = 0;
		}
		off += io->scsiio.ext_data_filled;
		io->scsiio.ext_data_filled = 0;

		/* Convert CTL S/G entries into CAM segments, honoring maxio. */
		cam_sglist = cmd_info->cam_sglist;
		*dxfer_len = 0;
		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
				*dxfer_len += cam_sglist[i].ds_len;
			} else {
				/* Entry exceeds remaining maxio: take a partial piece. */
				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
				cmd_info->cur_transfer_index = idx + i;
				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				*dxfer_len += cam_sglist[i].ds_len;
				/*
				 * NOTE(review): every other access in this
				 * loop indexes ctl_sglist[i + idx]; confirm
				 * whether this should too.
				 */
				if (ctl_sglist[i].len != 0)
					i++;
				break;
			}
			if (i == (CTLFE_MAX_SEGS - 1) &&
			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
				/* Out of CAM segments; continue from the next entry. */
				cmd_info->cur_transfer_index = idx + i + 1;
				cmd_info->cur_transfer_off = 0;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				i++;
				break;
			}
			off = 0;
		}
		*sglist_cnt = i;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_SG_PADDR;
		else
			*flags |= CAM_DATA_SG;
		*data_ptr = (uint8_t *)cam_sglist;
	}
}
717 
/*
 * Peripheral start routine: take the next ATIO off the LUN's work queue
 * and use the supplied CCB to issue a CTIO for it -- either a (possibly
 * partial) data transfer set up by ctlfedata(), or final status/sense.
 * Aborted commands are cancelled via XPT_ABORT and their ATIOs requeued
 * to the SIM instead.
 */
static void
ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_hdr *ccb_h;
	struct ccb_accept_tio *atio;
	struct ccb_scsiio *csio;
	uint8_t *data_ptr;
	uint32_t dxfer_len;
	ccb_flags flags;
	union ctl_io *io;
	uint8_t scsi_status;

	softc = (struct ctlfe_lun_softc *)periph->softc;

next:
	/* Nothing queued: return the CCB to XPT and stop. */
	ccb_h = TAILQ_FIRST(&softc->work_queue);
	if (ccb_h == NULL) {
		xpt_release_ccb(start_ccb);
		return;
	}

	/* Take the ATIO off the work queue */
	TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
	atio = (struct ccb_accept_tio *)ccb_h;
	io = (union ctl_io *)ccb_h->io_ptr;
	csio = &start_ccb->csio;

	/* Carry over the disconnect/tag/direction bits from the ATIO. */
	flags = atio->ccb_h.flags &
		(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
	cmd_info = PRIV_INFO(io);
	cmd_info->cur_transfer_index = 0;
	cmd_info->cur_transfer_off = 0;
	cmd_info->flags = 0;

	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
		/*
		 * Datamove call, we need to setup the S/G list.
		 */
		ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
		    &csio->sglist_cnt);
	} else {
		/*
		 * We're done, send status back.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
		    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
			io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;

			/* Tell the SIM that we've aborted this ATIO */
#ifdef CTLFEDEBUG
			printf("%s: tag %04x abort\n", __func__, atio->tag_id);
#endif
			KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO,
			    ("func_code %#x is not ATIO", atio->ccb_h.func_code));
			start_ccb->ccb_h.func_code = XPT_ABORT;
			start_ccb->cab.abort_ccb = (union ccb *)atio;
			xpt_action(start_ccb);

			/* Hand the ATIO back to the SIM for reuse. */
			ctlfe_requeue_ccb(periph, (union ccb *)atio,
			    /* unlock */0);

			/* XPT_ABORT is not queued, so we can take next I/O. */
			goto next;
		}
		data_ptr = NULL;
		dxfer_len = 0;
		csio->sglist_cnt = 0;
	}
	scsi_status = 0;
	/* Attach status/sense only once the whole transfer has been set up. */
	if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
	    (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
	    ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
	     io->io_hdr.status == CTL_SUCCESS)) {
		flags |= CAM_SEND_STATUS;
		scsi_status = io->scsiio.scsi_status;
		csio->sense_len = io->scsiio.sense_len;
#ifdef CTLFEDEBUG
		printf("%s: tag %04x status %x\n", __func__,
		       atio->tag_id, io->io_hdr.status);
#endif
		if (csio->sense_len != 0) {
			csio->sense_data = io->scsiio.sense_data;
			flags |= CAM_SEND_SENSE;
		}
	}

#ifdef CTLFEDEBUG
	printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
	       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
	       atio->tag_id, flags, data_ptr, dxfer_len);
#endif

	/*
	 * Valid combinations:
	 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
	 *    sglist_cnt = 0
	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
	 *    sglist_cnt = 0
	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
	 *    sglist_cnt != 0
	 */
#ifdef CTLFEDEBUG
	if (((flags & CAM_SEND_STATUS)
	  && (((flags & CAM_DATA_SG) != 0)
	   || (dxfer_len != 0)
	   || (csio->sglist_cnt != 0)))
	 || (((flags & CAM_SEND_STATUS) == 0)
	  && (dxfer_len == 0))
	 || ((flags & CAM_DATA_SG)
	  && (csio->sglist_cnt == 0))
	 || (((flags & CAM_DATA_SG) == 0)
	  && (csio->sglist_cnt != 0))) {
		printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
		       "%d sg %u\n", __func__, atio->tag_id,
		       atio_cdb_ptr(atio)[0], flags, dxfer_len,
		       csio->sglist_cnt);
		printf("%s: tag %04x io status %#x\n", __func__,
		       atio->tag_id, io->io_hdr.status);
	}
#endif
	cam_fill_ctio(csio,
		      /*retries*/ 2,
		      ctlfedone,
		      flags,
		      (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0,
		      atio->tag_id,
		      atio->init_id,
		      scsi_status,
		      /*data_ptr*/ data_ptr,
		      /*dxfer_len*/ dxfer_len,
		      /*timeout*/ CTLFE_TIMEOUT * 1000);
	start_ccb->ccb_h.flags |= CAM_UNLOCKED;
	start_ccb->ccb_h.ccb_atio = atio;
	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
		io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED);

	softc->ctios_sent++;
	/* Submit the CTIO with the periph lock dropped (CCB is CAM_UNLOCKED). */
	softc->refcount++;
	cam_periph_unlock(periph);
	xpt_action(start_ccb);
	cam_periph_lock(periph);
	softc->refcount--;

	/*
	 * If we still have work to do, ask for another CCB.
	 */
	if (!TAILQ_EMPTY(&softc->work_queue))
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}
870 
/*
 * Taskqueue handler, queued by ctlfe_free_ccb() when the last CCB is
 * freed while xpt_action() calls are still in flight: wait for refcount
 * to drain to zero, then drop the deferred periph reference.
 */
static void
ctlfe_drain(void *context, int pending)
{
	struct cam_periph *periph = context;
	struct ctlfe_lun_softc *softc = periph->softc;

	cam_periph_lock(periph);
	while (softc->refcount != 0) {
		/* 1-tick timeout: re-check even if no wakeup is posted. */
		cam_periph_sleep(periph, &softc->refcount, PRIBIO,
		    "ctlfe_drain", 1);
	}
	cam_periph_unlock(periph);
	cam_periph_release(periph);
}
885 
/*
 * Free an ATIO/INOT/NOTIFY-ACK CCB together with its backing ctl_io (and,
 * for ATIOs, the attached cmd_info), and update the allocation counters.
 * When the last outstanding CCB has come back, release the periph
 * reference taken in ctlferegister() -- via the drain task if
 * xpt_action() calls are still in flight.
 */
static void
ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
{
	struct ctlfe_lun_softc *softc;
	union ctl_io *io;
	struct ctlfe_cmd_info *cmd_info;

	softc = (struct ctlfe_lun_softc *)periph->softc;
	io = ccb->ccb_h.io_ptr;

	switch (ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
		softc->atios_alloced--;
		cmd_info = PRIV_INFO(io);
		free(cmd_info, M_CTLFE);
		break;
	case XPT_IMMEDIATE_NOTIFY:
	case XPT_NOTIFY_ACKNOWLEDGE:
		softc->inots_alloced--;
		break;
	default:
		break;
	}

	ctl_free_io(io);
	free(ccb, M_CTLFE);

	KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0",
	    __func__, softc->atios_alloced));
	KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0",
	    __func__, softc->inots_alloced));

	/*
	 * If we have received all of our CCBs, we can release our
	 * reference on the peripheral driver.  It will probably go away
	 * now.
	 */
	if (softc->atios_alloced == 0 && softc->inots_alloced == 0) {
		if (softc->refcount == 0) {
			cam_periph_release_locked(periph);
		} else {
			/* xpt_action() calls still in flight; drain first. */
			TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph);
			taskqueue_enqueue(taskqueue_thread,
			    &softc->refdrain_task);
		}
	}
}
933 
/*
 * Send the ATIO/INOT back to the SIM, or free it if the periph was
 * invalidated.  If "unlock" is non-zero, the periph lock is held on
 * entry and is dropped before the CCB is resubmitted (or before return
 * on the invalidated path).
 */
static void
ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock)
{
	struct ctlfe_lun_softc *softc;
	struct mtx *mtx;

	if (periph->flags & CAM_PERIPH_INVALID) {
		/* Periph is going away; just free the CCB instead. */
		mtx = cam_periph_mtx(periph);
		ctlfe_free_ccb(periph, ccb);
		if (unlock)
			mtx_unlock(mtx);
		return;
	}
	if (unlock)
		cam_periph_unlock(periph);

	/*
	 * For a wildcard attachment, commands can come in with a specific
	 * target/lun.  Reset the target and LUN fields back to the wildcard
	 * values before we send them back down to the SIM.
	 */
	softc = (struct ctlfe_lun_softc *)periph->softc;
	if (softc->flags & CTLFE_LUN_WILDCARD) {
		ccb->ccb_h.target_id = CAM_TARGET_WILDCARD;
		ccb->ccb_h.target_lun = CAM_LUN_WILDCARD;
	}

	xpt_action(ccb);
}
966 
967 static int
968 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
969 {
970 	uint64_t lba;
971 	uint32_t num_blocks, nbc;
972 	uint8_t *cmdbyt = atio_cdb_ptr(atio);
973 
974 	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */
975 
976 	switch (cmdbyt[0]) {
977 	case READ_6:
978 	case WRITE_6:
979 	{
980 		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
981 		lba = scsi_3btoul(cdb->addr);
982 		lba &= 0x1fffff;
983 		num_blocks = cdb->length;
984 		if (num_blocks == 0)
985 			num_blocks = 256;
986 		lba += nbc;
987 		num_blocks -= nbc;
988 		scsi_ulto3b(lba, cdb->addr);
989 		cdb->length = num_blocks;
990 		break;
991 	}
992 	case READ_10:
993 	case WRITE_10:
994 	{
995 		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
996 		lba = scsi_4btoul(cdb->addr);
997 		num_blocks = scsi_2btoul(cdb->length);
998 		lba += nbc;
999 		num_blocks -= nbc;
1000 		scsi_ulto4b(lba, cdb->addr);
1001 		scsi_ulto2b(num_blocks, cdb->length);
1002 		break;
1003 	}
1004 	case READ_12:
1005 	case WRITE_12:
1006 	{
1007 		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
1008 		lba = scsi_4btoul(cdb->addr);
1009 		num_blocks = scsi_4btoul(cdb->length);
1010 		lba += nbc;
1011 		num_blocks -= nbc;
1012 		scsi_ulto4b(lba, cdb->addr);
1013 		scsi_ulto4b(num_blocks, cdb->length);
1014 		break;
1015 	}
1016 	case READ_16:
1017 	case WRITE_16:
1018 	{
1019 		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
1020 		lba = scsi_8btou64(cdb->addr);
1021 		num_blocks = scsi_4btoul(cdb->length);
1022 		lba += nbc;
1023 		num_blocks -= nbc;
1024 		scsi_u64to8b(lba, cdb->addr);
1025 		scsi_ulto4b(num_blocks, cdb->length);
1026 		break;
1027 	}
1028 	default:
1029 		return -1;
1030 	}
1031 	return (0);
1032 }
1033 
/*
 * Completion callback for every CCB this peripheral driver issues:
 * accepted target I/O (ATIO), continue target I/O (CTIO), immediate
 * notify, notify acknowledge, and SIM knob CCBs.  All CCBs arrive with
 * CAM_UNLOCKED set (asserted below); the periph lock is taken explicitly
 * where needed.
 */
static void
ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_accept_tio *atio = NULL;
	union ctl_io *io = NULL;
	struct mtx *mtx;
	cam_status status;

	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
#ifdef CTLFE_DEBUG
	printf("%s: entered, func_code = %#x\n", __func__,
	       done_ccb->ccb_h.func_code);
#endif

	/*
	 * At this point CTL has no known use case for device queue freezes.
	 * In case some SIM think different -- drop its freeze right here.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(periph->path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	softc = (struct ctlfe_lun_softc *)periph->softc;
	bus_softc = softc->parent_softc;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO: {

		atio = &done_ccb->atio;
		status = atio->ccb_h.status & CAM_STATUS_MASK;
		if (status != CAM_CDB_RECVD) {
			ctlfe_free_ccb(periph, done_ccb);
			goto out;
		}

		/*
		 * Also reached from the CTIO path below when an SRR
		 * requires re-issuing the command with an adjusted CDB.
		 */
 resubmit:
		/*
		 * Allocate a ctl_io, pass it to CTL, and wait for the
		 * datamove or done.
		 */
		mtx_unlock(mtx);
		io = done_ccb->ccb_h.io_ptr;
		cmd_info = PRIV_INFO(io);
		ctl_zero_io(io);

		/* Save pointers on both sides (ctl_zero_io() wiped them). */
		PRIV_CCB(io) = done_ccb;
		PRIV_INFO(io) = cmd_info;
		done_ccb->ccb_h.io_ptr = io;

		/*
		 * Only SCSI I/O comes down this path, resets, etc. come
		 * down the immediate notify path below.
		 */
		io->io_hdr.io_type = CTL_IO_SCSI;
		io->io_hdr.nexus.initid = atio->init_id;
		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
		if (bus_softc->hba_misc & PIM_EXTLUNS) {
			/* SIM reports extended (64-bit, swizzled) LUN ids. */
			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
			    CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun));
		} else {
			io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
		}
		io->scsiio.tag_num = atio->tag_id;
		/* Map the CAM tag action onto the CTL tag type. */
		switch (atio->tag_action) {
		case CAM_TAG_ACTION_NONE:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			break;
		case MSG_SIMPLE_TASK:
			io->scsiio.tag_type = CTL_TAG_SIMPLE;
			break;
		case MSG_HEAD_OF_QUEUE_TASK:
			io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
			break;
		case MSG_ORDERED_TASK:
			io->scsiio.tag_type = CTL_TAG_ORDERED;
			break;
		case MSG_ACA_TASK:
			io->scsiio.tag_type = CTL_TAG_ACA;
			break;
		default:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			printf("%s: unhandled tag type %#x!!\n", __func__,
			       atio->tag_action);
			break;
		}
		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
		}
		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
		bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len);

#ifdef CTLFEDEBUG
		printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
		        io->io_hdr.nexus.initid,
		        io->io_hdr.nexus.targ_port,
		        io->io_hdr.nexus.targ_lun,
			io->scsiio.tag_num, io->scsiio.cdb[0]);
#endif

		ctl_queue(io);
		return;
	}
	case XPT_CONT_TARGET_IO: {
		int srr = 0;
		uint32_t srr_off = 0;

		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
		io = (union ctl_io *)atio->ccb_h.io_ptr;

		softc->ctios_sent--;
#ifdef CTLFEDEBUG
		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
		       __func__, atio->tag_id, done_ccb->ccb_h.flags);
#endif
		/*
		 * Handle the SRR (Sequence Retransmission Request) case,
		 * where the initiator pushed the data pointer back.  The
		 * requested offset is carried in bytes 3-6 of the extended
		 * message.
		 */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
		    && done_ccb->csio.msg_ptr != NULL
		    && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
		    && done_ccb->csio.msg_ptr[1] == 5
		    && done_ccb->csio.msg_ptr[2] == 0) {
			srr = 1;
			srr_off =
			    (done_ccb->csio.msg_ptr[3] << 24)
			    | (done_ccb->csio.msg_ptr[4] << 16)
			    | (done_ccb->csio.msg_ptr[5] << 8)
			    | (done_ccb->csio.msg_ptr[6]);
		}

		/*
		 * If we have an SRR and we're still sending data, we
		 * should be able to adjust offsets and cycle again.
		 * It is possible only if offset is from this datamove.
		 */
		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
		    srr_off >= io->scsiio.kern_rel_offset &&
		    srr_off < io->scsiio.kern_rel_offset +
		     io->scsiio.kern_data_len) {
			io->scsiio.kern_data_resid =
			    io->scsiio.kern_rel_offset +
			    io->scsiio.kern_data_len - srr_off;
			io->scsiio.ext_data_filled = srr_off;
			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
			xpt_release_ccb(done_ccb);
			/* Head insertion: retry before any queued work. */
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
			break;
		}

		/*
		 * If status was being sent, the back end data is now history.
		 * Hack it up and resubmit a new command with the CDB adjusted.
		 * If the SIM does the right thing, all of the resid math
		 * should work.
		 */
		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
			xpt_release_ccb(done_ccb);
			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
				done_ccb = (union ccb *)atio;
				goto resubmit;
			}
			/*
			 * Fall through to doom....
			 */
		}

		if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;

		/*
		 * If we were sending status back to the initiator, free up
		 * resources.  If we were doing a datamove, call the
		 * datamove done routine.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
			/*
			 * If we asked to send sense data but it wasn't sent,
			 * queue the I/O back to CTL for later REQUEST SENSE.
			 */
			if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 &&
			    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
			    (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 &&
			    (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) {
				PRIV_INFO(io) = PRIV_INFO(
				    (union ctl_io *)atio->ccb_h.io_ptr);
				ctl_queue_sense(atio->ccb_h.io_ptr);
				atio->ccb_h.io_ptr = io;
			}

			/* Abort ATIO if CTIO sending status has failed. */
			if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) !=
			    CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_ABORT;
				done_ccb->cab.abort_ccb = (union ccb *)atio;
				xpt_action(done_ccb);
			}

			/* ctlfe_requeue_ccb() drops the periph lock for us. */
			xpt_release_ccb(done_ccb);
			ctlfe_requeue_ccb(periph, (union ccb *)atio,
			    /* unlock */1);
			return;
		} else {
			struct ctlfe_cmd_info *cmd_info;
			struct ccb_scsiio *csio;

			csio = &done_ccb->csio;
			cmd_info = PRIV_INFO(io);

			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;

			/*
			 * Translate CAM status to CTL status.  Success
			 * does not change the overall, ctl_io status.  In
			 * that case we just set port_status to 0.  If we
			 * have a failure, though, set a data phase error
			 * for the overall ctl_io.
			 */
			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
			case CAM_REQ_CMP:
				io->scsiio.kern_data_resid -=
				    csio->dxfer_len - csio->resid;
				io->io_hdr.port_status = 0;
				break;
			default:
				/*
				 * XXX KDM we probably need to figure out a
				 * standard set of errors that the SIM
				 * drivers should return in the event of a
				 * data transfer failure.  A data phase
				 * error will at least point the user to a
				 * data transfer error of some sort.
				 * Hopefully the SIM printed out some
				 * additional information to give the user
				 * a clue what happened.
				 */
				io->io_hdr.port_status = 0xbad1;
				ctl_set_data_phase_error(&io->scsiio);
				/*
				 * XXX KDM figure out residual.
				 */
				break;
			}
			/*
			 * If we had to break this S/G list into multiple
			 * pieces, figure out where we are in the list, and
			 * continue sending pieces if necessary.
			 */
			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) &&
			    io->io_hdr.port_status == 0 && csio->resid == 0) {
				ccb_flags flags;
				uint8_t *data_ptr;
				uint32_t dxfer_len;

				flags = atio->ccb_h.flags &
					(CAM_DIS_DISCONNECT|
					 CAM_TAG_ACTION_VALID);

				ctlfedata(softc, io, &flags, &data_ptr,
				    &dxfer_len, &csio->sglist_cnt);

				if (((flags & CAM_SEND_STATUS) == 0)
				 && (dxfer_len == 0)) {
					printf("%s: tag %04x no status or "
					       "len cdb = %02x\n", __func__,
					       atio->tag_id,
					       atio_cdb_ptr(atio)[0]);
					printf("%s: tag %04x io status %#x\n",
					       __func__, atio->tag_id,
					       io->io_hdr.status);
				}

				cam_fill_ctio(csio,
					      /*retries*/ 2,
					      ctlfedone,
					      flags,
					      (flags & CAM_TAG_ACTION_VALID) ?
					       MSG_SIMPLE_Q_TAG : 0,
					      atio->tag_id,
					      atio->init_id,
					      0,
					      /*data_ptr*/ data_ptr,
					      /*dxfer_len*/ dxfer_len,
					      CTLFE_TIMEOUT * 1000);

				csio->ccb_h.flags |= CAM_UNLOCKED;
				csio->resid = 0;
				csio->ccb_h.ccb_atio = atio;
				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
				softc->ctios_sent++;
				mtx_unlock(mtx);
				xpt_action((union ccb *)csio);
			} else {
				/*
				 * Release the CTIO.  The ATIO will be sent back
				 * down to the SIM once we send status.
				 */
				xpt_release_ccb(done_ccb);
				mtx_unlock(mtx);

				/* Call the backend move done callback */
				io->scsiio.be_move_done(io);
			}
			return;
		}
		break;
	}
	case XPT_IMMEDIATE_NOTIFY: {
		union ctl_io *io;
		struct ccb_immediate_notify *inot;
		int send_ctl_io;

		inot = &done_ccb->cin1;
		io = done_ccb->ccb_h.io_ptr;
		ctl_zero_io(io);

		send_ctl_io = 1;

		io->io_hdr.io_type = CTL_IO_TASK;
		PRIV_CCB(io) = done_ccb;
		inot->ccb_h.io_ptr = io;
		io->io_hdr.nexus.initid = inot->initiator_id;
		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
		if (bus_softc->hba_misc & PIM_EXTLUNS) {
			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
			    CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun));
		} else {
			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
		}
		/* XXX KDM should this be the tag_id? */
		io->taskio.tag_num = inot->seq_id;

		/* Map the notify reason onto a CTL task management action. */
		status = inot->ccb_h.status & CAM_STATUS_MASK;
		switch (status) {
		case CAM_SCSI_BUS_RESET:
			io->taskio.task_action = CTL_TASK_BUS_RESET;
			break;
		case CAM_BDR_SENT:
			io->taskio.task_action = CTL_TASK_TARGET_RESET;
			break;
		case CAM_MESSAGE_RECV:
			switch (inot->arg) {
			case MSG_ABORT_TASK_SET:
				io->taskio.task_action =
				    CTL_TASK_ABORT_TASK_SET;
				break;
			case MSG_TARGET_RESET:
				io->taskio.task_action = CTL_TASK_TARGET_RESET;
				break;
			case MSG_ABORT_TASK:
				io->taskio.task_action = CTL_TASK_ABORT_TASK;
				break;
			case MSG_LOGICAL_UNIT_RESET:
				io->taskio.task_action = CTL_TASK_LUN_RESET;
				break;
			case MSG_CLEAR_TASK_SET:
				io->taskio.task_action =
				    CTL_TASK_CLEAR_TASK_SET;
				break;
			case MSG_CLEAR_ACA:
				io->taskio.task_action = CTL_TASK_CLEAR_ACA;
				break;
			case MSG_QUERY_TASK:
				io->taskio.task_action = CTL_TASK_QUERY_TASK;
				break;
			case MSG_QUERY_TASK_SET:
				io->taskio.task_action =
				    CTL_TASK_QUERY_TASK_SET;
				break;
			case MSG_QUERY_ASYNC_EVENT:
				io->taskio.task_action =
				    CTL_TASK_QUERY_ASYNC_EVENT;
				break;
			case MSG_NOOP:
				send_ctl_io = 0;
				break;
			default:
				xpt_print(periph->path,
				    "%s: unsupported INOT message 0x%x\n",
				    __func__, inot->arg);
				send_ctl_io = 0;
				break;
			}
			break;
		default:
			xpt_print(periph->path,
			    "%s: unsupported INOT status 0x%x\n",
			    __func__, status);
			/* FALLTHROUGH */
		case CAM_REQ_ABORTED:
		case CAM_REQ_INVALID:
		case CAM_DEV_NOT_THERE:
		case CAM_PROVIDE_FAIL:
			ctlfe_free_ccb(periph, done_ccb);
			goto out;
		}
		if (send_ctl_io != 0) {
			ctl_queue(io);
		} else {
			/* Nothing for CTL to do; acknowledge immediately. */
			done_ccb->ccb_h.status = CAM_REQ_INPROG;
			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
			xpt_action(done_ccb);
		}
		break;
	}
	case XPT_NOTIFY_ACKNOWLEDGE:
		/* Queue this back down to the SIM as an immediate notify. */
		done_ccb->ccb_h.status = CAM_REQ_INPROG;
		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1);
		return;
	case XPT_SET_SIM_KNOB:
	case XPT_GET_SIM_KNOB:
	case XPT_GET_SIM_KNOB_OLD:
		break;
	default:
		panic("%s: unexpected CCB type %#x", __func__,
		      done_ccb->ccb_h.func_code);
		break;
	}

out:
	mtx_unlock(mtx);
}
1474 
/*
 * Push the port online/offline state down to the SIM: fetch the current
 * SIM knobs, optionally program user-configured WWNN/WWPN values (or
 * record the SIM-reported ones with CTL), then set or clear the target
 * role via XPT_SET_SIM_KNOB.
 */
static void
ctlfe_onoffline(void *arg, int online)
{
	struct ctlfe_softc *bus_softc;
	union ccb *ccb;
	cam_status status;
	struct cam_path *path;
	int set_wwnn;

	bus_softc = (struct ctlfe_softc *)arg;

	set_wwnn = 0;

	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path!\n", __func__);
		return;
	}
	ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
	ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
	xpt_action(ccb);

	/*
	 * Copan WWN format:
	 *
	 * Bits 63-60:	0x5		NAA, IEEE registered name
	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
	 * Bits 35-12:			Copan SSN (Sequential Serial Number)
	 * Bits 11-8:			Type of port:
	 *					1 == N-Port
	 *					2 == F-Port
	 *					3 == NL-Port
	 * Bits 7-0:			0 == Node Name, >0 == Port Number
	 */
	if (online != 0) {
		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){

			printf("%s: %s current WWNN %#jx\n", __func__,
			       bus_softc->port_name,
			       ccb->knob.xport_specific.fc.wwnn);
			printf("%s: %s current WWPN %#jx\n", __func__,
			       bus_softc->port_name,
			       ccb->knob.xport_specific.fc.wwpn);

			/*
			 * If the user has specified a WWNN/WWPN, send them
			 * down to the SIM.  Otherwise, record what the SIM
			 * has reported.
			 */
			if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn
			    != ccb->knob.xport_specific.fc.wwnn) {
				ccb->knob.xport_specific.fc.wwnn =
				    bus_softc->port.wwnn;
				set_wwnn = 1;
			} else {
				ctl_port_set_wwns(&bus_softc->port,
				    true, ccb->knob.xport_specific.fc.wwnn,
				    false, 0);
			}
			if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn
			     != ccb->knob.xport_specific.fc.wwpn) {
				ccb->knob.xport_specific.fc.wwpn =
				    bus_softc->port.wwpn;
				/*
				 * NOTE(review): set_wwnn doubles as "push the
				 * address knobs down" for WWPN changes too;
				 * it gates KNOB_VALID_ADDRESS below.
				 */
				set_wwnn = 1;
			} else {
				ctl_port_set_wwns(&bus_softc->port,
				    false, 0,
				    true, ccb->knob.xport_specific.fc.wwpn);
			}


			if (set_wwnn != 0) {
				printf("%s: %s new WWNN %#jx\n", __func__,
				       bus_softc->port_name,
				ccb->knob.xport_specific.fc.wwnn);
				printf("%s: %s new WWPN %#jx\n", __func__,
				       bus_softc->port_name,
				       ccb->knob.xport_specific.fc.wwpn);
			}
		} else {
			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
			       bus_softc->port_name);
		}
	}
	/* Reuse the same CCB to program the role (and address, if changed). */
	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
	if (set_wwnn != 0)
		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;

	if (online != 0)
		ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET;
	else
		ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET;

	xpt_action(ccb);

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		printf("%s: SIM %s (path id %d) target %s failed with "
		       "status %#x\n",
		       __func__, bus_softc->port_name, bus_softc->path_id,
		       (online != 0) ? "enable" : "disable",
		       ccb->ccb_h.status);
	} else {
		printf("%s: SIM %s (path id %d) target %s succeeded\n",
		       __func__, bus_softc->port_name, bus_softc->path_id,
		       (online != 0) ? "enable" : "disable");
	}

	xpt_free_path(path);
	xpt_free_ccb(ccb);
}
1588 
/*
 * Bring a port online: create the wildcard LUN peripheral for the bus
 * (if it does not already exist) and then enable the target role on the
 * SIM via ctlfe_onoffline().
 */
static void
ctlfe_online(void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct cam_path *path;
	cam_status status;
	struct ctlfe_lun_softc *lun_softc;
	struct cam_periph *periph;

	bus_softc = (struct ctlfe_softc *)arg;

	/*
	 * Create the wildcard LUN before bringing the port online.
	 */
	status = xpt_create_path(&path, /*periph*/ NULL,
				 bus_softc->path_id, CAM_TARGET_WILDCARD,
				 CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path for wildcard periph\n",
				__func__);
		return;
	}

	/* Allocate before taking the path lock; M_WAITOK may sleep. */
	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO);

	xpt_path_lock(path);
	periph = cam_periph_find(path, "ctl");
	if (periph != NULL) {
		/*
		 * We've already got a periph, no need to alloc a new one.
		 * NOTE(review): this also returns without calling
		 * ctlfe_onoffline() -- presumably the port is already
		 * online in that case; confirm against callers.
		 */
		xpt_path_unlock(path);
		xpt_free_path(path);
		free(lun_softc, M_CTLFE);
		return;
	}
	lun_softc->parent_softc = bus_softc;
	lun_softc->flags |= CTLFE_LUN_WILDCARD;

	status = cam_periph_alloc(ctlferegister,
				  ctlfeoninvalidate,
				  ctlfecleanup,
				  ctlfestart,
				  "ctl",
				  CAM_PERIPH_BIO,
				  path,
				  ctlfeasync,
				  0,
				  lun_softc);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		const struct cam_status_entry *entry;

		entry = cam_fetch_status_entry(status);
		printf("%s: CAM error %s (%#x) returned from "
		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
		       entry->status_text : "Unknown", status);
		free(lun_softc, M_CTLFE);
	}

	xpt_path_unlock(path);
	ctlfe_onoffline(arg, /*online*/ 1);
	xpt_free_path(path);
}
1651 
1652 static void
1653 ctlfe_offline(void *arg)
1654 {
1655 	struct ctlfe_softc *bus_softc;
1656 	struct cam_path *path;
1657 	cam_status status;
1658 	struct cam_periph *periph;
1659 
1660 	bus_softc = (struct ctlfe_softc *)arg;
1661 
1662 	ctlfe_onoffline(arg, /*online*/ 0);
1663 
1664 	/*
1665 	 * Disable the wildcard LUN for this port now that we have taken
1666 	 * the port offline.
1667 	 */
1668 	status = xpt_create_path(&path, /*periph*/ NULL,
1669 				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1670 				 CAM_LUN_WILDCARD);
1671 	if (status != CAM_REQ_CMP) {
1672 		printf("%s: unable to create path for wildcard periph\n",
1673 		       __func__);
1674 		return;
1675 	}
1676 	xpt_path_lock(path);
1677 	if ((periph = cam_periph_find(path, "ctl")) != NULL)
1678 		cam_periph_invalidate(periph);
1679 	xpt_path_unlock(path);
1680 	xpt_free_path(path);
1681 }
1682 
1683 /*
1684  * This will get called to enable a LUN on every bus that is attached to
1685  * CTL.  So we only need to create a path/periph for this particular bus.
1686  */
1687 static int
1688 ctlfe_lun_enable(void *arg, int lun_id)
1689 {
1690 	struct ctlfe_softc *bus_softc;
1691 	struct ctlfe_lun_softc *softc;
1692 	struct cam_path *path;
1693 	struct cam_periph *periph;
1694 	cam_status status;
1695 
1696 	bus_softc = (struct ctlfe_softc *)arg;
1697 	if (bus_softc->hba_misc & PIM_EXTLUNS)
1698 		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
1699 
1700 	status = xpt_create_path(&path, /*periph*/ NULL,
1701 	    bus_softc->path_id, bus_softc->target_id, lun_id);
1702 	/* XXX KDM need some way to return status to CTL here? */
1703 	if (status != CAM_REQ_CMP) {
1704 		printf("%s: could not create path, status %#x\n", __func__,
1705 		       status);
1706 		return (1);
1707 	}
1708 
1709 	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
1710 	xpt_path_lock(path);
1711 	periph = cam_periph_find(path, "ctl");
1712 	if (periph != NULL) {
1713 		/* We've already got a periph, no need to alloc a new one. */
1714 		xpt_path_unlock(path);
1715 		xpt_free_path(path);
1716 		free(softc, M_CTLFE);
1717 		return (0);
1718 	}
1719 	softc->parent_softc = bus_softc;
1720 
1721 	status = cam_periph_alloc(ctlferegister,
1722 				  ctlfeoninvalidate,
1723 				  ctlfecleanup,
1724 				  ctlfestart,
1725 				  "ctl",
1726 				  CAM_PERIPH_BIO,
1727 				  path,
1728 				  ctlfeasync,
1729 				  0,
1730 				  softc);
1731 
1732 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1733 		const struct cam_status_entry *entry;
1734 
1735 		entry = cam_fetch_status_entry(status);
1736 		printf("%s: CAM error %s (%#x) returned from "
1737 		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1738 		       entry->status_text : "Unknown", status);
1739 		free(softc, M_CTLFE);
1740 	}
1741 
1742 	xpt_path_unlock(path);
1743 	xpt_free_path(path);
1744 	return (0);
1745 }
1746 
1747 /*
1748  * This will get called when the user removes a LUN to disable that LUN
1749  * on every bus that is attached to CTL.
1750  */
1751 static int
1752 ctlfe_lun_disable(void *arg, int lun_id)
1753 {
1754 	struct ctlfe_softc *softc;
1755 	struct ctlfe_lun_softc *lun_softc;
1756 
1757 	softc = (struct ctlfe_softc *)arg;
1758 	if (softc->hba_misc & PIM_EXTLUNS)
1759 		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
1760 
1761 	mtx_lock(&softc->lun_softc_mtx);
1762 	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
1763 		struct cam_path *path;
1764 
1765 		path = lun_softc->periph->path;
1766 
1767 		if ((xpt_path_target_id(path) == softc->target_id)
1768 		 && (xpt_path_lun_id(path) == lun_id)) {
1769 			break;
1770 		}
1771 	}
1772 	if (lun_softc == NULL) {
1773 		mtx_unlock(&softc->lun_softc_mtx);
1774 		printf("%s: can't find lun %d\n", __func__, lun_id);
1775 		return (1);
1776 	}
1777 	cam_periph_acquire(lun_softc->periph);
1778 	mtx_unlock(&softc->lun_softc_mtx);
1779 
1780 	cam_periph_lock(lun_softc->periph);
1781 	cam_periph_invalidate(lun_softc->periph);
1782 	cam_periph_unlock(lun_softc->periph);
1783 	cam_periph_release(lun_softc->periph);
1784 	return (0);
1785 }
1786 
1787 static void
1788 ctlfe_dump_sim(struct cam_sim *sim)
1789 {
1790 
1791 	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
1792 	       sim->sim_name, sim->unit_number,
1793 	       sim->max_tagged_dev_openings, sim->max_dev_openings);
1794 }
1795 
1796 /*
1797  * Assumes that the SIM lock is held.
1798  */
1799 static void
1800 ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
1801 {
1802 	struct ccb_hdr *hdr;
1803 	struct cam_periph *periph;
1804 	int num_items;
1805 
1806 	periph = softc->periph;
1807 	num_items = 0;
1808 
1809 	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
1810 		union ctl_io *io = hdr->io_ptr;
1811 
1812 		num_items++;
1813 
1814 		/*
1815 		 * Only regular SCSI I/O is put on the work
1816 		 * queue, so we can print sense here.  There may be no
1817 		 * sense if it's no the queue for a DMA, but this serves to
1818 		 * print out the CCB as well.
1819 		 *
1820 		 * XXX KDM switch this over to scsi_sense_print() when
1821 		 * CTL is merged in with CAM.
1822 		 */
1823 		ctl_io_error_print(io, NULL);
1824 
1825 		/*
1826 		 * Print DMA status if we are DMA_QUEUED.
1827 		 */
1828 		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
1829 			xpt_print(periph->path,
1830 			    "Total %u, Current %u, Resid %u\n",
1831 			    io->scsiio.kern_total_len,
1832 			    io->scsiio.kern_data_len,
1833 			    io->scsiio.kern_data_resid);
1834 		}
1835 	}
1836 
1837 	xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items);
1838 	xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent);
1839 }
1840 
1841 /*
1842  * Datamove/done routine called by CTL.  Put ourselves on the queue to
1843  * receive a CCB from CAM so we can queue the continue I/O request down
1844  * to the adapter.
1845  */
1846 static void
1847 ctlfe_datamove(union ctl_io *io)
1848 {
1849 	union ccb *ccb;
1850 	struct cam_periph *periph;
1851 	struct ctlfe_lun_softc *softc;
1852 
1853 	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
1854 	    ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
1855 
1856 	io->scsiio.ext_data_filled = 0;
1857 	ccb = PRIV_CCB(io);
1858 	periph = xpt_path_periph(ccb->ccb_h.path);
1859 	cam_periph_lock(periph);
1860 	softc = (struct ctlfe_lun_softc *)periph->softc;
1861 	io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
1862 	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
1863 		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
1864 	TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
1865 			  periph_links.tqe);
1866 	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
1867 	cam_periph_unlock(periph);
1868 }
1869 
/*
 * Completion routine called by CTL when an I/O is done.  Task management
 * I/O is answered with a NOTIFY ACKNOWLEDGE; SCSI I/O either re-queues
 * the ATIO (status already sent with the data) or queues a final-status
 * CTIO on the work queue.
 */
static void
ctlfe_done(union ctl_io *io)
{
	union ccb *ccb;
	struct cam_periph *periph;
	struct ctlfe_lun_softc *softc;

	ccb = PRIV_CCB(io);
	periph = xpt_path_periph(ccb->ccb_h.path);
	cam_periph_lock(periph);
	softc = (struct ctlfe_lun_softc *)periph->softc;

	if (io->io_hdr.io_type == CTL_IO_TASK) {
		/*
		 * Send the notify acknowledge down to the SIM, to let it
		 * know we processed the task management command.
		 */
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
		/* Translate the CTL task status into a CAM TMF response. */
		switch (io->taskio.task_status) {
		case CTL_TASK_FUNCTION_COMPLETE:
			ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
			break;
		case CTL_TASK_FUNCTION_SUCCEEDED:
			ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_FUNCTION_REJECTED:
			ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_LUN_DOES_NOT_EXIST:
			ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_FUNCTION_NOT_SUPPORTED:
			ccb->cna2.arg = CAM_RSP_TMF_FAILED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		}
		/* Additional response info rides in the upper bytes of arg. */
		ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
		xpt_action(ccb);
	} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
		/* Status already went out with the data; just re-queue the
		 * ATIO.  ctlfe_requeue_ccb() drops the periph lock. */
		ctlfe_requeue_ccb(periph, ccb, /* unlock */1);
		return;
	} else {
		/* Queue a CTIO carrying final status for the start routine. */
		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
				  periph_links.tqe);
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	}

	cam_periph_unlock(periph);
}
1924 
1925 static void
1926 ctlfe_dump(void)
1927 {
1928 	struct ctlfe_softc *bus_softc;
1929 	struct ctlfe_lun_softc *lun_softc;
1930 
1931 	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
1932 		ctlfe_dump_sim(bus_softc->sim);
1933 		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links)
1934 			ctlfe_dump_queue(lun_softc);
1935 	}
1936 }
1937