xref: /freebsd/sys/cam/ctl/scsi_ctl.c (revision 93a065e7496dfbfbd0a5b0208ef763f37ea975c7)
1 /*-
2  * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
3  * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    substantially similar to the "NO WARRANTY" disclaimer below
14  *    ("Disclaimer") and any redistribution must be conditioned upon
15  *    including a substantially similar Disclaimer requirement for further
16  *    binary redistribution.
17  *
18  * NO WARRANTY
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
22  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
27  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
28  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGES.
30  *
31  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
32  */
33 /*
34  * Peripheral driver interface between CAM and CTL (CAM Target Layer).
35  *
36  * Author: Ken Merry <ken@FreeBSD.org>
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #include <sys/param.h>
43 #include <sys/queue.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/condvar.h>
49 #include <sys/malloc.h>
50 #include <sys/bus.h>
51 #include <sys/endian.h>
52 #include <sys/sbuf.h>
53 #include <sys/sysctl.h>
54 #include <sys/types.h>
56 #include <machine/bus.h>
57 
58 #include <cam/cam.h>
59 #include <cam/cam_ccb.h>
60 #include <cam/cam_periph.h>
61 #include <cam/cam_queue.h>
62 #include <cam/cam_xpt_periph.h>
63 #include <cam/cam_debug.h>
64 #include <cam/cam_sim.h>
65 #include <cam/cam_xpt.h>
66 
67 #include <cam/scsi/scsi_all.h>
68 #include <cam/scsi/scsi_message.h>
69 
70 #include <cam/ctl/ctl_io.h>
71 #include <cam/ctl/ctl.h>
72 #include <cam/ctl/ctl_frontend.h>
73 #include <cam/ctl/ctl_util.h>
74 #include <cam/ctl/ctl_error.h>
75 
76 struct ctlfe_softc {
77 	struct ctl_port	port;
78 	path_id_t	path_id;
79 	target_id_t	target_id;
80 	uint32_t	hba_misc;
81 	u_int		maxio;
82 	struct cam_sim *sim;
83 	char		port_name[DEV_IDLEN];
84 	struct mtx	lun_softc_mtx;
85 	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
86 	STAILQ_ENTRY(ctlfe_softc) links;
87 };
88 
89 STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
90 struct mtx ctlfe_list_mtx;
91 static char ctlfe_mtx_desc[] = "ctlfelist";
92 #ifdef CTLFE_INIT_ENABLE
93 static int ctlfe_max_targets = 1;
94 static int ctlfe_num_targets = 0;
95 #endif
96 
97 typedef enum {
98 	CTLFE_LUN_NONE		= 0x00,
99 	CTLFE_LUN_WILDCARD	= 0x01
100 } ctlfe_lun_flags;
101 
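/*
 * Per-LUN state, allocated for each LUN (or the wildcard LUN) enabled
 * through this frontend.  The alloc/free counters are compared in
 * ctlfecleanup() to catch leaked CCBs.
 */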
102 struct ctlfe_lun_softc {
103 	struct ctlfe_softc *parent_softc;
104 	struct cam_periph *periph;
105 	ctlfe_lun_flags flags;
106 	uint64_t ccbs_alloced;
107 	uint64_t ccbs_freed;
108 	uint64_t ctios_sent;
109 	uint64_t ctios_returned;
110 	uint64_t atios_alloced;
111 	uint64_t atios_freed;
112 	uint64_t inots_alloced;
113 	uint64_t inots_freed;
114 	/* bus_dma_tag_t dma_tag; */
115 	TAILQ_HEAD(, ccb_hdr) work_queue;
116 	STAILQ_ENTRY(ctlfe_lun_softc) links;
117 };
118 
119 typedef enum {
120 	CTLFE_CMD_NONE		= 0x00,
121 	CTLFE_CMD_PIECEWISE	= 0x01
122 } ctlfe_cmd_flags;
123 
124 struct ctlfe_cmd_info {
125 	int cur_transfer_index;
126 	size_t cur_transfer_off;
127 	ctlfe_cmd_flags flags;
128 	/*
129 	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
130 	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
131 	 * i386 and 512 bytes on amd64.
132 	 */
133 #define CTLFE_MAX_SEGS	32
134 	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
135 };
136 
137 /*
138  * When we register the adapter/bus, request that this many ctl_ios be
139  * allocated.  This should be the maximum supported by the adapter, but we
140  * currently don't have a way to get that back from the path inquiry.
141  * XXX KDM add that to the path inquiry.
142  */
143 #define	CTLFE_REQ_CTL_IO	4096
144 /*
145  * Number of Accept Target I/O CCBs to allocate and queue down to the
146  * adapter per LUN.
147  * XXX KDM should this be controlled by CTL?
148  */
149 #define	CTLFE_ATIO_PER_LUN	1024
150 /*
151  * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
152  * allocate and queue down to the adapter per LUN.
153  * XXX KDM should this be controlled by CTL?
154  */
155 #define	CTLFE_IN_PER_LUN	1024
156 
157 /*
158  * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
159  * status to the initiator.  The SIM is expected to have its own timeouts,
160  * so we're not putting this timeout around the CCB execution time.  The
161  * SIM should timeout and let us know if it has an issue.
162  */
163 #define	CTLFE_DMA_TIMEOUT	60
164 
165 /*
166  * Turn this on to enable extra debugging prints.
167  */
168 #if 0
169 #define	CTLFE_DEBUG
170 #endif
171 
172 /*
173  * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
174  * in the FreeBSD initiator that makes it unable to rescan the target if
175  * the target gets rebooted and the WWNN/WWPN stay the same.
176  */
177 #if 0
178 #define	RANDOM_WWNN
179 #endif
180 
181 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
182 
183 #define	io_ptr		ppriv_ptr0
184 
185 /* This is only used in the CTIO */
186 #define	ccb_atio	ppriv_ptr1
187 
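/*
 * CTL frontend-private pointers: the CCB paired with a ctl_io, and the
 * per-command ctlfe_cmd_info.
 */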
188 #define PRIV_CCB(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
189 #define PRIV_INFO(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])
190 
191 static int		ctlfeinitialize(void);
192 static int		ctlfeshutdown(void);
193 static periph_init_t	ctlfeperiphinit;
194 static void		ctlfeasync(void *callback_arg, uint32_t code,
195 				   struct cam_path *path, void *arg);
196 static periph_ctor_t	ctlferegister;
197 static periph_oninv_t	ctlfeoninvalidate;
198 static periph_dtor_t	ctlfecleanup;
199 static periph_start_t	ctlfestart;
200 static void		ctlfedone(struct cam_periph *periph,
201 				  union ccb *done_ccb);
202 
203 static void 		ctlfe_onoffline(void *arg, int online);
204 static void 		ctlfe_online(void *arg);
205 static void 		ctlfe_offline(void *arg);
206 static int 		ctlfe_lun_enable(void *arg, int lun_id);
207 static int 		ctlfe_lun_disable(void *arg, int lun_id);
208 static void		ctlfe_dump_sim(struct cam_sim *sim);
209 static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
210 static void 		ctlfe_datamove(union ctl_io *io);
211 static void 		ctlfe_done(union ctl_io *io);
212 static void 		ctlfe_dump(void);
213 
214 static struct periph_driver ctlfe_driver =
215 {
216 	ctlfeperiphinit, "ctl",
217 	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
218 	CAM_PERIPH_DRV_EARLY
219 };
220 
221 static struct ctl_frontend ctlfe_frontend =
222 {
223 	.name = "camtgt",
224 	.init = ctlfeinitialize,
225 	.fe_dump = ctlfe_dump,
226 	.shutdown = ctlfeshutdown,
227 };
228 CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);
229 
230 static int
231 ctlfeshutdown(void)
232 {
233 
234 	/* CAM does not support periph driver unregister now. */
235 	return (EBUSY);
236 }
237 
238 static int
239 ctlfeinitialize(void)
240 {
241 
242 	STAILQ_INIT(&ctlfe_softc_list);
243 	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
244 	periphdriver_register(&ctlfe_driver);
245 	return (0);
246 }
247 
248 static void
249 ctlfeperiphinit(void)
250 {
251 	cam_status status;
252 
253 	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
254 				    AC_CONTRACT, ctlfeasync, NULL, NULL);
255 	if (status != CAM_REQ_CMP) {
256 		printf("ctl: Failed to attach async callback due to CAM "
257 		       "status 0x%x!\n", status);
258 	}
259 }
260 
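/*
 * Asynchronous event callback.  Registers a new CTL port when a
 * target-capable path appears, tears it down when the path goes away,
 * and adds or removes initiators on AC_CONTRACT device-change events.
 */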
261 static void
262 ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
263 {
264 	struct ctlfe_softc *softc;
265 
266 #ifdef CTLFEDEBUG
267 	printf("%s: entered\n", __func__);
268 #endif
269 
270 	mtx_lock(&ctlfe_list_mtx);
271 	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
272 		if (softc->path_id == xpt_path_path_id(path))
273 			break;
274 	}
275 	mtx_unlock(&ctlfe_list_mtx);
276 
277 	/*
278 	 * When a new path gets registered, and it is capable of target
279 	 * mode, go ahead and attach.  Later on, we may need to be more
280 	 * selective, but for now this will be sufficient.
281  	 */
282 	switch (code) {
283 	case AC_PATH_REGISTERED: {
284 		struct ctl_port *port;
285 		struct ccb_pathinq *cpi;
286 		int retval;
287 
288 		cpi = (struct ccb_pathinq *)arg;
289 
290 		/* Don't attach if it doesn't support target mode */
291 		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
292 #ifdef CTLFEDEBUG
293 			printf("%s: SIM %s%d doesn't support target mode\n",
294 			       __func__, cpi->dev_name, cpi->unit_number);
295 #endif
296 			break;
297 		}
298 
299 		if (softc != NULL) {
300 #ifdef CTLFEDEBUG
301 			printf("%s: CTL port for CAM path %u already exists\n",
302 			       __func__, xpt_path_path_id(path));
303 #endif
304 			break;
305 		}
306 
307 #ifdef CTLFE_INIT_ENABLE
308 		if (ctlfe_num_targets >= ctlfe_max_targets) {
309 			union ccb *ccb;
310 
311 			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
312 						  M_NOWAIT | M_ZERO);
313 			if (ccb == NULL) {
314 				printf("%s: unable to malloc CCB!\n", __func__);
315 				return;
316 			}
317 			xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
318 
319 			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
320 			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
321 			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;
322 
323 			xpt_action(ccb);
324 
325 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
326 			     CAM_REQ_CMP) {
327 				printf("%s: SIM %s%d (path id %d) initiator "
328 				       "enable failed with status %#x\n",
329 				       __func__, cpi->dev_name,
330 				       cpi->unit_number, cpi->ccb_h.path_id,
331 				       ccb->ccb_h.status);
332 			} else {
333 				printf("%s: SIM %s%d (path id %d) initiator "
334 				       "enable succeeded\n",
335 				       __func__, cpi->dev_name,
336 				       cpi->unit_number, cpi->ccb_h.path_id);
337 			}
338 
339 			free(ccb, M_TEMP);
340 
341 			break;
342 		} else {
343 			ctlfe_num_targets++;
344 		}
345 
346 		printf("%s: ctlfe_num_targets = %d\n", __func__,
347 		       ctlfe_num_targets);
348 #endif /* CTLFE_INIT_ENABLE */
349 
350 		/*
351 		 * We're in an interrupt context here, so we have to
352 		 * use M_NOWAIT.  Of course this means trouble if we
353 		 * can't allocate memory.
354 		 */
355 		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
356 		if (softc == NULL) {
357 			printf("%s: unable to malloc %zd bytes for softc\n",
358 			       __func__, sizeof(*softc));
359 			return;
360 		}
361 
362 		softc->path_id = cpi->ccb_h.path_id;
363 		softc->target_id = cpi->initiator_id;
364 		softc->sim = xpt_path_sim(path);
365 		softc->hba_misc = cpi->hba_misc;
366 		if (cpi->maxio != 0)
367 			softc->maxio = cpi->maxio;
368 		else
369 			softc->maxio = DFLTPHYS;
370 		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
371 		STAILQ_INIT(&softc->lun_softc_list);
372 
373 		port = &softc->port;
374 		port->frontend = &ctlfe_frontend;
375 
376 		/*
377 		 * XXX KDM should we be more accurate here ?
378 		 */
379 		if (cpi->transport == XPORT_FC)
380 			port->port_type = CTL_PORT_FC;
381 		else if (cpi->transport == XPORT_SAS)
382 			port->port_type = CTL_PORT_SAS;
383 		else
384 			port->port_type = CTL_PORT_SCSI;
385 
386 		/* XXX KDM what should the real number be here? */
387 		port->num_requested_ctl_io = 4096;
388 		snprintf(softc->port_name, sizeof(softc->port_name),
389 			 "%s%d", cpi->dev_name, cpi->unit_number);
390 		/*
391 		 * XXX KDM it would be nice to allocate storage in the
392 		 * frontend structure itself.
393 	 	 */
394 		port->port_name = softc->port_name;
395 		port->physical_port = cpi->bus_id;
396 		port->virtual_port = 0;
397 		port->port_online = ctlfe_online;
398 		port->port_offline = ctlfe_offline;
399 		port->onoff_arg = softc;
400 		port->lun_enable = ctlfe_lun_enable;
401 		port->lun_disable = ctlfe_lun_disable;
402 		port->targ_lun_arg = softc;
403 		port->fe_datamove = ctlfe_datamove;
404 		port->fe_done = ctlfe_done;
405 		port->targ_port = -1;
406 
407 		/*
408 		 * XXX KDM need to figure out whether we're the master or
409 		 * slave.
410 		 */
411 #ifdef CTLFEDEBUG
412 		printf("%s: calling ctl_port_register() for %s%d\n",
413 		       __func__, cpi->dev_name, cpi->unit_number);
414 #endif
415 		retval = ctl_port_register(port);
416 		if (retval != 0) {
417 			printf("%s: ctl_port_register() failed with "
418 			       "error %d!\n", __func__, retval);
419 			mtx_destroy(&softc->lun_softc_mtx);
420 			free(softc, M_CTLFE);
421 			break;
422 		} else {
423 			mtx_lock(&ctlfe_list_mtx);
424 			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
425 			mtx_unlock(&ctlfe_list_mtx);
426 		}
427 
428 		break;
429 	}
430 	case AC_PATH_DEREGISTERED: {
431 
432 		if (softc != NULL) {
433 			/*
434 			 * XXX KDM are we certain at this point that there
435 			 * are no outstanding commands for this frontend?
436 			 */
437 			mtx_lock(&ctlfe_list_mtx);
438 			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
439 			    links);
440 			mtx_unlock(&ctlfe_list_mtx);
441 			ctl_port_deregister(&softc->port);
442 			mtx_destroy(&softc->lun_softc_mtx);
443 			free(softc, M_CTLFE);
444 		}
445 		break;
446 	}
447 	case AC_CONTRACT: {
448 		struct ac_contract *ac;
449 
450 		ac = (struct ac_contract *)arg;
451 
452 		switch (ac->contract_number) {
453 		case AC_CONTRACT_DEV_CHG: {
454 			struct ac_device_changed *dev_chg;
455 			int retval;
456 
457 			dev_chg = (struct ac_device_changed *)ac->contract_data;
458 
459 			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
460 			       __func__, dev_chg->wwpn, dev_chg->port,
461 			       xpt_path_path_id(path), dev_chg->target,
462 			       (dev_chg->arrived == 0) ?  "left" : "arrived");
463 
464 			if (softc == NULL) {
465 				printf("%s: CTL port for CAM path %u not "
466 				       "found!\n", __func__,
467 				       xpt_path_path_id(path));
468 				break;
469 			}
470 			if (dev_chg->arrived != 0) {
471 				retval = ctl_add_initiator(&softc->port,
472 				    dev_chg->target, dev_chg->wwpn, NULL);
473 			} else {
474 				retval = ctl_remove_initiator(&softc->port,
475 				    dev_chg->target);
476 			}
477 
478 			if (retval < 0) {
479 				printf("%s: could not %s port %d iid %u "
480 				       "WWPN %#jx!\n", __func__,
481 				       (dev_chg->arrived != 0) ? "add" :
482 				       "remove", softc->port.targ_port,
483 				       dev_chg->target,
484 				       (uintmax_t)dev_chg->wwpn);
485 			}
486 			break;
487 		}
488 		default:
489 			printf("%s: unsupported contract number %ju\n",
490 			       __func__, (uintmax_t)ac->contract_number);
491 			break;
492 		}
493 		break;
494 	}
495 	default:
496 		break;
497 	}
498 }
499 
500 static cam_status
501 ctlferegister(struct cam_periph *periph, void *arg)
502 {
503 	struct ctlfe_softc *bus_softc;
504 	struct ctlfe_lun_softc *softc;
505 	union ccb en_lun_ccb;
506 	cam_status status;
507 	int i;
508 
509 	softc = (struct ctlfe_lun_softc *)arg;
510 	bus_softc = softc->parent_softc;
511 
512 	TAILQ_INIT(&softc->work_queue);
513 	softc->periph = periph;
514 	periph->softc = softc;
515 
516 	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
517 	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
518 	en_lun_ccb.cel.grp6_len = 0;
519 	en_lun_ccb.cel.grp7_len = 0;
520 	en_lun_ccb.cel.enable = 1;
521 	xpt_action(&en_lun_ccb);
522 	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
523 	if (status != CAM_REQ_CMP) {
524 		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
525 			  __func__, en_lun_ccb.ccb_h.status);
526 		return (status);
527 	}
528 
529 	status = CAM_REQ_CMP;
530 
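	/*
	 * Allocate the Accept Target I/O CCBs and queue them to the SIM.
	 * Each ATIO is paired with a ctl_io and a ctlfe_cmd_info; on any
	 * allocation or queueing failure we stop and make do with however
	 * many we managed to queue.
	 */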
531 	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
532 		union ccb *new_ccb;
533 		union ctl_io *new_io;
534 		struct ctlfe_cmd_info *cmd_info;
535 
536 		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
537 					      M_ZERO|M_NOWAIT);
538 		if (new_ccb == NULL) {
539 			status = CAM_RESRC_UNAVAIL;
540 			break;
541 		}
542 		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
543 		if (new_io == NULL) {
544 			free(new_ccb, M_CTLFE);
545 			status = CAM_RESRC_UNAVAIL;
546 			break;
547 		}
548 		cmd_info = malloc(sizeof(*cmd_info), M_CTLFE,
549 		    M_ZERO | M_NOWAIT);
550 		if (cmd_info == NULL) {
551 			ctl_free_io(new_io);
552 			free(new_ccb, M_CTLFE);
553 			status = CAM_RESRC_UNAVAIL;
554 			break;
555 		}
556 		PRIV_INFO(new_io) = cmd_info;
557 		softc->atios_alloced++;
558 		new_ccb->ccb_h.io_ptr = new_io;
559 
560 		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
561 		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
562 		new_ccb->ccb_h.cbfcnp = ctlfedone;
563 		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
564 		xpt_action(new_ccb);
565 		status = new_ccb->ccb_h.status;
566 		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
567 			free(cmd_info, M_CTLFE);
568 			ctl_free_io(new_io);
569 			free(new_ccb, M_CTLFE);
570 			break;
571 		}
572 	}
573 
574 	status = cam_periph_acquire(periph);
575 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
576 		xpt_print(periph->path, "%s: could not acquire reference "
577 			  "count, status = %#x\n", __func__, status);
578 		return (status);
579 	}
580 
581 	if (i == 0) {
582 		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
583 			  "status 0x%x\n", __func__, status);
584 		return (CAM_REQ_CMP_ERR);
585 	}
586 
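	/*
	 * Allocate the immediate notify CCBs (used for aborts, resets and
	 * other task management) and queue them to the SIM, each paired
	 * with a ctl_io just like the ATIOs above.
	 */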
587 	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
588 		union ccb *new_ccb;
589 		union ctl_io *new_io;
590 
591 		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
592 					      M_ZERO|M_NOWAIT);
593 		if (new_ccb == NULL) {
594 			status = CAM_RESRC_UNAVAIL;
595 			break;
596 		}
597 		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
598 		if (new_io == NULL) {
599 			free(new_ccb, M_CTLFE);
600 			status = CAM_RESRC_UNAVAIL;
601 			break;
602 		}
603 		softc->inots_alloced++;
604 		new_ccb->ccb_h.io_ptr = new_io;
605 
606 		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
607 		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
608 		new_ccb->ccb_h.cbfcnp = ctlfedone;
609 		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
610 		xpt_action(new_ccb);
611 		status = new_ccb->ccb_h.status;
612 		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
613 			/*
614 			 * Note that we don't free the CCB here.  If the
615 			 * status is not CAM_REQ_INPROG, then we're
616 			 * probably talking to a SIM that says it is
617 			 * target-capable but doesn't support the
618 			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
619 			 * older API.  In that case, it'll call xpt_done()
620 			 * on the CCB, and we need to free it in our done
621 			 * routine as a result.
622 			 */
623 			break;
624 		}
625 	}
626 	if ((i == 0)
627 	 || (status != CAM_REQ_INPROG)) {
628 		xpt_print(periph->path, "%s: could not allocate immediate "
629 			  "notify CCBs, status 0x%x\n", __func__, status);
630 		return (CAM_REQ_CMP_ERR);
631 	}
632 	mtx_lock(&bus_softc->lun_softc_mtx);
633 	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
634 	mtx_unlock(&bus_softc->lun_softc_mtx);
635 	return (CAM_REQ_CMP);
636 }
637 
638 static void
639 ctlfeoninvalidate(struct cam_periph *periph)
640 {
641 	union ccb en_lun_ccb;
642 	cam_status status;
643 	struct ctlfe_softc *bus_softc;
644 	struct ctlfe_lun_softc *softc;
645 
646 	softc = (struct ctlfe_lun_softc *)periph->softc;
647 
648 	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
649 	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
650 	en_lun_ccb.cel.grp6_len = 0;
651 	en_lun_ccb.cel.grp7_len = 0;
652 	en_lun_ccb.cel.enable = 0;
653 	xpt_action(&en_lun_ccb);
654 	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
655 	if (status != CAM_REQ_CMP) {
656 		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
657 			  __func__, en_lun_ccb.ccb_h.status);
658 		/*
659 		 * XXX KDM what do we do now?
660 		 */
661 	}
662 
663 	bus_softc = softc->parent_softc;
664 	mtx_lock(&bus_softc->lun_softc_mtx);
665 	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
666 	mtx_unlock(&bus_softc->lun_softc_mtx);
667 }
668 
669 static void
670 ctlfecleanup(struct cam_periph *periph)
671 {
672 	struct ctlfe_lun_softc *softc;
673 
674 	softc = (struct ctlfe_lun_softc *)periph->softc;
675 
676 	KASSERT(softc->ccbs_freed == softc->ccbs_alloced, ("%s: "
677 		"ccbs_freed %ju != ccbs_alloced %ju", __func__,
678 		softc->ccbs_freed, softc->ccbs_alloced));
679 	KASSERT(softc->ctios_returned == softc->ctios_sent, ("%s: "
680 		"ctios_returned %ju != ctios_sent %ju", __func__,
681 		softc->ctios_returned, softc->ctios_sent));
682 	KASSERT(softc->atios_freed == softc->atios_alloced, ("%s: "
683 		"atios_freed %ju != atios_alloced %ju", __func__,
684 		softc->atios_freed, softc->atios_alloced));
685 	KASSERT(softc->inots_freed == softc->inots_alloced, ("%s: "
686 		"inots_freed %ju != inots_alloced %ju", __func__,
687 		softc->inots_freed, softc->inots_alloced));
688 
689 	free(softc, M_CTLFE);
690 }
691 
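/*
 * Build the CAM data pointer or scatter/gather list for a CTIO from the
 * CTL I/O.  Transfers larger than the SIM's maxio, or needing more than
 * CTLFE_MAX_SEGS segments, are split up: CTLFE_CMD_PIECEWISE is set and
 * the current position is saved so the next piece can be issued when the
 * CTIO completes.  Any SRR offset left in ext_data_filled is consumed
 * here as a one-time shift.
 */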
692 static void
693 ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
694     ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
695     u_int16_t *sglist_cnt)
696 {
697 	struct ctlfe_softc *bus_softc;
698 	struct ctlfe_cmd_info *cmd_info;
699 	struct ctl_sg_entry *ctl_sglist;
700 	bus_dma_segment_t *cam_sglist;
701 	size_t off;
702 	int i, idx;
703 
704 	cmd_info = PRIV_INFO(io);
705 	bus_softc = softc->parent_softc;
706 
707 	/*
708 	 * Set the direction, relative to the initiator.
709 	 */
710 	*flags &= ~CAM_DIR_MASK;
711 	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
712 		*flags |= CAM_DIR_IN;
713 	else
714 		*flags |= CAM_DIR_OUT;
715 
716 	*flags &= ~CAM_DATA_MASK;
717 	idx = cmd_info->cur_transfer_index;
718 	off = cmd_info->cur_transfer_off;
719 	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
720 	if (io->scsiio.kern_sg_entries == 0) {	/* No S/G list. */
721 
722 		/* One time shift for SRR offset. */
723 		off += io->scsiio.ext_data_filled;
724 		io->scsiio.ext_data_filled = 0;
725 
726 		*data_ptr = io->scsiio.kern_data_ptr + off;
727 		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
728 			*dxfer_len = io->scsiio.kern_data_len - off;
729 		} else {
730 			*dxfer_len = bus_softc->maxio;
731 			cmd_info->cur_transfer_off += bus_softc->maxio;
732 			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
733 		}
734 		*sglist_cnt = 0;
735 
736 		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
737 			*flags |= CAM_DATA_PADDR;
738 		else
739 			*flags |= CAM_DATA_VADDR;
740 	} else {	/* S/G list with physical or virtual pointers. */
741 		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
742 
743 		/* One time shift for SRR offset. */
744 		while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
745 			io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
746 			idx++;
747 			off = 0;
748 		}
749 		off += io->scsiio.ext_data_filled;
750 		io->scsiio.ext_data_filled = 0;
751 
752 		cam_sglist = cmd_info->cam_sglist;
753 		*dxfer_len = 0;
754 		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
755 			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
756 			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
757 				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
758 				*dxfer_len += cam_sglist[i].ds_len;
759 			} else {
760 				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
761 				cmd_info->cur_transfer_index = idx + i;
762 				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
763 				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
764 				*dxfer_len += cam_sglist[i].ds_len;
765 				if (ctl_sglist[i].len != 0)
766 					i++;
767 				break;
768 			}
769 			if (i == (CTLFE_MAX_SEGS - 1) &&
770 			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
771 				cmd_info->cur_transfer_index = idx + i + 1;
772 				cmd_info->cur_transfer_off = 0;
773 				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
774 				i++;
775 				break;
776 			}
777 			off = 0;
778 		}
779 		*sglist_cnt = i;
780 		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
781 			*flags |= CAM_DATA_SG_PADDR;
782 		else
783 			*flags |= CAM_DATA_SG;
784 		*data_ptr = (uint8_t *)cam_sglist;
785 	}
786 }
787 
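/*
 * Periph start routine.  Take the next ATIO off the work queue and turn
 * the supplied CCB into a CTIO, either to move data or to send final
 * status back to the initiator.  Aborted commands are discarded and their
 * ATIOs recycled back to the SIM.
 */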
788 static void
789 ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
790 {
791 	struct ctlfe_lun_softc *softc;
792 	struct ctlfe_cmd_info *cmd_info;
793 	struct ccb_hdr *ccb_h;
794 	struct ccb_accept_tio *atio;
795 	struct ccb_scsiio *csio;
796 	uint8_t *data_ptr;
797 	uint32_t dxfer_len;
798 	ccb_flags flags;
799 	union ctl_io *io;
800 	uint8_t scsi_status;
801 
802 	softc = (struct ctlfe_lun_softc *)periph->softc;
803 	softc->ccbs_alloced++;
804 
805 	ccb_h = TAILQ_FIRST(&softc->work_queue);
806 	if (ccb_h == NULL) {
807 		softc->ccbs_freed++;
808 		xpt_release_ccb(start_ccb);
809 		return;
810 	}
811 
812 	/* Take the ATIO off the work queue */
813 	TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
814 	atio = (struct ccb_accept_tio *)ccb_h;
815 	io = (union ctl_io *)ccb_h->io_ptr;
816 	csio = &start_ccb->csio;
817 
818 	flags = atio->ccb_h.flags &
819 		(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
820 	cmd_info = PRIV_INFO(io);
821 	cmd_info->cur_transfer_index = 0;
822 	cmd_info->cur_transfer_off = 0;
823 	cmd_info->flags = 0;
824 
825 	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
826 		/*
827 		 * Datamove call, we need to setup the S/G list.
828 		 */
829 		ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
830 		    &csio->sglist_cnt);
831 	} else {
832 		/*
833 		 * We're done, send status back.
834 		 */
835 		if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
836 		    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
837 			io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
838 
839 			/*
840 			 * If this command was aborted, we don't
841 			 * need to send status back to the SIM.
842 			 * Just free the CTIO and ctl_io, and
843 			 * recycle the ATIO back to the SIM.
844 			 */
845 			xpt_print(periph->path, "%s: aborted "
846 				  "command 0x%04x discarded\n",
847 				  __func__, io->scsiio.tag_num);
848 			/*
849 			 * For a wildcard attachment, commands can
850 			 * come in with a specific target/lun.  Reset
851 			 * the target and LUN fields back to the
852 			 * wildcard values before we send them back
853 			 * down to the SIM.  The SIM has a wildcard
854 			 * LUN enabled, not whatever target/lun
855 			 * these happened to be.
856 			 */
857 			if (softc->flags & CTLFE_LUN_WILDCARD) {
858 				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
859 				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
860 			}
861 
862 			if (atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) {
863 				xpt_print(periph->path, "%s: func_code "
864 					  "is %#x\n", __func__,
865 					  atio->ccb_h.func_code);
866 			}
867 			start_ccb->ccb_h.func_code = XPT_ABORT;
868 			start_ccb->cab.abort_ccb = (union ccb *)atio;
869 
870 			/* Tell the SIM that we've aborted this ATIO */
871 			xpt_action(start_ccb);
872 			softc->ccbs_freed++;
873 			xpt_release_ccb(start_ccb);
874 
875 			/*
876 			 * Send the ATIO back down to the SIM.
877 			 */
878 			xpt_action((union ccb *)atio);
879 
880 			/*
881 			 * If we still have work to do, ask for
882 			 * another CCB.  Otherwise, deactivate our
883 			 * callout.
884 			 */
885 			if (!TAILQ_EMPTY(&softc->work_queue))
886 				xpt_schedule(periph, /*priority*/ 1);
887 			return;
888 		}
889 		data_ptr = NULL;
890 		dxfer_len = 0;
891 		csio->sglist_cnt = 0;
892 	}
893 	scsi_status = 0;
894 	if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
895 	    (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
896 	    ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
897 	     io->io_hdr.status == CTL_SUCCESS)) {
898 		flags |= CAM_SEND_STATUS;
899 		scsi_status = io->scsiio.scsi_status;
900 		csio->sense_len = io->scsiio.sense_len;
901 #ifdef CTLFEDEBUG
902 		printf("%s: tag %04x status %x\n", __func__,
903 		       atio->tag_id, io->io_hdr.status);
904 #endif
905 		if (csio->sense_len != 0) {
906 			csio->sense_data = io->scsiio.sense_data;
907 			flags |= CAM_SEND_SENSE;
908 		} else if (scsi_status == SCSI_STATUS_CHECK_COND) {
909 			xpt_print(periph->path, "%s: check condition "
910 				  "with no sense\n", __func__);
911 		}
912 	}
913 
914 #ifdef CTLFEDEBUG
915 	printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
916 	       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
917 	       atio->tag_id, flags, data_ptr, dxfer_len);
918 #endif
919 
920 	/*
921 	 * Valid combinations:
922 	 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
923 	 *    sglist_cnt = 0
924 	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
925 	 *    sglist_cnt = 0
926 	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
927 	 *    sglist_cnt != 0
928 	 */
929 #ifdef CTLFEDEBUG
930 	if (((flags & CAM_SEND_STATUS)
931 	  && (((flags & CAM_DATA_SG) != 0)
932 	   || (dxfer_len != 0)
933 	   || (csio->sglist_cnt != 0)))
934 	 || (((flags & CAM_SEND_STATUS) == 0)
935 	  && (dxfer_len == 0))
936 	 || ((flags & CAM_DATA_SG)
937 	  && (csio->sglist_cnt == 0))
938 	 || (((flags & CAM_DATA_SG) == 0)
939 	  && (csio->sglist_cnt != 0))) {
940 		printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
941 		       "%d sg %u\n", __func__, atio->tag_id,
942 		       atio_cdb_ptr(atio)[0], flags, dxfer_len,
943 		       csio->sglist_cnt);
944 		printf("%s: tag %04x io status %#x\n", __func__,
945 		       atio->tag_id, io->io_hdr.status);
946 	}
947 #endif
948 	cam_fill_ctio(csio,
949 		      /*retries*/ 2,
950 		      ctlfedone,
951 		      flags,
952 		      (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0,
953 		      atio->tag_id,
954 		      atio->init_id,
955 		      scsi_status,
956 		      /*data_ptr*/ data_ptr,
957 		      /*dxfer_len*/ dxfer_len,
958 		      /*timeout*/ 5 * 1000);
959 	start_ccb->ccb_h.flags |= CAM_UNLOCKED;
960 	start_ccb->ccb_h.ccb_atio = atio;
961 	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
962 		io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
963 	io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED);
964 
965 	softc->ctios_sent++;
966 
967 	cam_periph_unlock(periph);
968 	xpt_action(start_ccb);
969 	cam_periph_lock(periph);
970 
971 	/*
972 	 * If we still have work to do, ask for another CCB.
973 	 */
974 	if (!TAILQ_EMPTY(&softc->work_queue))
975 		xpt_schedule(periph, /*priority*/ 1);
976 }
977 
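/*
 * Free a target CCB and its associated ctl_io, updating the counters that
 * track outstanding ATIOs and INOTs.  Once every ATIO and INOT has come
 * back, drop the reference that keeps the peripheral alive.
 */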
978 static void
979 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
980 {
981 	struct ctlfe_lun_softc *softc;
982 	union ctl_io *io;
983 	struct ctlfe_cmd_info *cmd_info;
984 
985 	softc = (struct ctlfe_lun_softc *)periph->softc;
986 	io = ccb->ccb_h.io_ptr;
987 
988 	switch (ccb->ccb_h.func_code) {
989 	case XPT_ACCEPT_TARGET_IO:
990 		softc->atios_freed++;
991 		cmd_info = PRIV_INFO(io);
992 		free(cmd_info, M_CTLFE);
993 		break;
994 	case XPT_IMMEDIATE_NOTIFY:
995 	case XPT_NOTIFY_ACKNOWLEDGE:
996 		softc->inots_freed++;
997 		break;
998 	default:
999 		break;
1000 	}
1001 
1002 	ctl_free_io(io);
1003 	free(ccb, M_CTLFE);
1004 
1005 	KASSERT(softc->atios_freed <= softc->atios_alloced, ("%s: "
1006 		"atios_freed %ju > atios_alloced %ju", __func__,
1007 		softc->atios_freed, softc->atios_alloced));
1008 	KASSERT(softc->inots_freed <= softc->inots_alloced, ("%s: "
1009 		"inots_freed %ju > inots_alloced %ju", __func__,
1010 		softc->inots_freed, softc->inots_alloced));
1011 
1012 	/*
1013 	 * If we have received all of our CCBs, we can release our
1014 	 * reference on the peripheral driver.  It will probably go away
1015 	 * now.
1016 	 */
1017 	if ((softc->atios_freed == softc->atios_alloced)
1018 	 && (softc->inots_freed == softc->inots_alloced)) {
1019 		cam_periph_release_locked(periph);
1020 	}
1021 }
1022 
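/*
 * Adjust a READ/WRITE CDB to skip the bytes that have already been
 * transferred, for the SRR-during-status case.  Assumes 512 byte blocks,
 * as noted below.  Returns -1 for CDBs that can't be adjusted.
 */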
1023 static int
1024 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
1025 {
1026 	uint64_t lba;
1027 	uint32_t num_blocks, nbc;
1028 	uint8_t *cmdbyt = atio_cdb_ptr(atio);
1029 
1030 	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */
1031 
1032 	switch (cmdbyt[0]) {
1033 	case READ_6:
1034 	case WRITE_6:
1035 	{
1036 		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
1037 		lba = scsi_3btoul(cdb->addr);
1038 		lba &= 0x1fffff;
1039 		num_blocks = cdb->length;
1040 		if (num_blocks == 0)
1041 			num_blocks = 256;
1042 		lba += nbc;
1043 		num_blocks -= nbc;
1044 		scsi_ulto3b(lba, cdb->addr);
1045 		cdb->length = num_blocks;
1046 		break;
1047 	}
1048 	case READ_10:
1049 	case WRITE_10:
1050 	{
1051 		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
1052 		lba = scsi_4btoul(cdb->addr);
1053 		num_blocks = scsi_2btoul(cdb->length);
1054 		lba += nbc;
1055 		num_blocks -= nbc;
1056 		scsi_ulto4b(lba, cdb->addr);
1057 		scsi_ulto2b(num_blocks, cdb->length);
1058 		break;
1059 	}
1060 	case READ_12:
1061 	case WRITE_12:
1062 	{
1063 		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
1064 		lba = scsi_4btoul(cdb->addr);
1065 		num_blocks = scsi_4btoul(cdb->length);
1066 		lba += nbc;
1067 		num_blocks -= nbc;
1068 		scsi_ulto4b(lba, cdb->addr);
1069 		scsi_ulto4b(num_blocks, cdb->length);
1070 		break;
1071 	}
1072 	case READ_16:
1073 	case WRITE_16:
1074 	{
1075 		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
1076 		lba = scsi_8btou64(cdb->addr);
1077 		num_blocks = scsi_4btoul(cdb->length);
1078 		lba += nbc;
1079 		num_blocks -= nbc;
1080 		scsi_u64to8b(lba, cdb->addr);
1081 		scsi_ulto4b(num_blocks, cdb->length);
1082 		break;
1083 	}
1084 	default:
1085 		return -1;
1086 	}
1087 	return (0);
1088 }
1089 
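/*
 * Completion handler for all of our target CCBs.  New ATIOs become SCSI
 * ctl_ios queued to CTL, completed CTIOs finish a datamove or a status
 * phase (handling SRRs along the way), and immediate notifies become task
 * management ctl_ios.
 */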
1090 static void
1091 ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
1092 {
1093 	struct ctlfe_lun_softc *softc;
1094 	struct ctlfe_softc *bus_softc;
1095 	struct ctlfe_cmd_info *cmd_info;
1096 	struct ccb_accept_tio *atio = NULL;
1097 	union ctl_io *io = NULL;
1098 	struct mtx *mtx;
1099 
1100 	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
1101 	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
1102 #ifdef CTLFE_DEBUG
1103 	printf("%s: entered, func_code = %#x\n", __func__,
1104 	       done_ccb->ccb_h.func_code);
1105 #endif
1106 
1107 	/*
1108 	 * At this point CTL has no known use case for device queue freezes.
1109 	 * In case some SIM thinks differently, drop its freeze right here.
1110 	 */
1111 	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1112 		cam_release_devq(periph->path,
1113 				 /*relsim_flags*/0,
1114 				 /*reduction*/0,
1115 				 /*timeout*/0,
1116 				 /*getcount_only*/0);
1117 		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1118 	}
1119 
1120 	softc = (struct ctlfe_lun_softc *)periph->softc;
1121 	bus_softc = softc->parent_softc;
1122 	mtx = cam_periph_mtx(periph);
1123 	mtx_lock(mtx);
1124 
1125 	/*
1126 	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
1127 	 * need to be freed.  Most of the ATIOs and INOTs that come back
1128 	 * will be CCBs that are being returned from the SIM as a result of
1129 	 * our disabling the LUN.
1130 	 *
1131 	 * Other CCB types are handled in their respective cases below.
1132 	 */
1133 	if (periph->flags & CAM_PERIPH_INVALID) {
1134 		switch (done_ccb->ccb_h.func_code) {
1135 		case XPT_ACCEPT_TARGET_IO:
1136 		case XPT_IMMEDIATE_NOTIFY:
1137 		case XPT_NOTIFY_ACKNOWLEDGE:
1138 			ctlfe_free_ccb(periph, done_ccb);
1139 			goto out;
1140 		default:
1141 			break;
1142 		}
1143 
1144 	}
1145 	switch (done_ccb->ccb_h.func_code) {
1146 	case XPT_ACCEPT_TARGET_IO: {
1147 
1148 		atio = &done_ccb->atio;
1149 
1150  resubmit:
1151 		/*
1152 		 * Allocate a ctl_io, pass it to CTL, and wait for the
1153 		 * datamove or done.
1154 		 */
1155 		mtx_unlock(mtx);
1156 		io = done_ccb->ccb_h.io_ptr;
1157 		cmd_info = PRIV_INFO(io);
1158 		ctl_zero_io(io);
1159 
1160 		/* Save pointers on both sides */
1161 		PRIV_CCB(io) = done_ccb;
1162 		PRIV_INFO(io) = cmd_info;
1163 		done_ccb->ccb_h.io_ptr = io;
1164 
1165 		/*
1166 		 * Only SCSI I/O comes down this path; resets, etc. come
1167 		 * down the immediate notify path below.
1168 		 */
1169 		io->io_hdr.io_type = CTL_IO_SCSI;
1170 		io->io_hdr.nexus.initid = atio->init_id;
1171 		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
1172 		if (bus_softc->hba_misc & PIM_EXTLUNS) {
1173 			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
1174 			    CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun));
1175 		} else {
1176 			io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
1177 		}
1178 		io->scsiio.tag_num = atio->tag_id;
1179 		switch (atio->tag_action) {
1180 		case CAM_TAG_ACTION_NONE:
1181 			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1182 			break;
1183 		case MSG_SIMPLE_TASK:
1184 			io->scsiio.tag_type = CTL_TAG_SIMPLE;
1185 			break;
1186 		case MSG_HEAD_OF_QUEUE_TASK:
1187         		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
1188 			break;
1189 		case MSG_ORDERED_TASK:
1190         		io->scsiio.tag_type = CTL_TAG_ORDERED;
1191 			break;
1192 		case MSG_ACA_TASK:
1193 			io->scsiio.tag_type = CTL_TAG_ACA;
1194 			break;
1195 		default:
1196 			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1197 			printf("%s: unhandled tag type %#x!!\n", __func__,
1198 			       atio->tag_action);
1199 			break;
1200 		}
1201 		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
1202 			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
1203 			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
1204 		}
1205 		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
1206 		bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len);
1207 
1208 #ifdef CTLFEDEBUG
1209 		printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
1210 		        io->io_hdr.nexus.initid,
1211 		        io->io_hdr.nexus.targ_port,
1212 		        io->io_hdr.nexus.targ_lun,
1213 			io->scsiio.tag_num, io->scsiio.cdb[0]);
1214 #endif
1215 
1216 		ctl_queue(io);
1217 		return;
1218 	}
1219 	case XPT_CONT_TARGET_IO: {
1220 		int srr = 0;
1221 		uint32_t srr_off = 0;
1222 
1223 		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
1224 		io = (union ctl_io *)atio->ccb_h.io_ptr;
1225 
1226 		softc->ctios_returned++;
1227 #ifdef CTLFEDEBUG
1228 		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
1229 		       __func__, atio->tag_id, done_ccb->ccb_h.flags);
1230 #endif
1231 		/*
1232 		 * Handle the SRR case where the data pointer is pushed back.
1233 		 */
1234 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
1235 		    && done_ccb->csio.msg_ptr != NULL
1236 		    && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
1237 		    && done_ccb->csio.msg_ptr[1] == 5
1238        		    && done_ccb->csio.msg_ptr[2] == 0) {
1239 			srr = 1;
1240 			srr_off =
1241 			    (done_ccb->csio.msg_ptr[3] << 24)
1242 			    | (done_ccb->csio.msg_ptr[4] << 16)
1243 			    | (done_ccb->csio.msg_ptr[5] << 8)
1244 			    | (done_ccb->csio.msg_ptr[6]);
1245 		}
1246 
1247 		/*
1248 		 * If we have an SRR and we're still sending data, we
1249 		 * should be able to adjust offsets and cycle again.
1250 		 * It is possible only if offset is from this datamove.
1251 		 * This is possible only if the offset is within this datamove.
1252 		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
1253 		    srr_off >= io->scsiio.kern_rel_offset &&
1254 		    srr_off < io->scsiio.kern_rel_offset +
1255 		     io->scsiio.kern_data_len) {
1256 			io->scsiio.kern_data_resid =
1257 			    io->scsiio.kern_rel_offset +
1258 			    io->scsiio.kern_data_len - srr_off;
1259 			io->scsiio.ext_data_filled = srr_off;
1260 			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
1261 			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
1262 			softc->ccbs_freed++;
1263 			xpt_release_ccb(done_ccb);
1264 			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1265 					  periph_links.tqe);
1266 			xpt_schedule(periph, /*priority*/ 1);
1267 			break;
1268 		}
1269 
1270 		/*
1271 		 * If status was being sent, the back end data is now history.
1272 		 * Hack it up and resubmit a new command with the CDB adjusted.
1273 		 * If the SIM does the right thing, all of the resid math
1274 		 * should work.
1275 		 */
1276 		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
1277 			softc->ccbs_freed++;
1278 			xpt_release_ccb(done_ccb);
1279 			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
1280 				done_ccb = (union ccb *)atio;
1281 				goto resubmit;
1282 			}
1283 			/*
1284 			 * Fall through to doom....
1285 			 */
1286 		}
1287 
1288 		if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1289 		    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1290 			io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
1291 
1292 		/*
1293 		 * If we were sending status back to the initiator, free up
1294 		 * resources.  If we were doing a datamove, call the
1295 		 * datamove done routine.
1296 		 */
1297 		if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
1298 			softc->ccbs_freed++;
1299 			xpt_release_ccb(done_ccb);
1300 			/*
1301 			 * For a wildcard attachment, commands can come in
1302 			 * with a specific target/lun.  Reset the target
1303 			 * and LUN fields back to the wildcard values before
1304 			 * we send them back down to the SIM.  The SIM has
1305 			 * a wildcard LUN enabled, not whatever target/lun
1306 			 * these happened to be.
1307 			 */
1308 			if (softc->flags & CTLFE_LUN_WILDCARD) {
1309 				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
1310 				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
1311 			}
1312 			if (periph->flags & CAM_PERIPH_INVALID) {
1313 				ctlfe_free_ccb(periph, (union ccb *)atio);
1314 			} else {
1315 				mtx_unlock(mtx);
1316 				xpt_action((union ccb *)atio);
1317 				return;
1318 			}
1319 		} else {
1320 			struct ctlfe_cmd_info *cmd_info;
1321 			struct ccb_scsiio *csio;
1322 
1323 			csio = &done_ccb->csio;
1324 			cmd_info = PRIV_INFO(io);
1325 
1326 			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
1327 
1328 			/*
1329 			 * Translate CAM status to CTL status.  Success
1330 			 * does not change the overall ctl_io status.  In
1331 			 * that case we just set port_status to 0.  If we
1332 			 * have a failure, though, set a data phase error
1333 			 * for the overall ctl_io.
1334 			 */
1335 			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
1336 			case CAM_REQ_CMP:
1337 				io->scsiio.kern_data_resid -= csio->dxfer_len;
1338 				io->io_hdr.port_status = 0;
1339 				break;
1340 			default:
1341 				/*
1342 				 * XXX KDM we probably need to figure out a
1343 				 * standard set of errors that the SIM
1344 				 * drivers should return in the event of a
1345 				 * data transfer failure.  A data phase
1346 				 * error will at least point the user to a
1347 				 * data transfer error of some sort.
1348 				 * Hopefully the SIM printed out some
1349 				 * additional information to give the user
1350 				 * a clue what happened.
1351 				 */
1352 				io->io_hdr.port_status = 0xbad1;
1353 				ctl_set_data_phase_error(&io->scsiio);
1354 				/*
1355 				 * XXX KDM figure out residual.
1356 				 */
1357 				break;
1358 			}
1359 			/*
1360 			 * If we had to break this S/G list into multiple
1361 			 * pieces, figure out where we are in the list, and
1362 			 * continue sending pieces if necessary.
1363 			 */
1364 			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
1365 			 && (io->io_hdr.port_status == 0)) {
1366 				ccb_flags flags;
1367 				uint8_t *data_ptr;
1368 				uint32_t dxfer_len;
1369 
1370 				flags = atio->ccb_h.flags &
1371 					(CAM_DIS_DISCONNECT|
1372 					 CAM_TAG_ACTION_VALID);
1373 
1374 				ctlfedata(softc, io, &flags, &data_ptr,
1375 				    &dxfer_len, &csio->sglist_cnt);
1376 
1377 				if (((flags & CAM_SEND_STATUS) == 0)
1378 				 && (dxfer_len == 0)) {
1379 					printf("%s: tag %04x no status or "
1380 					       "len cdb = %02x\n", __func__,
1381 					       atio->tag_id,
1382 					       atio_cdb_ptr(atio)[0]);
1383 					printf("%s: tag %04x io status %#x\n",
1384 					       __func__, atio->tag_id,
1385 					       io->io_hdr.status);
1386 				}
1387 
1388 				cam_fill_ctio(csio,
1389 					      /*retries*/ 2,
1390 					      ctlfedone,
1391 					      flags,
1392 					      (flags & CAM_TAG_ACTION_VALID) ?
1393 					       MSG_SIMPLE_Q_TAG : 0,
1394 					      atio->tag_id,
1395 					      atio->init_id,
1396 					      0,
1397 					      /*data_ptr*/ data_ptr,
1398 					      /*dxfer_len*/ dxfer_len,
1399 					      /*timeout*/ 5 * 1000);
1400 
1401 				csio->ccb_h.flags |= CAM_UNLOCKED;
1402 				csio->resid = 0;
1403 				csio->ccb_h.ccb_atio = atio;
1404 				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
1405 				softc->ctios_sent++;
1406 				mtx_unlock(mtx);
1407 				xpt_action((union ccb *)csio);
1408 			} else {
1409 				/*
1410 				 * Release the CTIO.  The ATIO will be sent back
1411 				 * down to the SIM once we send status.
1412 				 */
1413 				softc->ccbs_freed++;
1414 				xpt_release_ccb(done_ccb);
1415 				mtx_unlock(mtx);
1416 
1417 				/* Call the backend move done callback */
1418 				io->scsiio.be_move_done(io);
1419 			}
1420 			return;
1421 		}
1422 		break;
1423 	}
1424 	case XPT_IMMEDIATE_NOTIFY: {
1425 		union ctl_io *io;
1426 		struct ccb_immediate_notify *inot;
1427 		cam_status status;
1428 		int send_ctl_io;
1429 
1430 		inot = &done_ccb->cin1;
1431 		printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
1432 		       "seq %#x\n", __func__, inot->ccb_h.status,
1433 		       inot->tag_id, inot->seq_id);
1434 
1435 		io = done_ccb->ccb_h.io_ptr;
1436 		ctl_zero_io(io);
1437 
1438 		send_ctl_io = 1;
1439 
1440 		io->io_hdr.io_type = CTL_IO_TASK;
1441 		PRIV_CCB(io) = done_ccb;
1442 		inot->ccb_h.io_ptr = io;
1443 		io->io_hdr.nexus.initid = inot->initiator_id;
1444 		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
1445 		if (bus_softc->hba_misc & PIM_EXTLUNS) {
1446 			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
1447 			    CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun));
1448 		} else {
1449 			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
1450 		}
1451 		/* XXX KDM should this be the tag_id? */
1452 		io->taskio.tag_num = inot->seq_id;
1453 
1454 		status = inot->ccb_h.status & CAM_STATUS_MASK;
1455 		switch (status) {
1456 		case CAM_SCSI_BUS_RESET:
1457 			io->taskio.task_action = CTL_TASK_BUS_RESET;
1458 			break;
1459 		case CAM_BDR_SENT:
1460 			io->taskio.task_action = CTL_TASK_TARGET_RESET;
1461 			break;
1462 		case CAM_MESSAGE_RECV:
1463 			switch (inot->arg) {
1464 			case MSG_ABORT_TASK_SET:
1465 				io->taskio.task_action =
1466 				    CTL_TASK_ABORT_TASK_SET;
1467 				break;
1468 			case MSG_TARGET_RESET:
1469 				io->taskio.task_action = CTL_TASK_TARGET_RESET;
1470 				break;
1471 			case MSG_ABORT_TASK:
1472 				io->taskio.task_action = CTL_TASK_ABORT_TASK;
1473 				break;
1474 			case MSG_LOGICAL_UNIT_RESET:
1475 				io->taskio.task_action = CTL_TASK_LUN_RESET;
1476 				break;
1477 			case MSG_CLEAR_TASK_SET:
1478 				io->taskio.task_action =
1479 				    CTL_TASK_CLEAR_TASK_SET;
1480 				break;
1481 			case MSG_CLEAR_ACA:
1482 				io->taskio.task_action = CTL_TASK_CLEAR_ACA;
1483 				break;
1484 			case MSG_QUERY_TASK:
1485 				io->taskio.task_action = CTL_TASK_QUERY_TASK;
1486 				break;
1487 			case MSG_QUERY_TASK_SET:
1488 				io->taskio.task_action =
1489 				    CTL_TASK_QUERY_TASK_SET;
1490 				break;
1491 			case MSG_QUERY_ASYNC_EVENT:
1492 				io->taskio.task_action =
1493 				    CTL_TASK_QUERY_ASYNC_EVENT;
1494 				break;
1495 			case MSG_NOOP:
1496 				send_ctl_io = 0;
1497 				break;
1498 			default:
1499 				xpt_print(periph->path,
1500 					  "%s: unsupported message 0x%x\n",
1501 					  __func__, inot->arg);
1502 				send_ctl_io = 0;
1503 				break;
1504 			}
1505 			break;
1506 		case CAM_REQ_ABORTED:
1507 			/*
1508 			 * This request was sent back by the driver.
1509 			 * XXX KDM what do we do here?
1510 			 */
1511 			send_ctl_io = 0;
1512 			break;
1513 		case CAM_REQ_INVALID:
1514 		case CAM_PROVIDE_FAIL:
1515 		default:
1516 			/*
1517 			 * We should only get here if we're talking
1518 			 * to a SIM that is target
1519 			 * capable but only supports the old API.  In
1520 			 * that case, we need to just free the CCB.
1521 			 * If we actually send a notify acknowledge,
1522 			 * it will send that back with an error as
1523 			 * well.
1524 			 */
1525 
1526 			if ((status != CAM_REQ_INVALID)
1527 			 && (status != CAM_PROVIDE_FAIL))
1528 				xpt_print(periph->path,
1529 					  "%s: unsupported CAM status 0x%x\n",
1530 					  __func__, status);
1531 
1532 			ctlfe_free_ccb(periph, done_ccb);
1533 
1534 			goto out;
1535 		}
1536 		if (send_ctl_io != 0) {
1537 			ctl_queue(io);
1538 		} else {
1539 			done_ccb->ccb_h.status = CAM_REQ_INPROG;
1540 			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
1541 			xpt_action(done_ccb);
1542 		}
1543 		break;
1544 	}
1545 	case XPT_NOTIFY_ACKNOWLEDGE:
1546 		/*
1547 		 * Queue this back down to the SIM as an immediate notify.
1548 		 */
1549 		done_ccb->ccb_h.status = CAM_REQ_INPROG;
1550 		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
1551 		xpt_action(done_ccb);
1552 		break;
1553 	case XPT_SET_SIM_KNOB:
1554 	case XPT_GET_SIM_KNOB:
1555 	case XPT_GET_SIM_KNOB_OLD:
1556 		break;
1557 	default:
1558 		panic("%s: unexpected CCB type %#x", __func__,
1559 		      done_ccb->ccb_h.func_code);
1560 		break;
1561 	}
1562 
1563 out:
1564 	mtx_unlock(mtx);
1565 }
1566 
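/*
 * Bring the port online or offline: read the SIM knobs, optionally push
 * down user-supplied (or random) WWNN/WWPN values, and set or clear the
 * target role.
 */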
1567 static void
1568 ctlfe_onoffline(void *arg, int online)
1569 {
1570 	struct ctlfe_softc *bus_softc;
1571 	union ccb *ccb;
1572 	cam_status status;
1573 	struct cam_path *path;
1574 	int set_wwnn;
1575 
1576 	bus_softc = (struct ctlfe_softc *)arg;
1577 
1578 	set_wwnn = 0;
1579 
1580 	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
1581 		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1582 	if (status != CAM_REQ_CMP) {
1583 		printf("%s: unable to create path!\n", __func__);
1584 		return;
1585 	}
1586 	ccb = xpt_alloc_ccb();
1587 	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
1588 	ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
1589 	xpt_action(ccb);
1590 
1591 	/*
1592 	 * Copan WWN format:
1593 	 *
1594 	 * Bits 63-60:	0x5		NAA, IEEE registered name
1595 	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
1596 	 * Bits 35-12:			Copan SSN (Sequential Serial Number)
1597 	 * Bits 11-8:			Type of port:
1598 	 *					1 == N-Port
1599 	 *					2 == F-Port
1600 	 *					3 == NL-Port
1601 	 * Bits 7-0:			0 == Node Name, >0 == Port Number
1602 	 */
1603 	if (online != 0) {
1604 		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){
1605 #ifdef RANDOM_WWNN
1606 			uint64_t random_bits;
1607 #endif
1608 
1609 			printf("%s: %s current WWNN %#jx\n", __func__,
1610 			       bus_softc->port_name,
1611 			       ccb->knob.xport_specific.fc.wwnn);
1612 			printf("%s: %s current WWPN %#jx\n", __func__,
1613 			       bus_softc->port_name,
1614 			       ccb->knob.xport_specific.fc.wwpn);
1615 
1616 #ifdef RANDOM_WWNN
1617 			arc4rand(&random_bits, sizeof(random_bits), 0);
1618 #endif
1619 
1620 			/*
1621 			 * XXX KDM this is a bit of a kludge for now.  We
1622 			 * take the current WWNN/WWPN from the card, and
1623 			 * replace the company identifier and the NL-Port
1624 			 * indicator and the port number (for the WWPN).
1625 			 * This should be replaced later with ddb_GetWWNN,
1626 			 * or possibly a more centralized scheme.  (It
1627 			 * would be nice to have the WWNN/WWPN for each
1628 			 * port stored in the ctl_port structure.)
1629 			 */
1630 #ifdef RANDOM_WWNN
1631 			ccb->knob.xport_specific.fc.wwnn =
1632 				(random_bits &
1633 				0x0000000fffffff00ULL) |
1634 				/* Company ID */ 0x5000ED5000000000ULL |
1635 				/* NL-Port */    0x0300;
1636 			ccb->knob.xport_specific.fc.wwpn =
1637 				(random_bits &
1638 				0x0000000fffffff00ULL) |
1639 				/* Company ID */ 0x5000ED5000000000ULL |
1640 				/* NL-Port */    0x3000 |
1641 				/* Port Num */ (bus_softc->port.targ_port & 0xff);
1642 
1643 			/*
1644 			 * This is a bit of an API break/reversal, but if
1645 			 * we're doing the random WWNN that's a little
1646 			 * different anyway.  So record what we're actually
1647 			 * using with the frontend code so it's reported
1648 			 * accurately.
1649 			 */
1650 			ctl_port_set_wwns(&bus_softc->port,
1651 			    true, ccb->knob.xport_specific.fc.wwnn,
1652 			    true, ccb->knob.xport_specific.fc.wwpn);
1653 			set_wwnn = 1;
1654 #else /* RANDOM_WWNN */
1655 			/*
1656 			 * If the user has specified a WWNN/WWPN, send them
1657 			 * down to the SIM.  Otherwise, record what the SIM
1658 			 * has reported.
1659 			 */
1660 			if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn
1661 			    != ccb->knob.xport_specific.fc.wwnn) {
1662 				ccb->knob.xport_specific.fc.wwnn =
1663 				    bus_softc->port.wwnn;
1664 				set_wwnn = 1;
1665 			} else {
1666 				ctl_port_set_wwns(&bus_softc->port,
1667 				    true, ccb->knob.xport_specific.fc.wwnn,
1668 				    false, 0);
1669 			}
1670 			if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn
1671 			     != ccb->knob.xport_specific.fc.wwpn) {
1672 				ccb->knob.xport_specific.fc.wwpn =
1673 				    bus_softc->port.wwpn;
1674 				set_wwnn = 1;
1675 			} else {
1676 				ctl_port_set_wwns(&bus_softc->port,
1677 				    false, 0,
1678 				    true, ccb->knob.xport_specific.fc.wwpn);
1679 			}
1680 #endif /* RANDOM_WWNN */
1681 
1682 
1683 			if (set_wwnn != 0) {
1684 				printf("%s: %s new WWNN %#jx\n", __func__,
1685 				       bus_softc->port_name,
1686 				       ccb->knob.xport_specific.fc.wwnn);
1687 				printf("%s: %s new WWPN %#jx\n", __func__,
1688 				       bus_softc->port_name,
1689 				       ccb->knob.xport_specific.fc.wwpn);
1690 			}
1691 		} else {
1692 			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
1693 			       bus_softc->port_name);
1694 		}
1695 	}
1696 	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
1697 	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
1698 	if (set_wwnn != 0)
1699 		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;
1700 
1701 	if (online != 0)
1702 		ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET;
1703 	else
1704 		ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET;
1705 
1706 	xpt_action(ccb);
1707 
1708 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1709 		printf("%s: SIM %s (path id %d) target %s failed with "
1710 		       "status %#x\n",
1711 		       __func__, bus_softc->port_name, bus_softc->path_id,
1712 		       (online != 0) ? "enable" : "disable",
1713 		       ccb->ccb_h.status);
1714 	} else {
1715 		printf("%s: SIM %s (path id %d) target %s succeeded\n",
1716 		       __func__, bus_softc->port_name, bus_softc->path_id,
1717 		       (online != 0) ? "enable" : "disable");
1718 	}
1719 
1720 	xpt_free_path(path);
1721 	xpt_free_ccb(ccb);
1722 }
1723 
1724 static void
1725 ctlfe_online(void *arg)
1726 {
1727 	struct ctlfe_softc *bus_softc;
1728 	struct cam_path *path;
1729 	cam_status status;
1730 	struct ctlfe_lun_softc *lun_softc;
1731 	struct cam_periph *periph;
1732 
1733 	bus_softc = (struct ctlfe_softc *)arg;
1734 
1735 	/*
1736 	 * Create the wildcard LUN before bringing the port online.
1737 	 */
1738 	status = xpt_create_path(&path, /*periph*/ NULL,
1739 				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1740 				 CAM_LUN_WILDCARD);
1741 	if (status != CAM_REQ_CMP) {
1742 		printf("%s: unable to create path for wildcard periph\n",
1743 				__func__);
1744 		return;
1745 	}
1746 
1747 	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO);
1748 
1749 	xpt_path_lock(path);
1750 	periph = cam_periph_find(path, "ctl");
1751 	if (periph != NULL) {
1752 		/* We've already got a periph, no need to alloc a new one. */
1753 		xpt_path_unlock(path);
1754 		xpt_free_path(path);
1755 		free(lun_softc, M_CTLFE);
1756 		return;
1757 	}
1758 	lun_softc->parent_softc = bus_softc;
1759 	lun_softc->flags |= CTLFE_LUN_WILDCARD;
1760 
1761 	status = cam_periph_alloc(ctlferegister,
1762 				  ctlfeoninvalidate,
1763 				  ctlfecleanup,
1764 				  ctlfestart,
1765 				  "ctl",
1766 				  CAM_PERIPH_BIO,
1767 				  path,
1768 				  ctlfeasync,
1769 				  0,
1770 				  lun_softc);
1771 
1772 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1773 		const struct cam_status_entry *entry;
1774 
1775 		entry = cam_fetch_status_entry(status);
1776 		printf("%s: CAM error %s (%#x) returned from "
1777 		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1778 		       entry->status_text : "Unknown", status);
1779 		free(lun_softc, M_CTLFE);
1780 	}
1781 
1782 	xpt_path_unlock(path);
1783 	ctlfe_onoffline(arg, /*online*/ 1);
1784 	xpt_free_path(path);
1785 }
1786 
1787 static void
1788 ctlfe_offline(void *arg)
1789 {
1790 	struct ctlfe_softc *bus_softc;
1791 	struct cam_path *path;
1792 	cam_status status;
1793 	struct cam_periph *periph;
1794 
1795 	bus_softc = (struct ctlfe_softc *)arg;
1796 
1797 	ctlfe_onoffline(arg, /*online*/ 0);
1798 
1799 	/*
1800 	 * Disable the wildcard LUN for this port now that we have taken
1801 	 * the port offline.
1802 	 */
1803 	status = xpt_create_path(&path, /*periph*/ NULL,
1804 				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1805 				 CAM_LUN_WILDCARD);
1806 	if (status != CAM_REQ_CMP) {
1807 		printf("%s: unable to create path for wildcard periph\n",
1808 		       __func__);
1809 		return;
1810 	}
1811 	xpt_path_lock(path);
1812 	if ((periph = cam_periph_find(path, "ctl")) != NULL)
1813 		cam_periph_invalidate(periph);
1814 	xpt_path_unlock(path);
1815 	xpt_free_path(path);
1816 }
1817 
1818 /*
1819  * This will get called to enable a LUN on every bus that is attached to
1820  * CTL.  So we only need to create a path/periph for this particular bus.
1821  */
1822 static int
1823 ctlfe_lun_enable(void *arg, int lun_id)
1824 {
1825 	struct ctlfe_softc *bus_softc;
1826 	struct ctlfe_lun_softc *softc;
1827 	struct cam_path *path;
1828 	struct cam_periph *periph;
1829 	cam_status status;
1830 
1831 	bus_softc = (struct ctlfe_softc *)arg;
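	/*
	 * SIMs that advertise PIM_EXTLUNS expect a 64-bit encoded LUN,
	 * so convert CTL's LUN id to that (byte-swizzled) representation.
	 */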
1832 	if (bus_softc->hba_misc & PIM_EXTLUNS)
1833 		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
1834 
1835 	status = xpt_create_path(&path, /*periph*/ NULL,
1836 	    bus_softc->path_id, bus_softc->target_id, lun_id);
1837 	/* XXX KDM need some way to return status to CTL here? */
1838 	if (status != CAM_REQ_CMP) {
1839 		printf("%s: could not create path, status %#x\n", __func__,
1840 		       status);
1841 		return (1);
1842 	}
1843 
1844 	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
1845 	xpt_path_lock(path);
1846 	periph = cam_periph_find(path, "ctl");
1847 	if (periph != NULL) {
1848 		/* We've already got a periph, no need to alloc a new one. */
1849 		xpt_path_unlock(path);
1850 		xpt_free_path(path);
1851 		free(softc, M_CTLFE);
1852 		return (0);
1853 	}
1854 	softc->parent_softc = bus_softc;
1855 
1856 	status = cam_periph_alloc(ctlferegister,
1857 				  ctlfeoninvalidate,
1858 				  ctlfecleanup,
1859 				  ctlfestart,
1860 				  "ctl",
1861 				  CAM_PERIPH_BIO,
1862 				  path,
1863 				  ctlfeasync,
1864 				  0,
1865 				  softc);
1866 
1867 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1868 		const struct cam_status_entry *entry;
1869 
1870 		entry = cam_fetch_status_entry(status);
1871 		printf("%s: CAM error %s (%#x) returned from "
1872 		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1873 		       entry->status_text : "Unknown", status);
1874 		free(softc, M_CTLFE);
1875 	}
1876 
1877 	xpt_path_unlock(path);
1878 	xpt_free_path(path);
1879 	return (0);
1880 }
1881 
1882 /*
1883  * This will get called when the user removes a LUN, to disable that LUN
1884  * on every bus that is attached to CTL.
1885  */
1886 static int
1887 ctlfe_lun_disable(void *arg, int lun_id)
1888 {
1889 	struct ctlfe_softc *softc;
1890 	struct ctlfe_lun_softc *lun_softc;
1891 
1892 	softc = (struct ctlfe_softc *)arg;
1893 	if (softc->hba_misc & PIM_EXTLUNS)
1894 		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
1895 
1896 	mtx_lock(&softc->lun_softc_mtx);
1897 	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
1898 		struct cam_path *path;
1899 
1900 		path = lun_softc->periph->path;
1901 
1902 		if ((xpt_path_target_id(path) == softc->target_id)
1903 		 && (xpt_path_lun_id(path) == lun_id)) {
1904 			break;
1905 		}
1906 	}
1907 	if (lun_softc == NULL) {
1908 		mtx_unlock(&softc->lun_softc_mtx);
1909 		printf("%s: can't find lun %d\n", __func__, lun_id);
1910 		return (1);
1911 	}
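	/*
	 * Take a reference before dropping the list lock so the periph
	 * cannot go away while we invalidate it below.
	 */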
1912 	cam_periph_acquire(lun_softc->periph);
1913 	mtx_unlock(&softc->lun_softc_mtx);
1914 
1915 	cam_periph_lock(lun_softc->periph);
1916 	cam_periph_invalidate(lun_softc->periph);
1917 	cam_periph_unlock(lun_softc->periph);
1918 	cam_periph_release(lun_softc->periph);
1919 	return (0);
1920 }
1921 
1922 static void
1923 ctlfe_dump_sim(struct cam_sim *sim)
1924 {
1925 
1926 	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
1927 	       sim->sim_name, sim->unit_number,
1928 	       sim->max_tagged_dev_openings, sim->max_dev_openings);
1929 }
1930 
1931 /*
1932  * Assumes that the SIM lock is held.
1933  */
1934 static void
1935 ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
1936 {
1937 	struct ccb_hdr *hdr;
1938 	struct cam_periph *periph;
1939 	int num_items;
1940 
1941 	periph = softc->periph;
1942 	num_items = 0;
1943 
1944 	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
1945 		union ctl_io *io = hdr->io_ptr;
1946 
1947 		num_items++;
1948 
1949 		/*
1950 		 * Only regular SCSI I/O is put on the work
1951 		 * queue, so we can print sense here.  There may be no
1952 		 * sense if it's on the queue for a DMA, but this serves to
1953 		 * print out the CCB as well.
1954 		 *
1955 		 * XXX KDM switch this over to scsi_sense_print() when
1956 		 * CTL is merged in with CAM.
1957 		 */
1958 		ctl_io_error_print(io, NULL);
1959 
1960 		/*
1961 		 * Print DMA status if we are DMA_QUEUED.
1962 		 */
1963 		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
1964 			xpt_print(periph->path,
1965 			    "Total %u, Current %u, Resid %u\n",
1966 			    io->scsiio.kern_total_len,
1967 			    io->scsiio.kern_data_len,
1968 			    io->scsiio.kern_data_resid);
1969 		}
1970 	}
1971 
1972 	xpt_print(periph->path, "%d total requests waiting for CCBs\n",
1973 		  num_items);
1974 	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
1975 		  "freed)\n", (uintmax_t)(softc->ccbs_alloced -
1976 		  softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
1977 		  (uintmax_t)softc->ccbs_freed);
1978 	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
1979 		  "returned)\n", (uintmax_t)(softc->ctios_sent -
1980 		  softc->ctios_returned), (uintmax_t)softc->ctios_sent,
1981 		  (uintmax_t)softc->ctios_returned);
1982 }
1983 
1984 /*
1985  * Datamove/done routine called by CTL.  Put ourselves on the queue to
1986  * receive a CCB from CAM so we can queue the continue I/O request down
1987  * to the adapter.
1988  */
1989 static void
1990 ctlfe_datamove(union ctl_io *io)
1991 {
1992 	union ccb *ccb;
1993 	struct cam_periph *periph;
1994 	struct ctlfe_lun_softc *softc;
1995 
1996 	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
1997 	    ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
1998 
1999 	io->scsiio.ext_data_filled = 0;
2000 	ccb = PRIV_CCB(io);
2001 	periph = xpt_path_periph(ccb->ccb_h.path);
2002 	cam_periph_lock(periph);
2003 	softc = (struct ctlfe_lun_softc *)periph->softc;
2004 	io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
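	/*
	 * If CTL has already filled in a status, queue the status as
	 * well so the start routine (ctlfestart()) can send it along
	 * with the data.
	 */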
2005 	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
2006 		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
2007 	TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
2008 			  periph_links.tqe);
2009 	xpt_schedule(periph, /*priority*/ 1);
2010 	cam_periph_unlock(periph);
2011 }
2012 
2013 static void
2014 ctlfe_done(union ctl_io *io)
2015 {
2016 	union ccb *ccb;
2017 	struct cam_periph *periph;
2018 	struct ctlfe_lun_softc *softc;
2019 
2020 	ccb = PRIV_CCB(io);
2021 	periph = xpt_path_periph(ccb->ccb_h.path);
2022 	cam_periph_lock(periph);
2023 	softc = (struct ctlfe_lun_softc *)periph->softc;
2024 
2025 	if (io->io_hdr.io_type == CTL_IO_TASK) {
2026 		/*
2027 		 * Task management commands don't require any further
2028 		 * communication back to the adapter.  Requeue the CCB
2029 		 * to the adapter, and free the CTL I/O.
2030 		 */
2031 		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
2032 			  "tag %#x seq %#x\n", __func__,
2033 			  ccb->cin1.tag_id, ccb->cin1.seq_id);
2034 		/*
2035 		 * Send the notify acknowledge down to the SIM, to let it
2036 		 * know we processed the task management command.
2037 		 */
2038 		ccb->ccb_h.status = CAM_REQ_INPROG;
2039 		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
2040 		switch (io->taskio.task_status) {
2041 		case CTL_TASK_FUNCTION_COMPLETE:
2042 			ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
2043 			break;
2044 		case CTL_TASK_FUNCTION_SUCCEEDED:
2045 			ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
2046 			ccb->ccb_h.flags |= CAM_SEND_STATUS;
2047 			break;
2048 		case CTL_TASK_FUNCTION_REJECTED:
2049 			ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
2050 			ccb->ccb_h.flags |= CAM_SEND_STATUS;
2051 			break;
2052 		case CTL_TASK_LUN_DOES_NOT_EXIST:
2053 			ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
2054 			ccb->ccb_h.flags |= CAM_SEND_STATUS;
2055 			break;
2056 		case CTL_TASK_FUNCTION_NOT_SUPPORTED:
2057 			ccb->cna2.arg = CAM_RSP_TMF_FAILED;
2058 			ccb->ccb_h.flags |= CAM_SEND_STATUS;
2059 			break;
2060 		}
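		/*
		 * Fold CTL's 3-byte task management response code into
		 * the upper bits of the notify acknowledge argument.
		 */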
2061 		ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
2062 		xpt_action(ccb);
2063 	} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
2064 		if (softc->flags & CTLFE_LUN_WILDCARD) {
2065 			ccb->ccb_h.target_id = CAM_TARGET_WILDCARD;
2066 			ccb->ccb_h.target_lun = CAM_LUN_WILDCARD;
2067 		}
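		/*
		 * The status has already been sent to the initiator, so
		 * just hand the CCB back to the SIM, unless the periph
		 * has been invalidated, in which case free the CCB.
		 */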
2068 		if (periph->flags & CAM_PERIPH_INVALID) {
2069 			ctlfe_free_ccb(periph, ccb);
2070 		} else {
2071 			cam_periph_unlock(periph);
2072 			xpt_action(ccb);
2073 			return;
2074 		}
2075 	} else {
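		/*
		 * Status still needs to be sent to the initiator; queue
		 * the CCB for the start routine and schedule the periph.
		 */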
2076 		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
2077 		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
2078 				  periph_links.tqe);
2079 		xpt_schedule(periph, /*priority*/ 1);
2080 	}
2081 
2082 	cam_periph_unlock(periph);
2083 }
2084 
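/*
 * Debugging aid: dump the state of every SIM and per-LUN work queue
 * known to this frontend.
 */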
2085 static void
2086 ctlfe_dump(void)
2087 {
2088 	struct ctlfe_softc *bus_softc;
2089 	struct ctlfe_lun_softc *lun_softc;
2090 
2091 	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
2092 		ctlfe_dump_sim(bus_softc->sim);
2093 		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links)
2094 			ctlfe_dump_queue(lun_softc);
2095 	}
2096 }
2097