xref: /freebsd/sys/cam/ctl/scsi_ctl.c (revision 3bdf775801b218aa5a89564839405b122f4b233e)
1 /*-
2  * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions, and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    substantially similar to the "NO WARRANTY" disclaimer below
13  *    ("Disclaimer") and any redistribution must be conditioned upon
14  *    including a substantially similar Disclaimer requirement for further
15  *    binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGES.
29  *
30  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
31  */
32 /*
33  * Peripheral driver interface between CAM and CTL (CAM Target Layer).
34  *
35  * Author: Ken Merry <ken@FreeBSD.org>
36  */
37 
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include <sys/param.h>
42 #include <sys/queue.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/condvar.h>
48 #include <sys/malloc.h>
49 #include <sys/bus.h>
50 #include <sys/endian.h>
51 #include <sys/sbuf.h>
52 #include <sys/sysctl.h>
53 #include <sys/types.h>
55 #include <machine/bus.h>
56 
57 #include <cam/cam.h>
58 #include <cam/cam_ccb.h>
59 #include <cam/cam_periph.h>
60 #include <cam/cam_queue.h>
61 #include <cam/cam_xpt_periph.h>
62 #include <cam/cam_debug.h>
63 #include <cam/cam_sim.h>
64 #include <cam/cam_xpt.h>
65 
66 #include <cam/scsi/scsi_all.h>
67 #include <cam/scsi/scsi_message.h>
68 
69 #include <cam/ctl/ctl_io.h>
70 #include <cam/ctl/ctl.h>
71 #include <cam/ctl/ctl_frontend.h>
72 #include <cam/ctl/ctl_util.h>
73 #include <cam/ctl/ctl_error.h>
74 
75 typedef enum {
76 	CTLFE_CCB_DEFAULT	= 0x00
77 } ctlfe_ccb_types;
78 
79 struct ctlfe_softc {
80 	struct ctl_frontend fe;
81 	path_id_t path_id;
82 	struct cam_sim *sim;
83 	char port_name[DEV_IDLEN];
84 	struct mtx lun_softc_mtx;
85 	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
86 	STAILQ_ENTRY(ctlfe_softc) links;
87 };
88 
89 STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
90 struct mtx ctlfe_list_mtx;
91 static char ctlfe_mtx_desc[] = "ctlfelist";
92 static int ctlfe_dma_enabled = 1;
93 #ifdef CTLFE_INIT_ENABLE
94 static int ctlfe_max_targets = 1;
95 static int ctlfe_num_targets = 0;
96 #endif
97 
98 typedef enum {
99 	CTLFE_LUN_NONE		= 0x00,
100 	CTLFE_LUN_WILDCARD	= 0x01
101 } ctlfe_lun_flags;
102 
103 struct ctlfe_lun_softc {
104 	struct ctlfe_softc *parent_softc;
105 	struct cam_periph *periph;
106 	ctlfe_lun_flags flags;
107 	struct callout dma_callout;
108 	uint64_t ccbs_alloced;
109 	uint64_t ccbs_freed;
110 	uint64_t ctios_sent;
111 	uint64_t ctios_returned;
112 	uint64_t atios_sent;
113 	uint64_t atios_returned;
114 	uint64_t inots_sent;
115 	uint64_t inots_returned;
116 	/* bus_dma_tag_t dma_tag; */
117 	TAILQ_HEAD(, ccb_hdr) work_queue;
118 	STAILQ_ENTRY(ctlfe_lun_softc) links;
119 };
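
/*
 * The *_sent/*_returned counter pairs above track CCBs outstanding at the
 * SIM; ctlfe_free_ccb() drops the peripheral reference once the ATIO and
 * INOT pairs balance, and ctlfeoninvalidate() reports any that are still
 * outstanding when the LUN goes away.
 */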
120 
121 typedef enum {
122 	CTLFE_CMD_NONE		= 0x00,
123 	CTLFE_CMD_PIECEWISE	= 0x01
124 } ctlfe_cmd_flags;
125 
126 /*
127  * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h.
128  * Currently that is 600 bytes.
129  */
130 struct ctlfe_lun_cmd_info {
131 	int cur_transfer_index;
132 	ctlfe_cmd_flags flags;
133 	/*
134 	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
135 	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
136 	 * i386 and 512 bytes on amd64.
137 	 */
138 	bus_dma_segment_t cam_sglist[32];
139 };
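
/*
 * One possible hardening step here (a sketch only, not wired in): a
 * compile-time CTASSERT() from <sys/systm.h> would catch this structure
 * growing past the space reserved in the ctl_io header earlier than the
 * runtime KASSERT in ctlfestart() does:
 *
 *	CTASSERT(sizeof(struct ctlfe_lun_cmd_info) < CTL_PORT_PRIV_SIZE);
 */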
140 
141 /*
142  * When we register the adapter/bus, request that this many ctl_ios be
143  * allocated.  This should be the maximum supported by the adapter, but we
144  * currently don't have a way to get that back from the path inquiry.
145  * XXX KDM add that to the path inquiry.
146  */
147 #define	CTLFE_REQ_CTL_IO	4096
148 /*
149  * Number of Accept Target I/O CCBs to allocate and queue down to the
150  * adapter per LUN.
151  * XXX KDM should this be controlled by CTL?
152  */
153 #define	CTLFE_ATIO_PER_LUN	1024
154 /*
155  * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
156  * allocate and queue down to the adapter per LUN.
157  * XXX KDM should this be controlled by CTL?
158  */
159 #define	CTLFE_IN_PER_LUN	1024
160 
161 /*
162  * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
163  * status to the initiator.  The SIM is expected to have its own timeouts,
164  * so we're not putting this timeout around the CCB execution time.  The
165  * SIM should timeout and let us know if it has an issue.
166  */
167 #define	CTLFE_DMA_TIMEOUT	60
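
/*
 * Sketch of how this timeout would typically be armed with the per-LUN
 * dma_callout (a hypothetical call site, for illustration only):
 *
 *	callout_reset(&softc->dma_callout, CTLFE_DMA_TIMEOUT * hz,
 *	    ctlfe_dma_timeout, softc);
 */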
168 
169 /*
170  * Turn this on to enable extra debugging prints.
171  */
172 #if 0
173 #define	CTLFE_DEBUG
174 #endif
175 
176 /*
177  * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
178  * in the FreeBSD initiator that makes it unable to rescan the target if
179  * the target gets rebooted and the WWNN/WWPN stay the same.
180  */
181 #if 0
182 #define	RANDOM_WWNN
183 #endif
184 
185 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW,
186 	   &ctlfe_dma_enabled, 0, "DMA enabled");
187 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
188 
189 #define	ccb_type	ppriv_field0
190 /* This is only used in the ATIO */
191 #define	io_ptr		ppriv_ptr1
192 
193 /* This is only used in the CTIO */
194 #define	ccb_atio	ppriv_ptr1
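
/*
 * Rough usage sketch of the ppriv mappings above (see ctlfestart() and
 * ctlfedone()): the ATIO's io_ptr carries the ctl_io allocated for the
 * command, and each CTIO's ccb_atio points back at the originating ATIO:
 *
 *	atio->ccb_h.io_ptr = io;
 *	start_ccb->ccb_h.ccb_atio = atio;
 */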
195 
196 int			ctlfeinitialize(void);
197 void			ctlfeshutdown(void);
198 static periph_init_t	ctlfeinit;
199 static void		ctlfeasync(void *callback_arg, uint32_t code,
200 				   struct cam_path *path, void *arg);
201 static periph_ctor_t	ctlferegister;
202 static periph_oninv_t	ctlfeoninvalidate;
203 static periph_dtor_t	ctlfecleanup;
204 static periph_start_t	ctlfestart;
205 static void		ctlfedone(struct cam_periph *periph,
206 				  union ccb *done_ccb);
207 
208 static void 		ctlfe_onoffline(void *arg, int online);
209 static void 		ctlfe_online(void *arg);
210 static void 		ctlfe_offline(void *arg);
211 static int 		ctlfe_targ_enable(void *arg, struct ctl_id targ_id);
212 static int 		ctlfe_targ_disable(void *arg, struct ctl_id targ_id);
213 static int 		ctlfe_lun_enable(void *arg, struct ctl_id targ_id,
214 					 int lun_id);
215 static int 		ctlfe_lun_disable(void *arg, struct ctl_id targ_id,
216 					  int lun_id);
217 static void		ctlfe_dump_sim(struct cam_sim *sim);
218 static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
219 static void		ctlfe_dma_timeout(void *arg);
220 static void 		ctlfe_datamove_done(union ctl_io *io);
221 static void 		ctlfe_dump(void);
222 
223 static struct periph_driver ctlfe_driver =
224 {
225 	ctlfeinit, "ctl",
226 	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0
227 };
228 
229 static int ctlfe_module_event_handler(module_t, int /*modeventtype_t*/, void *);
230 
231 /*
232  * We're not using PERIPHDRIVER_DECLARE(), because it runs at SI_SUB_DRIVERS,
233  * and that happens before CTL gets initialized.
234  */
235 static moduledata_t ctlfe_moduledata = {
236 	"ctlfe",
237 	ctlfe_module_event_handler,
238 	NULL
239 };
240 
241 DECLARE_MODULE(ctlfe, ctlfe_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
242 MODULE_VERSION(ctlfe, 1);
243 MODULE_DEPEND(ctlfe, ctl, 1, 1, 1);
244 MODULE_DEPEND(ctlfe, cam, 1, 1, 1);
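
/*
 * For reference, the usual declaration (not used here for the reason
 * given above) would have been:
 *
 *	PERIPHDRIVER_DECLARE(ctl, ctlfe_driver);
 */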
245 
246 extern struct ctl_softc *control_softc;
247 
248 void
249 ctlfeshutdown(void)
250 {
251 	return;
252 }
253 
254 void
255 ctlfeinit(void)
256 {
257 	cam_status status;
258 
259 	STAILQ_INIT(&ctlfe_softc_list);
260 
261 	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
262 
263 	KASSERT(control_softc != NULL, ("CTL is not initialized!"));
264 
265 	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
266 				    AC_CONTRACT, ctlfeasync, NULL, NULL);
267 
268 	if (status != CAM_REQ_CMP) {
269 		printf("ctl: Failed to attach async callback due to CAM "
270 		       "status 0x%x!\n", status);
271 	}
272 }
273 
274 static int
275 ctlfe_module_event_handler(module_t mod, int what, void *arg)
276 {
277 
278 	switch (what) {
279 	case MOD_LOAD:
280 		periphdriver_register(&ctlfe_driver);
281 		return (0);
282 	case MOD_UNLOAD:
283 		return (EBUSY);
284 	default:
285 		return (EOPNOTSUPP);
286 	}
287 }
288 
289 static void
290 ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
291 {
292 
293 #ifdef CTLFEDEBUG
294 	printf("%s: entered\n", __func__);
295 #endif
296 
297 	/*
298 	 * When a new path gets registered, and it is capable of target
299 	 * mode, go ahead and attach.  Later on, we may need to be more
300 	 * selective, but for now this will be sufficient.
301  	 */
302 	switch (code) {
303 	case AC_PATH_REGISTERED: {
304 		struct ctl_frontend *fe;
305 		struct ctlfe_softc *bus_softc;
306 		struct ccb_pathinq *cpi;
307 		int retval;
308 
309 		cpi = (struct ccb_pathinq *)arg;
310 
311 		/* Don't attach if it doesn't support target mode */
312 		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
313 #ifdef CTLFEDEBUG
314 			printf("%s: SIM %s%d doesn't support target mode\n",
315 			       __func__, cpi->dev_name, cpi->unit_number);
316 #endif
317 			break;
318 		}
319 
320 #ifdef CTLFE_INIT_ENABLE
321 		if (ctlfe_num_targets >= ctlfe_max_targets) {
322 			union ccb *ccb;
323 
324 			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
325 						  M_NOWAIT | M_ZERO);
326 			if (ccb == NULL) {
327 				printf("%s: unable to malloc CCB!\n", __func__);
328 				return;
329 			}
330 			xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
331 
332 			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
333 			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
334 			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;
335 
336 			xpt_action(ccb);
337 
338 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
339 			     CAM_REQ_CMP) {
340 				printf("%s: SIM %s%d (path id %d) initiator "
341 				       "enable failed with status %#x\n",
342 				       __func__, cpi->dev_name,
343 				       cpi->unit_number, cpi->ccb_h.path_id,
344 				       ccb->ccb_h.status);
345 			} else {
346 				printf("%s: SIM %s%d (path id %d) initiator "
347 				       "enable succeeded\n",
348 				       __func__, cpi->dev_name,
349 				       cpi->unit_number, cpi->ccb_h.path_id);
350 			}
351 
352 			free(ccb, M_TEMP);
353 
354 			break;
355 		} else {
356 			ctlfe_num_targets++;
357 		}
358 
359 		printf("%s: ctlfe_num_targets = %d\n", __func__,
360 		       ctlfe_num_targets);
361 #endif /* CTLFE_INIT_ENABLE */
362 
363 		/*
364 		 * We're in an interrupt context here, so we have to
365 		 * use M_NOWAIT.  Of course this means trouble if we
366 		 * can't allocate memory.
367 		 */
368 		bus_softc = malloc(sizeof(*bus_softc), M_CTLFE,
369 				   M_NOWAIT | M_ZERO);
370 		if (bus_softc == NULL) {
371 			printf("%s: unable to malloc %zu bytes for softc\n",
372 			       __func__, sizeof(*bus_softc));
373 			return;
374 		}
375 
376 		bus_softc->path_id = cpi->ccb_h.path_id;
377 		bus_softc->sim = xpt_path_sim(path);
378 		mtx_init(&bus_softc->lun_softc_mtx, "LUN softc mtx", NULL,
379 		    MTX_DEF);
380 		STAILQ_INIT(&bus_softc->lun_softc_list);
381 
382 		fe = &bus_softc->fe;
383 
384 		/*
385 		 * XXX KDM should we be more accurate here?
386 		 */
387 		if (cpi->transport == XPORT_FC)
388 			fe->port_type = CTL_PORT_FC;
389 		else
390 			fe->port_type = CTL_PORT_SCSI;
391 
392 		/* XXX KDM what should the real number be here? */
393 		fe->num_requested_ctl_io = 4096;
394 		snprintf(bus_softc->port_name, sizeof(bus_softc->port_name),
395 			 "%s%d", cpi->dev_name, cpi->unit_number);
396 		/*
397 		 * XXX KDM it would be nice to allocate storage in the
398 		 * frontend structure itself.
399 	 	 */
400 		fe->port_name = bus_softc->port_name;
401 		fe->physical_port = cpi->unit_number;
402 		fe->virtual_port = cpi->bus_id;
403 		fe->port_online = ctlfe_online;
404 		fe->port_offline = ctlfe_offline;
405 		fe->onoff_arg = bus_softc;
406 		fe->targ_enable = ctlfe_targ_enable;
407 		fe->targ_disable = ctlfe_targ_disable;
408 		fe->lun_enable = ctlfe_lun_enable;
409 		fe->lun_disable = ctlfe_lun_disable;
410 		fe->targ_lun_arg = bus_softc;
411 		fe->fe_datamove = ctlfe_datamove_done;
412 		fe->fe_done = ctlfe_datamove_done;
413 		fe->fe_dump = ctlfe_dump;
414 		/*
415 		 * XXX KDM the path inquiry doesn't give us the maximum
416 		 * number of targets supported.
417 		 */
418 		fe->max_targets = cpi->max_target;
419 		fe->max_target_id = cpi->max_target;
420 
421 		/*
422 		 * XXX KDM need to figure out whether we're the master or
423 		 * slave.
424 		 */
425 #ifdef CTLFEDEBUG
426 		printf("%s: calling ctl_frontend_register() for %s%d\n",
427 		       __func__, cpi->dev_name, cpi->unit_number);
428 #endif
429 		retval = ctl_frontend_register(fe, /*master_SC*/ 1);
430 		if (retval != 0) {
431 			printf("%s: ctl_frontend_register() failed with "
432 			       "error %d!\n", __func__, retval);
433 			mtx_destroy(&bus_softc->lun_softc_mtx);
434 			free(bus_softc, M_CTLFE);
435 			break;
436 		} else {
437 			mtx_lock(&ctlfe_list_mtx);
438 			STAILQ_INSERT_TAIL(&ctlfe_softc_list, bus_softc, links);
439 			mtx_unlock(&ctlfe_list_mtx);
440 		}
441 
442 		break;
443 	}
444 	case AC_PATH_DEREGISTERED: {
445 		struct ctlfe_softc *softc = NULL;
446 
447 		mtx_lock(&ctlfe_list_mtx);
448 		STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
449 			if (softc->path_id == xpt_path_path_id(path)) {
450 				STAILQ_REMOVE(&ctlfe_softc_list, softc,
451 						ctlfe_softc, links);
452 				break;
453 			}
454 		}
455 		mtx_unlock(&ctlfe_list_mtx);
456 
457 		if (softc != NULL) {
458 			/*
459 			 * XXX KDM are we certain at this point that there
460 			 * are no outstanding commands for this frontend?
461 			 */
462 			ctl_frontend_deregister(&softc->fe);
463 			mtx_destroy(&softc->lun_softc_mtx);
464 			free(softc, M_CTLFE);
465 		}
466 		break;
467 	}
468 	case AC_CONTRACT: {
469 		struct ac_contract *ac;
470 
471 		ac = (struct ac_contract *)arg;
472 
473 		switch (ac->contract_number) {
474 		case AC_CONTRACT_DEV_CHG: {
475 			struct ac_device_changed *dev_chg;
476 			struct ctlfe_softc *softc;
477 			int retval, found;
478 
479 			dev_chg = (struct ac_device_changed *)ac->contract_data;
480 
481 			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
482 			       __func__, dev_chg->wwpn, dev_chg->port,
483 			       xpt_path_path_id(path), dev_chg->target,
484 			       (dev_chg->arrived == 0) ?  "left" : "arrived");
485 
486 			found = 0;
487 
488 			mtx_lock(&ctlfe_list_mtx);
489 			STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
490 				if (softc->path_id == xpt_path_path_id(path)) {
491 					found = 1;
492 					break;
493 				}
494 			}
495 			mtx_unlock(&ctlfe_list_mtx);
496 
497 			if (found == 0) {
498 				printf("%s: CTL port for CAM path %u not "
499 				       "found!\n", __func__,
500 				       xpt_path_path_id(path));
501 				break;
502 			}
503 			if (dev_chg->arrived != 0) {
504 				retval = ctl_add_initiator(dev_chg->wwpn,
505 					softc->fe.targ_port, dev_chg->target);
506 			} else {
507 				retval = ctl_remove_initiator(
508 					softc->fe.targ_port, dev_chg->target);
509 			}
510 
511 			if (retval != 0) {
512 				printf("%s: could not %s port %d iid %u "
513 				       "WWPN %#jx!\n", __func__,
514 				       (dev_chg->arrived != 0) ? "add" :
515 				       "remove", softc->fe.targ_port,
516 				       dev_chg->target,
517 				       (uintmax_t)dev_chg->wwpn);
518 			}
519 			break;
520 		}
521 		default:
522 			printf("%s: unsupported contract number %ju\n",
523 			       __func__, (uintmax_t)ac->contract_number);
524 			break;
525 		}
526 		break;
527 	}
528 	default:
529 		break;
530 	}
531 }
532 
533 static cam_status
534 ctlferegister(struct cam_periph *periph, void *arg)
535 {
536 	struct ctlfe_softc *bus_softc;
537 	struct ctlfe_lun_softc *softc;
538 	union ccb en_lun_ccb;
539 	cam_status status;
540 	int i;
541 
542 	softc = (struct ctlfe_lun_softc *)arg;
543 	bus_softc = softc->parent_softc;
544 
545 	TAILQ_INIT(&softc->work_queue);
546 	softc->periph = periph;
547 
548 	callout_init_mtx(&softc->dma_callout, xpt_path_mtx(periph->path),
549 	    /*flags*/ 0);
550 	periph->softc = softc;
551 
552 	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
553 	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
554 	en_lun_ccb.cel.grp6_len = 0;
555 	en_lun_ccb.cel.grp7_len = 0;
556 	en_lun_ccb.cel.enable = 1;
557 	xpt_action(&en_lun_ccb);
558 	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
559 	if (status != CAM_REQ_CMP) {
560 		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
561 			  __func__, en_lun_ccb.ccb_h.status);
562 		return (status);
563 	}
564 
565 	status = CAM_REQ_CMP;
566 
567 	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
568 		union ccb *new_ccb;
569 
570 		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
571 					      M_ZERO|M_NOWAIT);
572 		if (new_ccb == NULL) {
573 			status = CAM_RESRC_UNAVAIL;
574 			break;
575 		}
576 		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
577 		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
578 		new_ccb->ccb_h.cbfcnp = ctlfedone;
579 		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
580 		xpt_action(new_ccb);
581 		softc->atios_sent++;
582 		status = new_ccb->ccb_h.status;
583 		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
584 			free(new_ccb, M_CTLFE);
585 			break;
586 		}
587 	}
588 
589 	status = cam_periph_acquire(periph);
590 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
591 		xpt_print(periph->path, "%s: could not acquire reference "
592 			  "count, status = %#x\n", __func__, status);
593 		return (status);
594 	}
595 
596 	if (i == 0) {
597 		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
598 			  "status 0x%x\n", __func__, status);
599 		return (CAM_REQ_CMP_ERR);
600 	}
601 
602 	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
603 		union ccb *new_ccb;
604 
605 		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
606 					      M_ZERO|M_NOWAIT);
607 		if (new_ccb == NULL) {
608 			status = CAM_RESRC_UNAVAIL;
609 			break;
610 		}
611 
612 		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
613 		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
614 		new_ccb->ccb_h.cbfcnp = ctlfedone;
615 		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
616 		xpt_action(new_ccb);
617 		softc->inots_sent++;
618 		status = new_ccb->ccb_h.status;
619 		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
620 			/*
621 			 * Note that we don't free the CCB here.  If the
622 			 * status is not CAM_REQ_INPROG, then we're
623 			 * probably talking to a SIM that says it is
624 			 * target-capable but doesn't support the
625 			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
626 			 * older API.  In that case, it'll call xpt_done()
627 			 * on the CCB, and we need to free it in our done
628 			 * routine as a result.
629 			 */
630 			break;
631 		}
632 	}
633 	if ((i == 0)
634 	 || (status != CAM_REQ_INPROG)) {
635 		xpt_print(periph->path, "%s: could not allocate immediate "
636 			  "notify CCBs, status 0x%x\n", __func__, status);
637 		return (CAM_REQ_CMP_ERR);
638 	}
639 	return (CAM_REQ_CMP);
640 }
641 
642 static void
643 ctlfeoninvalidate(struct cam_periph *periph)
644 {
645 	union ccb en_lun_ccb;
646 	cam_status status;
647 	struct ctlfe_softc *bus_softc;
648 	struct ctlfe_lun_softc *softc;
649 
650 	softc = (struct ctlfe_lun_softc *)periph->softc;
651 
652 	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
653 	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
654 	en_lun_ccb.cel.grp6_len = 0;
655 	en_lun_ccb.cel.grp7_len = 0;
656 	en_lun_ccb.cel.enable = 0;
657 	xpt_action(&en_lun_ccb);
658 	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
659 	if (status != CAM_REQ_CMP) {
660 		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
661 			  __func__, en_lun_ccb.ccb_h.status);
662 		/*
663 		 * XXX KDM what do we do now?
664 		 */
665 	}
666 	xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju "
667 		  "INOTs outstanding, %d refs\n", softc->atios_sent -
668 		  softc->atios_returned, softc->inots_sent -
669 		  softc->inots_returned, periph->refcount);
670 
671 	bus_softc = softc->parent_softc;
672 	mtx_lock(&bus_softc->lun_softc_mtx);
673 	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
674 	mtx_unlock(&bus_softc->lun_softc_mtx);
675 }
676 
677 static void
678 ctlfecleanup(struct cam_periph *periph)
679 {
680 	struct ctlfe_lun_softc *softc;
681 
682 	xpt_print(periph->path, "%s: Called\n", __func__);
683 
684 	softc = (struct ctlfe_lun_softc *)periph->softc;
685 
686 	/*
687 	 * XXX KDM is there anything else that needs to be done here?
688 	 */
689 
690 	callout_stop(&softc->dma_callout);
691 
692 	free(softc, M_CTLFE);
693 }
694 
695 static void
696 ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
697 {
698 	struct ctlfe_lun_softc *softc;
699 	struct ccb_hdr *ccb_h;
700 
701 	softc = (struct ctlfe_lun_softc *)periph->softc;
702 
703 	softc->ccbs_alloced++;
704 
705 	start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT;
706 
707 	ccb_h = TAILQ_FIRST(&softc->work_queue);
708 	if (ccb_h == NULL) {
709 		softc->ccbs_freed++;
710 		xpt_release_ccb(start_ccb);
711 	} else {
712 		struct ccb_accept_tio *atio;
713 		struct ccb_scsiio *csio;
714 		uint8_t *data_ptr;
715 		uint32_t dxfer_len;
716 		ccb_flags flags;
717 		union ctl_io *io;
718 		uint8_t scsi_status;
719 
720 		/* Take the ATIO off the work queue */
721 		TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
722 		atio = (struct ccb_accept_tio *)ccb_h;
723 		io = (union ctl_io *)ccb_h->io_ptr;
724 		csio = &start_ccb->csio;
725 
726 		flags = atio->ccb_h.flags &
727 			(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
728 
729 		if ((io == NULL)
730 		 || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
731 			/*
732 			 * We're done, send status back.
733 			 */
734 			flags |= CAM_SEND_STATUS;
735 			if (io == NULL) {
736 				scsi_status = SCSI_STATUS_BUSY;
737 				csio->sense_len = 0;
738 			} else if ((io->io_hdr.status & CTL_STATUS_MASK) ==
739 				   CTL_CMD_ABORTED) {
740 				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
741 
742 				/*
743 				 * If this command was aborted, we don't
744 				 * need to send status back to the SIM.
745 				 * Just free the CTIO and ctl_io, and
746 				 * recycle the ATIO back to the SIM.
747 				 */
748 				xpt_print(periph->path, "%s: aborted "
749 					  "command 0x%04x discarded\n",
750 					  __func__, io->scsiio.tag_num);
751 				ctl_free_io(io);
752 				/*
753 				 * For a wildcard attachment, commands can
754 				 * come in with a specific target/lun.  Reset
755 				 * the target and LUN fields back to the
756 				 * wildcard values before we send them back
757 				 * down to the SIM.  The SIM has a wildcard
758 				 * LUN enabled, not whatever target/lun
759 				 * these happened to be.
760 				 */
761 				if (softc->flags & CTLFE_LUN_WILDCARD) {
762 					atio->ccb_h.target_id =
763 						CAM_TARGET_WILDCARD;
764 					atio->ccb_h.target_lun =
765 						CAM_LUN_WILDCARD;
766 				}
767 
768 				if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
769 					cam_release_devq(periph->path,
770 							 /*relsim_flags*/0,
771 							 /*reduction*/0,
772  							 /*timeout*/0,
773 							 /*getcount_only*/0);
774 					atio->ccb_h.status &= ~CAM_DEV_QFRZN;
775 				}
776 
777 				ccb_h = TAILQ_FIRST(&softc->work_queue);
778 
779 				if (atio->ccb_h.func_code !=
780 				    XPT_ACCEPT_TARGET_IO) {
781 					xpt_print(periph->path, "%s: func_code "
782 						  "is %#x\n", __func__,
783 						  atio->ccb_h.func_code);
784 				}
785 				start_ccb->ccb_h.func_code = XPT_ABORT;
786 				start_ccb->cab.abort_ccb = (union ccb *)atio;
787 
788 				/* Tell the SIM that we've aborted this ATIO */
789 				xpt_action(start_ccb);
790 				softc->ccbs_freed++;
791 				xpt_release_ccb(start_ccb);
792 
793 				/*
794 				 * Send the ATIO back down to the SIM.
795 				 */
796 				xpt_action((union ccb *)atio);
797 				softc->atios_sent++;
798 
799 				/*
800 				 * If we still have work to do, ask for
801 				 * another CCB.  Otherwise, deactivate our
802 				 * callout.
803 				 */
804 				if (ccb_h != NULL)
805 					xpt_schedule(periph, /*priority*/ 1);
806 				else
807 					callout_stop(&softc->dma_callout);
808 
809 				return;
810 			} else {
811 				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
812 				scsi_status = io->scsiio.scsi_status;
813 				csio->sense_len = io->scsiio.sense_len;
814 			}
815 			data_ptr = NULL;
816 			dxfer_len = 0;
817 			if (io == NULL) {
818 				printf("%s: tag %04x io is NULL\n", __func__,
819 				       atio->tag_id);
820 			} else {
821 #ifdef CTLFEDEBUG
822 				printf("%s: tag %04x status %x\n", __func__,
823 				       atio->tag_id, io->io_hdr.status);
824 #endif
825 			}
826 			csio->sglist_cnt = 0;
827 			if (csio->sense_len != 0) {
828 				csio->sense_data = io->scsiio.sense_data;
829 				flags |= CAM_SEND_SENSE;
830 			} else if (scsi_status == SCSI_STATUS_CHECK_COND) {
831 				xpt_print(periph->path, "%s: check condition "
832 					  "with no sense\n", __func__);
833 			}
834 		} else {
835 			struct ctlfe_lun_cmd_info *cmd_info;
836 
837 			/*
838 			 * Datamove call; we need to set up the S/G list.
839 			 */
840 
841 			cmd_info = (struct ctlfe_lun_cmd_info *)
842 				io->io_hdr.port_priv;
843 
844 			KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE,
845 				("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < "
846 				"CTL_PORT_PRIV_SIZE %d", __func__,
847 				sizeof(*cmd_info), CTL_PORT_PRIV_SIZE));
848 			io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED;
849 
850 			/*
851 			 * Need to zero this, in case it has been used for
852 			 * a previous datamove for this particular I/O.
853 			 */
854 			bzero(cmd_info, sizeof(*cmd_info));
855 			scsi_status = 0;
856 
857 			/*
858 			 * Set the direction, relative to the initiator.
859 			 */
860 			flags &= ~CAM_DIR_MASK;
861 			if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
862 			     CTL_FLAG_DATA_IN)
863 				flags |= CAM_DIR_IN;
864 			else
865 				flags |= CAM_DIR_OUT;
866 
867 			csio->cdb_len = atio->cdb_len;
868 
869 			flags &= ~CAM_DATA_MASK;
870 			if (io->scsiio.kern_sg_entries == 0) {
871 				/* No S/G list */
872 				data_ptr = io->scsiio.kern_data_ptr;
873 				dxfer_len = io->scsiio.kern_data_len;
874 				csio->sglist_cnt = 0;
875 
876 				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
877 					flags |= CAM_DATA_PADDR;
878 				else
879 					flags |= CAM_DATA_VADDR;
880 			} else if (io->scsiio.kern_sg_entries <=
881 				   (sizeof(cmd_info->cam_sglist)/
882 				   sizeof(cmd_info->cam_sglist[0]))) {
883 				/*
884 				 * S/G list with physical or virtual pointers.
885 				 * Just populate the CAM S/G list with the
886 				 * pointers.
887 				 */
888 				int i;
889 				struct ctl_sg_entry *ctl_sglist;
890 				bus_dma_segment_t *cam_sglist;
891 
892 				ctl_sglist = (struct ctl_sg_entry *)
893 					io->scsiio.kern_data_ptr;
894 				cam_sglist = cmd_info->cam_sglist;
895 
896 				for (i = 0; i < io->scsiio.kern_sg_entries;i++){
897 					cam_sglist[i].ds_addr =
898 						(bus_addr_t)ctl_sglist[i].addr;
899 					cam_sglist[i].ds_len =
900 						ctl_sglist[i].len;
901 				}
902 				csio->sglist_cnt = io->scsiio.kern_sg_entries;
903 				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
904 					flags |= CAM_DATA_SG_PADDR;
905 				else
906 					flags |= CAM_DATA_SG;
907 				data_ptr = (uint8_t *)cam_sglist;
908 				dxfer_len = io->scsiio.kern_data_len;
909 			} else {
910 				/* S/G list with virtual pointers */
911 				struct ctl_sg_entry *sglist;
912 				int *ti;
913 
914 				/*
915 				 * If we have more S/G list pointers than
916 				 * will fit in the available storage in the
917 				 * cmd_info structure inside the ctl_io header,
918 				 * then we need to send down the pointers
919 				 * one element at a time.
920 				 */
921 
922 				sglist = (struct ctl_sg_entry *)
923 					io->scsiio.kern_data_ptr;
924 				ti = &cmd_info->cur_transfer_index;
925 				data_ptr = sglist[*ti].addr;
926 				dxfer_len = sglist[*ti].len;
927 				csio->sglist_cnt = 0;
928 				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
929 					flags |= CAM_DATA_PADDR;
930 				else
931 					flags |= CAM_DATA_VADDR;
932 				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
933 				(*ti)++;
934 			}
935 
936 			io->scsiio.ext_data_filled += dxfer_len;
937 
938 			if (io->scsiio.ext_data_filled >
939 			    io->scsiio.kern_total_len) {
940 				xpt_print(periph->path, "%s: tag 0x%04x "
941 					  "fill len %u > total %u\n",
942 					  __func__, io->scsiio.tag_num,
943 					  io->scsiio.ext_data_filled,
944 					  io->scsiio.kern_total_len);
945 			}
946 		}
947 
948 #ifdef CTLFEDEBUG
949 		printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
950 		       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
951 		       atio->tag_id, flags, data_ptr, dxfer_len);
952 #endif
953 
954 		/*
955 		 * Valid combinations:
956 		 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
957 		 *    sglist_cnt = 0
958 		 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
959 		 *    sglist_cnt = 0
960 		 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
961 		 *    sglist_cnt != 0
962 		 */
963 #ifdef CTLFEDEBUG
964 		if (((flags & CAM_SEND_STATUS)
965 		  && (((flags & CAM_DATA_SG) != 0)
966 		   || (dxfer_len != 0)
967 		   || (csio->sglist_cnt != 0)))
968 		 || (((flags & CAM_SEND_STATUS) == 0)
969 		  && (dxfer_len == 0))
970 		 || ((flags & CAM_DATA_SG)
971 		  && (csio->sglist_cnt == 0))
972 		 || (((flags & CAM_DATA_SG) == 0)
973 		  && (csio->sglist_cnt != 0))) {
974 			printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
975 			       "%d sg %u\n", __func__, atio->tag_id,
976 			       atio->cdb_io.cdb_bytes[0], flags, dxfer_len,
977 			       csio->sglist_cnt);
978 			if (io != NULL) {
979 				printf("%s: tag %04x io status %#x\n", __func__,
980 				       atio->tag_id, io->io_hdr.status);
981 			} else {
982 				printf("%s: tag %04x no associated io\n",
983 				       __func__, atio->tag_id);
984 			}
985 		}
986 #endif
987 		cam_fill_ctio(csio,
988 			      /*retries*/ 2,
989 			      ctlfedone,
990 			      flags,
991 			      (flags & CAM_TAG_ACTION_VALID) ?
992 			       MSG_SIMPLE_Q_TAG : 0,
993 			      atio->tag_id,
994 			      atio->init_id,
995 			      scsi_status,
996 			      /*data_ptr*/ data_ptr,
997 			      /*dxfer_len*/ dxfer_len,
998 			      /*timeout*/ 5 * 1000);
999 		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
1000 		start_ccb->ccb_h.ccb_atio = atio;
1001 		if (((flags & CAM_SEND_STATUS) == 0)
1002 		 && (io != NULL))
1003 			io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
1004 
1005 		softc->ctios_sent++;
1006 
1007 		cam_periph_unlock(periph);
1008 		xpt_action(start_ccb);
1009 		cam_periph_lock(periph);
1010 
1011 		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1012 			cam_release_devq(periph->path,
1013 					 /*relsim_flags*/0,
1014 					 /*reduction*/0,
1015  					 /*timeout*/0,
1016 					 /*getcount_only*/0);
1017 			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
1018 		}
1019 
1020 		ccb_h = TAILQ_FIRST(&softc->work_queue);
1021 	}
1022 	/*
1023 	 * If we still have work to do, ask for another CCB.  Otherwise,
1024 	 * deactivate our callout.
1025 	 */
1026 	if (ccb_h != NULL)
1027 		xpt_schedule(periph, /*priority*/ 1);
1028 	else
1029 		callout_stop(&softc->dma_callout);
1030 }
1031 
1032 static void
1033 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
1034 {
1035 	struct ctlfe_lun_softc *softc;
1036 
1037 	softc = (struct ctlfe_lun_softc *)periph->softc;
1038 
1039 	switch (ccb->ccb_h.func_code) {
1040 	case XPT_ACCEPT_TARGET_IO:
1041 		softc->atios_returned++;
1042 		break;
1043 	case XPT_IMMEDIATE_NOTIFY:
1044 	case XPT_NOTIFY_ACKNOWLEDGE:
1045 		softc->inots_returned++;
1046 		break;
1047 	default:
1048 		break;
1049 	}
1050 
1051 	free(ccb, M_CTLFE);
1052 
1053 	KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: "
1054 		"atios_returned %ju > atios_sent %ju", __func__,
1055 		softc->atios_returned, softc->atios_sent));
1056 	KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: "
1057 		"inots_returned %ju > inots_sent %ju", __func__,
1058 		softc->inots_returned, softc->inots_sent));
1059 
1060 	/*
1061 	 * If we have received all of our CCBs, we can release our
1062 	 * reference on the peripheral driver.  It will probably go away
1063 	 * now.
1064 	 */
1065 	if ((softc->atios_returned == softc->atios_sent)
1066 	 && (softc->inots_returned == softc->inots_sent)) {
1067 		cam_periph_release_locked(periph);
1068 	}
1069 }
1070 
1071 static int
1072 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
1073 {
1074 	uint64_t lba;
1075 	uint32_t num_blocks, nbc;
1076 	uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)?
1077 	    atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;
1078 
1079 	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */
1080 
1081 	switch (cmdbyt[0]) {
1082 	case READ_6:
1083 	case WRITE_6:
1084 	{
1085 		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
1086 		lba = scsi_3btoul(cdb->addr);
1087 		lba &= 0x1fffff;
1088 		num_blocks = cdb->length;
1089 		if (num_blocks == 0)
1090 			num_blocks = 256;
1091 		lba += nbc;
1092 		num_blocks -= nbc;
1093 		scsi_ulto3b(lba, cdb->addr);
1094 		cdb->length = num_blocks;
1095 		break;
1096 	}
1097 	case READ_10:
1098 	case WRITE_10:
1099 	{
1100 		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
1101 		lba = scsi_4btoul(cdb->addr);
1102 		num_blocks = scsi_2btoul(cdb->length);
1103 		lba += nbc;
1104 		num_blocks -= nbc;
1105 		scsi_ulto4b(lba, cdb->addr);
1106 		scsi_ulto2b(num_blocks, cdb->length);
1107 		break;
1108 	}
1109 	case READ_12:
1110 	case WRITE_12:
1111 	{
1112 		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
1113 		lba = scsi_4btoul(cdb->addr);
1114 		num_blocks = scsi_4btoul(cdb->length);
1115 		lba += nbc;
1116 		num_blocks -= nbc;
1117 		scsi_ulto4b(lba, cdb->addr);
1118 		scsi_ulto4b(num_blocks, cdb->length);
1119 		break;
1120 	}
1121 	case READ_16:
1122 	case WRITE_16:
1123 	{
1124 		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
1125 		lba = scsi_8btou64(cdb->addr);
1126 		num_blocks = scsi_4btoul(cdb->length);
1127 		lba += nbc;
1128 		num_blocks -= nbc;
1129 		scsi_u64to8b(lba, cdb->addr);
1130 		scsi_ulto4b(num_blocks, cdb->length);
1131 		break;
1132 	}
1133 	default:
1134 		return (-1);
1135 	}
1136 	return (0);
1137 }
1138 
1139 static void
1140 ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
1141 {
1142 	struct ctlfe_lun_softc *softc;
1143 	struct ctlfe_softc *bus_softc;
1144 	struct ccb_accept_tio *atio = NULL;
1145 	union ctl_io *io = NULL;
1146 	struct mtx *mtx;
1147 
1148 	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
1149 	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
1150 #ifdef CTLFE_DEBUG
1151 	printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
1152 	       done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
1153 #endif
1154 
1155 	softc = (struct ctlfe_lun_softc *)periph->softc;
1156 	bus_softc = softc->parent_softc;
1157 	mtx = cam_periph_mtx(periph);
1158 	mtx_lock(mtx);
1159 
1160 	/*
1161 	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
1162 	 * need to be freed.  Most of the ATIOs and INOTs that come back
1163 	 * will be CCBs that are being returned from the SIM as a result of
1164 	 * our disabling the LUN.
1165 	 *
1166 	 * Other CCB types are handled in their respective cases below.
1167 	 */
1168 	if (periph->flags & CAM_PERIPH_INVALID) {
1169 		switch (done_ccb->ccb_h.func_code) {
1170 		case XPT_ACCEPT_TARGET_IO:
1171 		case XPT_IMMEDIATE_NOTIFY:
1172 		case XPT_NOTIFY_ACKNOWLEDGE:
1173 			ctlfe_free_ccb(periph, done_ccb);
1174 			goto out;
1175 		default:
1176 			break;
1177 		}
1178 
1179 	}
1180 	switch (done_ccb->ccb_h.func_code) {
1181 	case XPT_ACCEPT_TARGET_IO: {
1182 
1183 		atio = &done_ccb->atio;
1184 
1185 		softc->atios_returned++;
1186 
1187  resubmit:
1188 		/*
1189 		 * Allocate a ctl_io, pass it to CTL, and wait for the
1190 		 * datamove or done.
1191 		 */
1192 		io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
1193 		if (io == NULL) {
1194 			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1195 			atio->ccb_h.flags |= CAM_DIR_NONE;
1196 
1197 			printf("%s: ctl_alloc_io failed!\n", __func__);
1198 
1199 			/*
1200 			 * XXX KDM need to set SCSI_STATUS_BUSY, but there
1201 			 * is no field in the ATIO structure to do that,
1202 			 * and we aren't able to allocate a ctl_io here.
1203 			 * What to do?
1204 			 */
1205 			atio->sense_len = 0;
1206 			done_ccb->ccb_h.io_ptr = NULL;
1207 			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1208 					  periph_links.tqe);
1209 			xpt_schedule(periph, /*priority*/ 1);
1210 			break;
1211 		}
1212 		mtx_unlock(mtx);
1213 		ctl_zero_io(io);
1214 
1215 		/* Save pointers on both sides */
1216 		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
1217 		done_ccb->ccb_h.io_ptr = io;
1218 
1219 		/*
1220 		 * Only SCSI I/O comes down this path; resets, etc. come
1221 		 * down the immediate notify path below.
1222 		 */
1223 		io->io_hdr.io_type = CTL_IO_SCSI;
1224 		io->io_hdr.nexus.initid.id = atio->init_id;
1225 		io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
1226 		io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
1227 		io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
1228 		io->scsiio.tag_num = atio->tag_id;
1229 		switch (atio->tag_action) {
1230 		case CAM_TAG_ACTION_NONE:
1231 			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1232 			break;
1233 		case MSG_SIMPLE_TASK:
1234 			io->scsiio.tag_type = CTL_TAG_SIMPLE;
1235 			break;
1236 		case MSG_HEAD_OF_QUEUE_TASK:
1237 			io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
1238 			break;
1239 		case MSG_ORDERED_TASK:
1240 			io->scsiio.tag_type = CTL_TAG_ORDERED;
1241 			break;
1242 		case MSG_ACA_TASK:
1243 			io->scsiio.tag_type = CTL_TAG_ACA;
1244 			break;
1245 		default:
1246 			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1247 			printf("%s: unhandled tag type %#x!!\n", __func__,
1248 			       atio->tag_action);
1249 			break;
1250 		}
1251 		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
1252 			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
1253 			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
1254 		}
1255 		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
1256 		bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
1257 		      io->scsiio.cdb_len);
1258 
1259 #ifdef CTLFEDEBUG
1260 		printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
1261 		        (uintmax_t)io->io_hdr.nexus.initid.id,
1262 		        io->io_hdr.nexus.targ_port,
1263 		        (uintmax_t)io->io_hdr.nexus.targ_target.id,
1264 		        io->io_hdr.nexus.targ_lun,
1265 			io->scsiio.tag_num, io->scsiio.cdb[0]);
1266 #endif
1267 
1268 		ctl_queue(io);
1269 		return;
1270 	}
1271 	case XPT_CONT_TARGET_IO: {
1272 		int srr = 0;
1273 		uint32_t srr_off = 0;
1274 
1275 		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
1276 		io = (union ctl_io *)atio->ccb_h.io_ptr;
1277 
1278 		softc->ctios_returned++;
1279 #ifdef CTLFEDEBUG
1280 		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
1281 		       __func__, atio->tag_id, done_ccb->ccb_h.flags);
1282 #endif
1283 		/*
1284 		 * Handle the SRR case where the data pointer is pushed back.
1285 		 */
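		/*
		 * The check below looks for an extended message of length 5
		 * with extended message code 0 (MODIFY DATA POINTER); bytes
		 * 3-6 carry the data pointer offset, big-endian, which
		 * becomes srr_off.
		 */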
1286 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
1287 		    && done_ccb->csio.msg_ptr != NULL
1288 		    && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
1289 		    && done_ccb->csio.msg_ptr[1] == 5
1290 		    && done_ccb->csio.msg_ptr[2] == 0) {
1291 			srr = 1;
1292 			srr_off =
1293 			    (done_ccb->csio.msg_ptr[3] << 24)
1294 			    | (done_ccb->csio.msg_ptr[4] << 16)
1295 			    | (done_ccb->csio.msg_ptr[5] << 8)
1296 			    | (done_ccb->csio.msg_ptr[6]);
1297 		}
1298 
1299 		if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) {
1300 			/*
1301 			 * If status was being sent, the back end data is now
1302 			 * history. Hack it up and resubmit a new command with
1303 			 * the CDB adjusted. If the SIM does the right thing,
1304 			 * all of the resid math should work.
1305 			 */
1306 			softc->ccbs_freed++;
1307 			xpt_release_ccb(done_ccb);
1308 			ctl_free_io(io);
1309 			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
1310 				done_ccb = (union ccb *)atio;
1311 				goto resubmit;
1312 			}
1313 			/*
1314 			 * Fall through to doom....
1315 			 */
1316 		} else if (srr) {
1317 			/*
1318 			 * If we have an srr and we're still sending data, we
1319 			 * should be able to adjust offsets and cycle again.
1320 			 */
1321 			io->scsiio.kern_rel_offset =
1322 			    io->scsiio.ext_data_filled = srr_off;
1323 			io->scsiio.ext_data_len = io->scsiio.kern_total_len -
1324 			    io->scsiio.kern_rel_offset;
1325 			softc->ccbs_freed++;
1326 			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
1327 			xpt_release_ccb(done_ccb);
1328 			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1329 					  periph_links.tqe);
1330 			xpt_schedule(periph, /*priority*/ 1);
1331 			break;
1332 		}
1333 
1334 		/*
1335 		 * If we were sending status back to the initiator, free up
1336 		 * resources.  If we were doing a datamove, call the
1337 		 * datamove done routine.
1338 		 */
1339 		if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
1340 			softc->ccbs_freed++;
1341 			xpt_release_ccb(done_ccb);
1342 			ctl_free_io(io);
1343 			/*
1344 			 * For a wildcard attachment, commands can come in
1345 			 * with a specific target/lun.  Reset the target
1346 			 * and LUN fields back to the wildcard values before
1347 			 * we send them back down to the SIM.  The SIM has
1348 			 * a wildcard LUN enabled, not whatever target/lun
1349 			 * these happened to be.
1350 			 */
1351 			if (softc->flags & CTLFE_LUN_WILDCARD) {
1352 				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
1353 				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
1354 			}
1355 			if (periph->flags & CAM_PERIPH_INVALID) {
1356 				ctlfe_free_ccb(periph, (union ccb *)atio);
1357 			} else {
1358 				softc->atios_sent++;
1359 				mtx_unlock(mtx);
1360 				xpt_action((union ccb *)atio);
1361 				return;
1362 			}
1363 		} else {
1364 			struct ctlfe_lun_cmd_info *cmd_info;
1365 			struct ccb_scsiio *csio;
1366 
1367 			csio = &done_ccb->csio;
1368 			cmd_info = (struct ctlfe_lun_cmd_info *)
1369 				io->io_hdr.port_priv;
1370 
1371 			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
1372 
1373 			io->scsiio.ext_data_len += csio->dxfer_len;
1374 			if (io->scsiio.ext_data_len >
1375 			    io->scsiio.kern_total_len) {
1376 				xpt_print(periph->path, "%s: tag 0x%04x "
1377 					  "done len %u > total %u sent %u\n",
1378 					  __func__, io->scsiio.tag_num,
1379 					  io->scsiio.ext_data_len,
1380 					  io->scsiio.kern_total_len,
1381 					  io->scsiio.ext_data_filled);
1382 			}
1383 			/*
1384 			 * Translate CAM status to CTL status.  Success
1385 			 * does not change the overall ctl_io status.  In
1386 			 * that case we just set port_status to 0.  If we
1387 			 * have a failure, though, set a data phase error
1388 			 * for the overall ctl_io.
1389 			 */
1390 			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
1391 			case CAM_REQ_CMP:
1392 				io->io_hdr.port_status = 0;
1393 				break;
1394 			default:
1395 				/*
1396 				 * XXX KDM we probably need to figure out a
1397 				 * standard set of errors that the SIM
1398 				 * drivers should return in the event of a
1399 				 * data transfer failure.  A data phase
1400 				 * error will at least point the user to a
1401 				 * data transfer error of some sort.
1402 				 * Hopefully the SIM printed out some
1403 				 * additional information to give the user
1404 				 * a clue what happened.
1405 				 */
1406 				io->io_hdr.port_status = 0xbad1;
1407 				ctl_set_data_phase_error(&io->scsiio);
1408 				/*
1409 				 * XXX KDM figure out residual.
1410 				 */
1411 				break;
1412 			}
1413 			/*
1414 			 * If we had to break this S/G list into multiple
1415 			 * pieces, figure out where we are in the list, and
1416 			 * continue sending pieces if necessary.
1417 			 */
1418 			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
1419 			 && (io->io_hdr.port_status == 0)
1420 			 && (cmd_info->cur_transfer_index <
1421 			     io->scsiio.kern_sg_entries)) {
1422 				struct ctl_sg_entry *sglist;
1423 				ccb_flags flags;
1424 				uint8_t scsi_status;
1425 				uint8_t *data_ptr;
1426 				uint32_t dxfer_len;
1427 				int *ti;
1428 
1429 				sglist = (struct ctl_sg_entry *)
1430 					io->scsiio.kern_data_ptr;
1431 				ti = &cmd_info->cur_transfer_index;
1432 				flags = atio->ccb_h.flags &
1433 					(CAM_DIS_DISCONNECT|
1434 					 CAM_TAG_ACTION_VALID|
1435 					 CAM_DIR_MASK);
1436 
1437 				/*
1438 				 * Set the direction, relative to the initiator.
1439 				 */
1440 				flags &= ~CAM_DIR_MASK;
1441 				if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
1442 				     CTL_FLAG_DATA_IN)
1443 					flags |= CAM_DIR_IN;
1444 				else
1445 					flags |= CAM_DIR_OUT;
1446 
1447 				data_ptr = sglist[*ti].addr;
1448 				dxfer_len = sglist[*ti].len;
1449 				(*ti)++;
1450 
1451 				scsi_status = 0;
1452 
1453 				if (((flags & CAM_SEND_STATUS) == 0)
1454 				 && (dxfer_len == 0)) {
1455 					printf("%s: tag %04x no status or "
1456 					       "len cdb = %02x\n", __func__,
1457 					       atio->tag_id,
1458 					       atio->cdb_io.cdb_bytes[0]);
1459 					printf("%s: tag %04x io status %#x\n",
1460 					       __func__, atio->tag_id,
1461 					       io->io_hdr.status);
1462 				}
1463 
1464 				cam_fill_ctio(csio,
1465 					      /*retries*/ 2,
1466 					      ctlfedone,
1467 					      flags,
1468 					      (flags & CAM_TAG_ACTION_VALID) ?
1469 					       MSG_SIMPLE_Q_TAG : 0,
1470 					      atio->tag_id,
1471 					      atio->init_id,
1472 					      scsi_status,
1473 					      /*data_ptr*/ data_ptr,
1474 					      /*dxfer_len*/ dxfer_len,
1475 					      /*timeout*/ 5 * 1000);
1476 
1477 				csio->ccb_h.flags |= CAM_UNLOCKED;
1478 				csio->resid = 0;
1479 				csio->ccb_h.ccb_atio = atio;
1480 				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
1481 				softc->ctios_sent++;
1482 				mtx_unlock(mtx);
1483 				xpt_action((union ccb *)csio);
1484 			} else {
1485 				/*
1486 				 * Release the CTIO.  The ATIO will be sent back
1487 				 * down to the SIM once we send status.
1488 				 */
1489 				softc->ccbs_freed++;
1490 				xpt_release_ccb(done_ccb);
1491 				mtx_unlock(mtx);
1492 
1493 				/* Call the backend move done callback */
1494 				io->scsiio.be_move_done(io);
1495 			}
1496 			return;
1497 		}
1498 		break;
1499 	}
1500 	case XPT_IMMEDIATE_NOTIFY: {
1501 		union ctl_io *io;
1502 		struct ccb_immediate_notify *inot;
1503 		cam_status status;
1504 		int frozen;
1505 
1506 		inot = &done_ccb->cin1;
1507 
1508 		softc->inots_returned++;
1509 
1510 		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1511 
1512 		printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
1513 		       "seq %#x\n", __func__, inot->ccb_h.status,
1514 		       inot->tag_id, inot->seq_id);
1515 
1516 		io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
1517 		if (io != NULL) {
1518 			int send_ctl_io;
1519 
1520 			send_ctl_io = 1;
1521 
1522 			ctl_zero_io(io);
1523 			io->io_hdr.io_type = CTL_IO_TASK;
1524 			io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
1525 			inot->ccb_h.io_ptr = io;
1526 			io->io_hdr.nexus.initid.id = inot->initiator_id;
1527 			io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
1528 			io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
1529 			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
1530 			/* XXX KDM should this be the tag_id? */
1531 			io->taskio.tag_num = inot->seq_id;
1532 
1533 			status = inot->ccb_h.status & CAM_STATUS_MASK;
1534 			switch (status) {
1535 			case CAM_SCSI_BUS_RESET:
1536 				io->taskio.task_action = CTL_TASK_BUS_RESET;
1537 				break;
1538 			case CAM_BDR_SENT:
1539 				io->taskio.task_action = CTL_TASK_TARGET_RESET;
1540 				break;
1541 			case CAM_MESSAGE_RECV:
1542 				switch (inot->arg) {
1543 				case MSG_ABORT_TASK_SET:
1544 					/*
1545 					 * XXX KDM this isn't currently
1546 					 * supported by CTL.  It ends up
1547 					 * being a no-op.
1548 					 */
1549 					io->taskio.task_action =
1550 						CTL_TASK_ABORT_TASK_SET;
1551 					break;
1552 				case MSG_TARGET_RESET:
1553 					io->taskio.task_action =
1554 						CTL_TASK_TARGET_RESET;
1555 					break;
1556 				case MSG_ABORT_TASK:
1557 					io->taskio.task_action =
1558 						CTL_TASK_ABORT_TASK;
1559 					break;
1560 				case MSG_LOGICAL_UNIT_RESET:
1561 					io->taskio.task_action =
1562 						CTL_TASK_LUN_RESET;
1563 					break;
1564 				case MSG_CLEAR_TASK_SET:
1565 					/*
1566 					 * XXX KDM this isn't currently
1567 					 * supported by CTL.  It ends up
1568 					 * being a no-op.
1569 					 */
1570 					io->taskio.task_action =
1571 						CTL_TASK_CLEAR_TASK_SET;
1572 					break;
1573 				case MSG_CLEAR_ACA:
1574 					io->taskio.task_action =
1575 						CTL_TASK_CLEAR_ACA;
1576 					break;
1577 				case MSG_NOOP:
1578 					send_ctl_io = 0;
1579 					break;
1580 				default:
1581 					xpt_print(periph->path, "%s: "
1582 						  "unsupported message 0x%x\n",
1583 						  __func__, inot->arg);
1584 					send_ctl_io = 0;
1585 					break;
1586 				}
1587 				break;
1588 			case CAM_REQ_ABORTED:
1589 				/*
1590 				 * This request was sent back by the driver.
1591 				 * XXX KDM what do we do here?
1592 				 */
1593 				send_ctl_io = 0;
1594 				break;
1595 			case CAM_REQ_INVALID:
1596 			case CAM_PROVIDE_FAIL:
1597 			default:
1598 				/*
1599 				 * We should only get here if we're talking
1600 				 * to a SIM that is target capable but
1601 				 * supports the old API.  In
1602 				 * that case, we need to just free the CCB.
1603 				 * If we actually send a notify acknowledge,
1604 				 * it will send that back with an error as
1605 				 * well.
1606 				 */
1607 
1608 				if ((status != CAM_REQ_INVALID)
1609 				 && (status != CAM_PROVIDE_FAIL))
1610 					xpt_print(periph->path, "%s: "
1611 						  "unsupported CAM status "
1612 						  "0x%x\n", __func__, status);
1613 
1614 				ctl_free_io(io);
1615 				ctlfe_free_ccb(periph, done_ccb);
1616 
1617 				goto out;
1618 			}
1619 			if (send_ctl_io != 0) {
1620 				ctl_queue(io);
1621 			} else {
1622 				ctl_free_io(io);
1623 				done_ccb->ccb_h.status = CAM_REQ_INPROG;
1624 				done_ccb->ccb_h.func_code =
1625 					XPT_NOTIFY_ACKNOWLEDGE;
1626 				xpt_action(done_ccb);
1627 			}
1628 		} else {
1629 			xpt_print(periph->path, "%s: could not allocate "
1630 				  "ctl_io for immediate notify!\n", __func__);
1631 			/* requeue this to the adapter */
1632 			done_ccb->ccb_h.status = CAM_REQ_INPROG;
1633 			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
1634 			xpt_action(done_ccb);
1635 		}
1636 
1637 		if (frozen != 0) {
1638 			cam_release_devq(periph->path,
1639 					 /*relsim_flags*/ 0,
1640 					 /*opening reduction*/ 0,
1641 					 /*timeout*/ 0,
1642 					 /*getcount_only*/ 0);
1643 		}
1644 		break;
1645 	}
1646 	case XPT_NOTIFY_ACKNOWLEDGE:
1647 		/*
1648 		 * Queue this back down to the SIM as an immediate notify.
1649 		 */
1650 		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
1651 		xpt_action(done_ccb);
1652 		softc->inots_sent++;
1653 		break;
1654 	case XPT_SET_SIM_KNOB:
1655 	case XPT_GET_SIM_KNOB:
1656 		break;
1657 	default:
1658 		panic("%s: unexpected CCB type %#x", __func__,
1659 		      done_ccb->ccb_h.func_code);
1660 		break;
1661 	}
1662 
1663 out:
1664 	mtx_unlock(mtx);
1665 }
1666 
1667 static void
1668 ctlfe_onoffline(void *arg, int online)
1669 {
1670 	struct ctlfe_softc *bus_softc;
1671 	union ccb *ccb;
1672 	cam_status status;
1673 	struct cam_path *path;
1674 	int set_wwnn;
1675 
1676 	bus_softc = (struct ctlfe_softc *)arg;
1677 
1678 	set_wwnn = 0;
1679 
1680 	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
1681 		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1682 	if (status != CAM_REQ_CMP) {
1683 		printf("%s: unable to create path!\n", __func__);
1684 		return;
1685 	}
1686 	ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO);
1687 	if (ccb == NULL) {
1688 		printf("%s: unable to malloc CCB!\n", __func__);
1689 		xpt_free_path(path);
1690 		return;
1691 	}
1692 	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
1693 
1694 	/*
1695 	 * Copan WWN format:
1696 	 *
1697 	 * Bits 63-60:	0x5		NAA, IEEE registered name
1698 	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
1699 	 * Bits 35-12:			Copan SSN (Sequential Serial Number)
1700 	 * Bits 11-8:			Type of port:
1701 	 *					1 == N-Port
1702 	 *					2 == F-Port
1703 	 *					3 == NL-Port
1704 	 * Bits 7-0:			0 == Node Name, >0 == Port Number
1705 	 */
1706 
1707 	if (online != 0) {
1708 
1709 		ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
1710 
1711
1713 
1714
1716 #ifdef RANDOM_WWNN
1717 			uint64_t random_bits;
1718 #endif
1719 
1720 			printf("%s: %s current WWNN %#jx\n", __func__,
1721 			       bus_softc->port_name,
1722 			       ccb->knob.xport_specific.fc.wwnn);
1723 			printf("%s: %s current WWPN %#jx\n", __func__,
1724 			       bus_softc->port_name,
1725 			       ccb->knob.xport_specific.fc.wwpn);
1726 
1727 #ifdef RANDOM_WWNN
1728 			arc4rand(&random_bits, sizeof(random_bits), 0);
1729 #endif
1730 
1731 			/*
1732 			 * XXX KDM this is a bit of a kludge for now.  We
1733 			 * take the current WWNN/WWPN from the card, and
1734 			 * replace the company identifier and the NL-Port
1735 			 * indicator and the port number (for the WWPN).
1736 			 * This should be replaced later with ddb_GetWWNN,
1737 			 * or possibly a more centralized scheme.  (It
1738 			 * would be nice to have the WWNN/WWPN for each
1739 			 * port stored in the ctl_frontend structure.)
1740 			 */
1741 #ifdef RANDOM_WWNN
1742 			ccb->knob.xport_specific.fc.wwnn =
1743 				(random_bits &
1744 				0x0000000fffffff00ULL) |
1745 				/* Company ID */ 0x5000ED5000000000ULL |
1746 				/* NL-Port */    0x0300;
1747 			ccb->knob.xport_specific.fc.wwpn =
1748 				(random_bits &
1749 				0x0000000fffffff00ULL) |
1750 				/* Company ID */ 0x5000ED5000000000ULL |
1751 				/* NL-Port */    0x3000 |
1752 				/* Port Num */ (bus_softc->fe.targ_port & 0xff);
1753 
1754 			/*
1755 			 * This is a bit of an API break/reversal, but if
1756 			 * we're doing the random WWNN that's a little
1757 			 * different anyway.  So record what we're actually
1758 			 * using with the frontend code so it's reported
1759 			 * accurately.
1760 			 */
1761 			bus_softc->fe.wwnn =
1762 				ccb->knob.xport_specific.fc.wwnn;
1763 			bus_softc->fe.wwpn =
1764 				ccb->knob.xport_specific.fc.wwpn;
1765 			set_wwnn = 1;
1766 #else /* RANDOM_WWNN */
1767 			/*
1768 			 * If the user has specified a WWNN/WWPN, send them
1769 			 * down to the SIM.  Otherwise, record what the SIM
1770 			 * has reported.
1771 			 */
1772 			if ((bus_softc->fe.wwnn != 0)
1773 			 && (bus_softc->fe.wwpn != 0)) {
1774 				ccb->knob.xport_specific.fc.wwnn =
1775 					bus_softc->fe.wwnn;
1776 				ccb->knob.xport_specific.fc.wwpn =
1777 					bus_softc->fe.wwpn;
1778 				set_wwnn = 1;
1779 			} else {
1780 				bus_softc->fe.wwnn =
1781 					ccb->knob.xport_specific.fc.wwnn;
1782 				bus_softc->fe.wwpn =
1783 					ccb->knob.xport_specific.fc.wwpn;
1784 			}
1785 #endif /* RANDOM_WWNN */
1786
1788 			if (set_wwnn != 0) {
1789 				printf("%s: %s new WWNN %#jx\n", __func__,
1790 				       bus_softc->port_name,
1791 				       ccb->knob.xport_specific.fc.wwnn);
1792 				printf("%s: %s new WWPN %#jx\n", __func__,
1793 				       bus_softc->port_name,
1794 				       ccb->knob.xport_specific.fc.wwpn);
1795 			}
1796 		} else {
1797 			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
1798 			       bus_softc->port_name);
1799 		}
1800 	}
1801 	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
1802 	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
1803 	if (set_wwnn != 0)
1804 		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;
1805 
1806 	if (online != 0)
1807 		ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET;
1808 	else
1809 		ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE;
1810 
1811 	xpt_action(ccb);
1812 
1813 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1814 		printf("%s: SIM %s (path id %d) target %s failed with "
1815 		       "status %#x\n",
1816 		       __func__, bus_softc->port_name, bus_softc->path_id,
1817 		       (online != 0) ? "enable" : "disable",
1818 		       ccb->ccb_h.status);
1819 	} else {
1820 		printf("%s: SIM %s (path id %d) target %s succeeded\n",
1821 		       __func__, bus_softc->port_name, bus_softc->path_id,
1822 		       (online != 0) ? "enable" : "disable");
1823 	}
1824 
1825 	xpt_free_path(path);
1826 
1827 	free(ccb, M_TEMP);
1828 
1829 	return;
1830 }
1831 
1832 static void
1833 ctlfe_online(void *arg)
1834 {
1835 	struct ctlfe_softc *bus_softc;
1836 	struct cam_path *path;
1837 	cam_status status;
1838 	struct ctlfe_lun_softc *lun_softc;
1839 
1840 	bus_softc = (struct ctlfe_softc *)arg;
1841 
1842 	/*
1843 	 * Create the wildcard LUN before bringing the port online.
1844 	 */
1845 	status = xpt_create_path(&path, /*periph*/ NULL,
1846 				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1847 				 CAM_LUN_WILDCARD);
1848 	if (status != CAM_REQ_CMP) {
1849 		printf("%s: unable to create path for wildcard periph\n",
1850 		       __func__);
1851 		return;
1852 	}
1853 
1854 	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE,
1855 			M_NOWAIT | M_ZERO);
1856 	if (lun_softc == NULL) {
1857 		xpt_print(path, "%s: unable to allocate softc for "
1858 				"wildcard periph\n", __func__);
1859 		xpt_free_path(path);
1860 		return;
1861 	}
1862 
1863 	xpt_path_lock(path);
1864 	lun_softc->parent_softc = bus_softc;
1865 	lun_softc->flags |= CTLFE_LUN_WILDCARD;
1866 
1867 	mtx_lock(&bus_softc->lun_softc_mtx);
1868 	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links);
1869 	mtx_unlock(&bus_softc->lun_softc_mtx);
1870 
1871 	status = cam_periph_alloc(ctlferegister,
1872 				  ctlfeoninvalidate,
1873 				  ctlfecleanup,
1874 				  ctlfestart,
1875 				  "ctl",
1876 				  CAM_PERIPH_BIO,
1877 				  path,
1878 				  ctlfeasync,
1879 				  0,
1880 				  lun_softc);
1881 
1882 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1883 		const struct cam_status_entry *entry;
1884 
1885 		entry = cam_fetch_status_entry(status);
1886 
1887 		printf("%s: CAM error %s (%#x) returned from "
1888 		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1889 		       entry->status_text : "Unknown", status);
1890 	}
1891 
1892 	ctlfe_onoffline(arg, /*online*/ 1);
1893 
1894 	xpt_path_unlock(path);
1895 	xpt_free_path(path);
1896 }
1897 
1898 static void
1899 ctlfe_offline(void *arg)
1900 {
1901 	struct ctlfe_softc *bus_softc;
1902 	struct cam_path *path;
1903 	cam_status status;
1904 	struct cam_periph *periph;
1905 
1906 	bus_softc = (struct ctlfe_softc *)arg;
1907 
1908 	/*
1909 	 * Take the port offline, then disable the wildcard LUN for
1910 	 * this port by invalidating its periph.
1911 	 */
1912 	status = xpt_create_path(&path, /*periph*/ NULL,
1913 				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1914 				 CAM_LUN_WILDCARD);
1915 	if (status != CAM_REQ_CMP) {
1916 		printf("%s: unable to create path for wildcard periph\n",
1917 		       __func__);
1918 		return;
1919 	}
1920 
1921 	xpt_path_lock(path);
1922 
1923 	ctlfe_onoffline(arg, /*online*/ 0);
1924 
1925 	if ((periph = cam_periph_find(path, "ctl")) != NULL)
1926 		cam_periph_invalidate(periph);
1927 
1928 	xpt_path_unlock(path);
1929 	xpt_free_path(path);
1930 }
1931 
1932 static int
1933 ctlfe_targ_enable(void *arg, struct ctl_id targ_id)
1934 {
1935 	return (0);
1936 }
1937 
1938 static int
1939 ctlfe_targ_disable(void *arg, struct ctl_id targ_id)
1940 {
1941 	return (0);
1942 }
1943 
1944 /*
1945  * This will get called to enable a LUN on every bus that is attached to
1946  * CTL.  So we only need to create a path/periph for this particular bus.
1947  */
1948 static int
1949 ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
1950 {
1951 	struct ctlfe_softc *bus_softc;
1952 	struct ctlfe_lun_softc *softc;
1953 	struct cam_path *path;
1954 	struct cam_periph *periph;
1955 	cam_status status;
1956 
1957 	bus_softc = (struct ctlfe_softc *)arg;
1958 
1959 	status = xpt_create_path(&path, /*periph*/ NULL,
1960 				  bus_softc->path_id,
1961 				  targ_id.id, lun_id);
1962 	/* XXX KDM need some way to return status to CTL here? */
1963 	if (status != CAM_REQ_CMP) {
1964 		printf("%s: could not create path, status %#x\n", __func__,
1965 		       status);
1966 		return (1);
1967 	}
1968 
1969 	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
1970 	xpt_path_lock(path);
1971 	periph = cam_periph_find(path, "ctl");
1972 	if (periph != NULL) {
1973 		/* We've already got a periph, no need to alloc a new one. */
1974 		xpt_path_unlock(path);
1975 		xpt_free_path(path);
1976 		free(softc, M_CTLFE);
1977 		return (0);
1978 	}
1979 
1980 	softc->parent_softc = bus_softc;
1981 	mtx_lock(&bus_softc->lun_softc_mtx);
1982 	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
1983 	mtx_unlock(&bus_softc->lun_softc_mtx);
1984 
1985 	status = cam_periph_alloc(ctlferegister,
1986 				  ctlfeoninvalidate,
1987 				  ctlfecleanup,
1988 				  ctlfestart,
1989 				  "ctl",
1990 				  CAM_PERIPH_BIO,
1991 				  path,
1992 				  ctlfeasync,
1993 				  0,
1994 				  softc);
1995 
1996 	xpt_path_unlock(path);
1997 	xpt_free_path(path);
1998 	return (0);
1999 }
2000 
2001 /*
2002  * This will get called when the user removes a LUN to disable that LUN
2003  * on every bus that is attached to CTL.
2004  */
2005 static int
2006 ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
2007 {
2008 	struct ctlfe_softc *softc;
2009 	struct ctlfe_lun_softc *lun_softc;
2010 
2011 	softc = (struct ctlfe_softc *)arg;
2012 
2013 	mtx_lock(&softc->lun_softc_mtx);
2014 	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
2015 		struct cam_path *path;
2016 
2017 		path = lun_softc->periph->path;
2018 
2019 		if ((xpt_path_target_id(path) == targ_id.id)
2020 		 && (xpt_path_lun_id(path) == lun_id)) {
2021 			break;
2022 		}
2023 	}
2024 	if (lun_softc == NULL) {
2025 		mtx_unlock(&softc->lun_softc_mtx);
2026 		printf("%s: can't find target %d lun %d\n", __func__,
2027 		       targ_id.id, lun_id);
2028 		return (1);
2029 	}
2030 	cam_periph_acquire(lun_softc->periph);
2031 	mtx_unlock(&softc->lun_softc_mtx);
2032 
2033 	cam_periph_lock(lun_softc->periph);
2034 	cam_periph_invalidate(lun_softc->periph);
2035 	cam_periph_unlock(lun_softc->periph);
2036 	cam_periph_release(lun_softc->periph);
2037 	return (0);
2038 }
2039 
2040 static void
2041 ctlfe_dump_sim(struct cam_sim *sim)
2042 {
2043 
2044 	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
2045 	       sim->sim_name, sim->unit_number,
2046 	       sim->max_tagged_dev_openings, sim->max_dev_openings);
2047 	printf("\n");
2048 }
2049 
2050 /*
2051  * Assumes that the SIM lock is held.
2052  */
2053 static void
2054 ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
2055 {
2056 	struct ccb_hdr *hdr;
2057 	struct cam_periph *periph;
2058 	int num_items;
2059 
2060 	periph = softc->periph;
2061 	num_items = 0;
2062 
2063 	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
2064 		union ctl_io *io;
2065 
2066 		io = hdr->io_ptr;
2067 
2068 		num_items++;
2069 
2070 		/*
2071 		 * This can happen when we get an ATIO but can't allocate
2072 		 * a ctl_io.  See the XPT_ACCEPT_TARGET_IO case in ctlfedone().
2073 		 */
2074 		if (io == NULL) {
2075 			struct ccb_scsiio *csio;
2076 
2077 			csio = (struct ccb_scsiio *)hdr;
2078 
2079 			xpt_print(periph->path, "CCB %#x ctl_io allocation "
2080 				  "failed\n", csio->tag_id);
2081 			continue;
2082 		}
2083 
2084 		/*
2085 		 * Only regular SCSI I/O is put on the work
2086 		 * queue, so we can print sense here.  There may be no
2087 		 * sense if it's on the queue for a DMA, but this serves to
2088 		 * print out the CCB as well.
2089 		 *
2090 		 * XXX KDM switch this over to scsi_sense_print() when
2091 		 * CTL is merged in with CAM.
2092 		 */
2093 		ctl_io_error_print(io, NULL);
2094 
2095 		/*
2096 		 * If status has already been set, we're on the queue
2097 		 * waiting for a CTIO to send that status back to the
2098 		 * initiator, so there is no data transfer to report here.
2099 		 */
2100 		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
2101 			continue;
2102 
2103 		/*
2104 		 * Otherwise, we're on the queue waiting to
2105 		 * do a data transfer.
2106 		 */
2107 		xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
2108 			  io->scsiio.kern_total_len, io->scsiio.kern_data_len,
2109 			  io->scsiio.kern_data_resid);
2110 	}
2111 
2112 	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
2113 		  num_items);
2114 	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
2115 		  "freed)\n", (uintmax_t)(softc->ccbs_alloced -
2116 		  softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
2117 		  (uintmax_t)softc->ccbs_freed);
2118 	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
2119 		  "returned)\n", (uintmax_t)(softc->ctios_sent -
2120 		  softc->ctios_returned), (uintmax_t)softc->ctios_sent,
2121 		  (uintmax_t)softc->ctios_returned);
2122 }
2123 
2124 /*
2125  * This function is called when we fail to get a CCB for a DMA or status return
2126  * to the initiator within the specified time period.
2127  *
2128  * The callout code should ensure that we hold the SIM mutex here.
2129  */
2130 static void
2131 ctlfe_dma_timeout(void *arg)
2132 {
2133 	struct ctlfe_lun_softc *softc;
2134 	struct cam_periph *periph;
2135 	struct cam_sim *sim;
2136 	int num_queued;
2137 
2138 	softc = (struct ctlfe_lun_softc *)arg;
2139 	periph = softc->periph;
2140 	sim = xpt_path_sim(periph->path);
2141 	num_queued = 0;
2142 
2143 	/*
2144 	 * Nothing to do...
2145 	 */
2146 	if (TAILQ_FIRST(&softc->work_queue) == NULL) {
2147 		xpt_print(periph->path, "TIMEOUT triggered after %d "
2148 			  "seconds, but nothing on work queue??\n",
2149 			  CTLFE_DMA_TIMEOUT);
2150 		return;
2151 	}
2152 
2153 	xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to "
2154 		  "start\n", CTLFE_DMA_TIMEOUT);
2155 
2156 	ctlfe_dump_queue(softc);
2157 
2158 	ctlfe_dump_sim(sim);
2159 
2160 	xpt_print(periph->path, "calling xpt_schedule() to attempt to "
2161 		  "unstick our queue\n");
2162 
2163 	xpt_schedule(periph, /*priority*/ 1);
2164 
2165 	xpt_print(periph->path, "xpt_schedule() call complete\n");
2166 }
2167 
2168 /*
2169  * Datamove/done routine called by CTL.  Put ourselves on the queue to
2170  * receive a CCB from CAM so we can queue the continue I/O request down
2171  * to the adapter.
2172  */
2173 static void
2174 ctlfe_datamove_done(union ctl_io *io)
2175 {
2176 	union ccb *ccb;
2177 	struct cam_periph *periph;
2178 	struct ctlfe_lun_softc *softc;
2179 
2180 	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
2181 
2182 	periph = xpt_path_periph(ccb->ccb_h.path);
2183 	cam_periph_lock(periph);
2184 
2185 	softc = (struct ctlfe_lun_softc *)periph->softc;
2186 
2187 	if (io->io_hdr.io_type == CTL_IO_TASK) {
2188 		/*
2189 		 * Task management commands don't require any further
2190 		 * communication back to the adapter.  Requeue the CCB
2191 		 * to the adapter, and free the CTL I/O.
2192 		 */
2193 		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
2194 			  "tag %#x seq %#x\n", __func__,
2195 			  ccb->cin1.tag_id, ccb->cin1.seq_id);
2196 		/*
2197 		 * Send the notify acknowledge down to the SIM, to let it
2198 		 * know we processed the task management command.
2199 		 */
2200 		ccb->ccb_h.status = CAM_REQ_INPROG;
2201 		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
2202 		xpt_action(ccb);
2203 		ctl_free_io(io);
2204 	} else {
2205 		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
2206 			io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
2207 		else
2208 			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
2209 
2210 		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
2211 				  periph_links.tqe);
2212 
2213 		/*
2214 		 * Reset the timeout for our latest active DMA.
2215 		 */
2216 		callout_reset(&softc->dma_callout,
2217 			      CTLFE_DMA_TIMEOUT * hz,
2218 			      ctlfe_dma_timeout, softc);
2219 		/*
2220 		 * Ask for the CAM transport layer to send us a CCB to do
2221 		 * the DMA or send status, unless ctlfe_dma_enabled is set
2222 		 * to 0.
2223 		 */
2224 		if (ctlfe_dma_enabled != 0)
2225 			xpt_schedule(periph, /*priority*/ 1);
2226 	}
2227 
2228 	cam_periph_unlock(periph);
2229 }
2230 
2231 static void
2232 ctlfe_dump(void)
2233 {
2234 	struct ctlfe_softc *bus_softc;
2235 
2236 	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
2237 		struct ctlfe_lun_softc *lun_softc;
2238 
2239 		ctlfe_dump_sim(bus_softc->sim);
2240 
2241 		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) {
2242 			ctlfe_dump_queue(lun_softc);
2243 		}
2244 	}
2245 }
2246