xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 8cb0d414a8bc7f4114d208fefdf614cd3647086c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 1997-2009 by Matthew Jacob
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
32  */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <dev/isp/isp_freebsd.h>
37 #include <sys/unistd.h>
38 #include <sys/kthread.h>
39 #include <sys/conf.h>
40 #include <sys/module.h>
41 #include <sys/ioccom.h>
42 #include <dev/isp/isp_ioctl.h>
43 #include <sys/devicestat.h>
44 #include <cam/cam_periph.h>
45 #include <cam/cam_xpt_periph.h>
46 
47 MODULE_VERSION(isp, 1);
48 MODULE_DEPEND(isp, cam, 1, 1, 1);
49 int isp_announced = 0;
50 int isp_loop_down_limit = 60;	/* default loop down limit */
51 int isp_quickboot_time = 7;	/* don't wait more than N secs for loop up */
52 int isp_gone_device_time = 30;	/* grace time before reporting device lost */
53 static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s";
54 
55 static void isp_freeze_loopdown(ispsoftc_t *, int);
56 static void isp_loop_changed(ispsoftc_t *isp, int chan);
57 static void isp_rq_check_above(ispsoftc_t *);
58 static void isp_rq_check_below(ispsoftc_t *);
59 static d_ioctl_t ispioctl;
60 static void isp_poll(struct cam_sim *);
61 static callout_func_t isp_watchdog;
62 static callout_func_t isp_gdt;
63 static task_fn_t isp_gdt_task;
64 static void isp_kthread(void *);
65 static void isp_action(struct cam_sim *, union ccb *);
66 static int isp_timer_count;
67 static void isp_timer(void *);
68 
69 static struct cdevsw isp_cdevsw = {
70 	.d_version =	D_VERSION,
71 	.d_ioctl =	ispioctl,
72 	.d_name =	"isp",
73 };
74 
75 static int
76 isp_role_sysctl(SYSCTL_HANDLER_ARGS)
77 {
78 	ispsoftc_t *isp = (ispsoftc_t *)arg1;
79 	int chan = arg2;
80 	int error, old, value;
81 
82 	value = FCPARAM(isp, chan)->role;
83 
84 	error = sysctl_handle_int(oidp, &value, 0, req);
85 	if ((error != 0) || (req->newptr == NULL))
86 		return (error);
87 
88 	if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH)
89 		return (EINVAL);
90 
91 	ISP_LOCK(isp);
92 	old = FCPARAM(isp, chan)->role;
93 
94 	/* We don't allow target mode switch from here. */
95 	value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR);
96 
97 	/* If nothing has changed -- we are done. */
98 	if (value == old) {
99 		ISP_UNLOCK(isp);
100 		return (0);
101 	}
102 
103 	/* Actually change the role. */
104 	error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value);
105 	ISP_UNLOCK(isp);
106 	return (error);
107 }
108 
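/*
 * Allocate and register the CAM SIM and wildcard path for one channel,
 * start its worker thread, and attach the per-channel sysctl nodes.
 */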
109 static int
110 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
111 {
112 	fcparam *fcp = FCPARAM(isp, chan);
113 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
114 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev);
115 	struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev);
116 	char name[16];
117 	struct cam_sim *sim;
118 	struct cam_path *path;
119 #ifdef	ISP_TARGET_MODE
120 	int i;
121 #endif
122 
123 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
124 	    device_get_unit(isp->isp_dev), &isp->isp_lock,
125 	    isp->isp_maxcmds, isp->isp_maxcmds, devq);
126 	if (sim == NULL)
127 		return (ENOMEM);
128 
129 	if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) {
130 		cam_sim_free(sim, FALSE);
131 		return (EIO);
132 	}
133 	if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
134 		xpt_bus_deregister(cam_sim_path(sim));
135 		cam_sim_free(sim, FALSE);
136 		return (ENXIO);
137 	}
138 
139 	ISP_LOCK(isp);
140 	fc->sim = sim;
141 	fc->path = path;
142 	fc->isp = isp;
143 	fc->ready = 1;
144 	fcp->isp_use_gft_id = 1;
145 	fcp->isp_use_gff_id = 1;
146 
147 	callout_init_mtx(&fc->gdt, &isp->isp_lock, 0);
148 	TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc);
149 #ifdef	ISP_TARGET_MODE
150 	TAILQ_INIT(&fc->waitq);
151 	STAILQ_INIT(&fc->ntfree);
152 	for (i = 0; i < ATPDPSIZE; i++)
153 		STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next);
154 	LIST_INIT(&fc->atfree);
155 	for (i = ATPDPSIZE-1; i >= 0; i--)
156 		LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next);
157 	for (i = 0; i < ATPDPHASHSIZE; i++)
158 		LIST_INIT(&fc->atused[i]);
159 #endif
160 	isp_loop_changed(isp, chan);
161 	ISP_UNLOCK(isp);
162 	if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0,
163 	    "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
164 		xpt_free_path(fc->path);
165 		xpt_bus_deregister(cam_sim_path(fc->sim));
166 		cam_sim_free(fc->sim, FALSE);
167 		return (ENOMEM);
168 	}
169 	fc->num_threads += 1;
170 	if (chan > 0) {
171 		snprintf(name, sizeof(name), "chan%d", chan);
172 		tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree),
173 		    OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
174 		    "Virtual channel");
175 	}
176 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
177 	    "wwnn", CTLFLAG_RD, &fcp->isp_wwnn,
178 	    "World Wide Node Name");
179 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
180 	    "wwpn", CTLFLAG_RD, &fcp->isp_wwpn,
181 	    "World Wide Port Name");
182 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
183 	    "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0,
184 	    "Loop Down Limit");
185 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
186 	    "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0,
187 	    "Gone Device Time");
188 #if defined(ISP_TARGET_MODE) && defined(DEBUG)
189 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
190 	    "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0,
191 	    "Cause a Lost Frame on a Read");
192 #endif
193 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
194 	    "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
195 	    isp, chan, isp_role_sysctl, "I", "Current role");
196 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
197 	    "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0,
198 	    "Connection speed in gigabits");
199 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
200 	    "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0,
201 	    "Link state");
202 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
203 	    "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0,
204 	    "Firmware state");
205 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
206 	    "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0,
207 	    "Loop state");
208 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
209 	    "topo", CTLFLAG_RD, &fcp->isp_topo, 0,
210 	    "Connection topology");
211 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
212 	    "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0,
213 	    "Use GFT_ID during fabric scan");
214 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
215 	    "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0,
216 	    "Use GFF_ID during fabric scan");
217 	return (0);
218 }
219 
220 static void
221 isp_detach_chan(ispsoftc_t *isp, int chan)
222 {
223 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
224 
225 	xpt_free_path(fc->path);
226 	xpt_bus_deregister(cam_sim_path(fc->sim));
227 	cam_sim_free(fc->sim, FALSE);
228 
229 	/* Wait for the channel's spawned threads to exit. */
230 	wakeup(fc);
231 	while (fc->num_threads != 0)
232 		mtx_sleep(&fc->num_threads, &isp->isp_lock, PRIBIO, "isp_reap", 0);
233 }
234 
235 int
236 isp_attach(ispsoftc_t *isp)
237 {
238 	const char *nu = device_get_nameunit(isp->isp_osinfo.dev);
239 	int du = device_get_unit(isp->isp_dev);
240 	int chan;
241 
242 	/*
243 	 * Create the device queue for our SIM(s).
244 	 */
245 	isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds);
246 	if (isp->isp_osinfo.devq == NULL) {
247 		return (EIO);
248 	}
249 
250 	for (chan = 0; chan < isp->isp_nchan; chan++) {
251 		if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) {
252 			goto unwind;
253 		}
254 	}
255 
256 	callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0);
257 	isp_timer_count = hz >> 2;
258 	callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
259 
260 	isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu);
261 	if (isp->isp_osinfo.cdev) {
262 		isp->isp_osinfo.cdev->si_drv1 = isp;
263 	}
264 	return (0);
265 
266 unwind:
267 	ISP_LOCK(isp);
268 	isp->isp_osinfo.is_exiting = 1;
269 	while (--chan >= 0)
270 		isp_detach_chan(isp, chan);
271 	ISP_UNLOCK(isp);
272 	cam_simq_free(isp->isp_osinfo.devq);
273 	isp->isp_osinfo.devq = NULL;
274 	return (-1);
275 }
276 
277 int
278 isp_detach(ispsoftc_t *isp)
279 {
280 	int chan;
281 
282 	if (isp->isp_osinfo.cdev) {
283 		destroy_dev(isp->isp_osinfo.cdev);
284 		isp->isp_osinfo.cdev = NULL;
285 	}
286 	ISP_LOCK(isp);
287 	/* Tell spawned threads that we're exiting. */
288 	isp->isp_osinfo.is_exiting = 1;
289 	for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1)
290 		isp_detach_chan(isp, chan);
291 	ISP_UNLOCK(isp);
292 	callout_drain(&isp->isp_osinfo.tmo);
293 	cam_simq_free(isp->isp_osinfo.devq);
294 	return (0);
295 }
296 
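/*
 * Freeze this channel's SIM queue (and hold boot) while the loop is down,
 * or just mark it as loopdown-frozen if it is already frozen.
 */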
297 static void
298 isp_freeze_loopdown(ispsoftc_t *isp, int chan)
299 {
300 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
301 
302 	if (fc->sim == NULL)
303 		return;
304 	if (fc->simqfrozen == 0) {
305 		isp_prt(isp, ISP_LOGDEBUG0,
306 		    "Chan %d Freeze simq (loopdown)", chan);
307 		fc->simqfrozen = SIMQFRZ_LOOPDOWN;
308 		xpt_hold_boot();
309 		xpt_freeze_simq(fc->sim, 1);
310 	} else {
311 		isp_prt(isp, ISP_LOGDEBUG0,
312 		    "Chan %d Mark simq frozen (loopdown)", chan);
313 		fc->simqfrozen |= SIMQFRZ_LOOPDOWN;
314 	}
315 }
316 
317 static void
318 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan)
319 {
320 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
321 
322 	if (fc->sim == NULL)
323 		return;
324 	int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN;
325 	fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN;
326 	if (wasfrozen && fc->simqfrozen == 0) {
327 		isp_prt(isp, ISP_LOGDEBUG0,
328 		    "Chan %d Release simq", chan);
329 		xpt_release_simq(fc->sim, 1);
330 		xpt_release_boot();
331 	}
332 }
333 
334 /*
335  * Functions to protect from request queue overflow by freezing SIM queue.
336  * XXX: freezing only one arbitrary SIM, since they all share the queue.
337  */
338 static void
339 isp_rq_check_above(ispsoftc_t *isp)
340 {
341 	struct isp_fc *fc = ISP_FC_PC(isp, 0);
342 
343 	if (isp->isp_rqovf || fc->sim == NULL)
344 		return;
345 	if (!isp_rqentry_avail(isp, QENTRY_MAX)) {
346 		xpt_freeze_simq(fc->sim, 1);
347 		isp->isp_rqovf = 1;
348 	}
349 }
350 
351 static void
352 isp_rq_check_below(ispsoftc_t *isp)
353 {
354 	struct isp_fc *fc = ISP_FC_PC(isp, 0);
355 
356 	if (!isp->isp_rqovf || fc->sim == NULL)
357 		return;
358 	if (isp_rqentry_avail(isp, QENTRY_MAX)) {
359 		xpt_release_simq(fc->sim, 0);
360 		isp->isp_rqovf = 0;
361 	}
362 }
363 
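/*
 * Character device ioctl entry point; the ISP_* commands handled below
 * are defined in isp_ioctl.h.
 */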
364 static int
365 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
366 {
367 	ispsoftc_t *isp;
368 	int nr, chan, retval = ENOTTY;
369 
370 	isp = dev->si_drv1;
371 
372 	switch (c) {
373 	case ISP_SDBLEV:
374 	{
375 		int olddblev = isp->isp_dblev;
376 		isp->isp_dblev = *(int *)addr;
377 		*(int *)addr = olddblev;
378 		retval = 0;
379 		break;
380 	}
381 	case ISP_GETROLE:
382 		chan = *(int *)addr;
383 		if (chan < 0 || chan >= isp->isp_nchan) {
384 			retval = -ENXIO;
385 			break;
386 		}
387 		*(int *)addr = FCPARAM(isp, chan)->role;
388 		retval = 0;
389 		break;
390 	case ISP_SETROLE:
391 		nr = *(int *)addr;
392 		chan = nr >> 8;
393 		if (chan < 0 || chan >= isp->isp_nchan) {
394 			retval = -ENXIO;
395 			break;
396 		}
397 		nr &= 0xff;
398 		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
399 			retval = EINVAL;
400 			break;
401 		}
402 		ISP_LOCK(isp);
403 		*(int *)addr = FCPARAM(isp, chan)->role;
404 		retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr);
405 		ISP_UNLOCK(isp);
406 		retval = 0;
407 		break;
408 
409 	case ISP_RESETHBA:
410 		ISP_LOCK(isp);
411 		isp_reinit(isp, 0);
412 		ISP_UNLOCK(isp);
413 		retval = 0;
414 		break;
415 
416 	case ISP_RESCAN:
417 		chan = *(intptr_t *)addr;
418 		if (chan < 0 || chan >= isp->isp_nchan) {
419 			retval = -ENXIO;
420 			break;
421 		}
422 		ISP_LOCK(isp);
423 		if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) {
424 			retval = EIO;
425 		} else {
426 			retval = 0;
427 		}
428 		ISP_UNLOCK(isp);
429 		break;
430 
431 	case ISP_FC_LIP:
432 		chan = *(intptr_t *)addr;
433 		if (chan < 0 || chan >= isp->isp_nchan) {
434 			retval = -ENXIO;
435 			break;
436 		}
437 		ISP_LOCK(isp);
438 		if (isp_control(isp, ISPCTL_SEND_LIP, chan)) {
439 			retval = EIO;
440 		} else {
441 			retval = 0;
442 		}
443 		ISP_UNLOCK(isp);
444 		break;
445 	case ISP_FC_GETDINFO:
446 	{
447 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
448 		fcportdb_t *lp;
449 
450 		if (ifc->loopid >= MAX_FC_TARG) {
451 			retval = EINVAL;
452 			break;
453 		}
454 		lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
455 		if (lp->state != FC_PORTDB_STATE_NIL) {
456 			ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
457 			ifc->loopid = lp->handle;
458 			ifc->portid = lp->portid;
459 			ifc->node_wwn = lp->node_wwn;
460 			ifc->port_wwn = lp->port_wwn;
461 			retval = 0;
462 		} else {
463 			retval = ENODEV;
464 		}
465 		break;
466 	}
467 	case ISP_FC_GETHINFO:
468 	{
469 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
470 		int chan = hba->fc_channel;
471 
472 		if (chan < 0 || chan >= isp->isp_nchan) {
473 			retval = ENXIO;
474 			break;
475 		}
476 		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
477 		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
478 		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
479 		hba->fc_nchannels = isp->isp_nchan;
480 		hba->fc_nports = MAX_FC_TARG;
481 		hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed;
482 		hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1;
483 		hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid;
484 		hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram;
485 		hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram;
486 		hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn;
487 		hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn;
488 		retval = 0;
489 		break;
490 	}
491 	case ISP_TSK_MGMT:
492 	{
493 		int needmarker;
494 		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
495 		uint16_t nphdl;
496 		void *reqp;
497 		uint8_t resp[QENTRY_LEN];
498 		isp24xx_tmf_t tmf;
499 		isp24xx_statusreq_t sp;
500 		fcparam *fcp;
501 		fcportdb_t *lp;
502 		int i;
503 
504 		chan = fct->chan;
505 		if (chan < 0 || chan >= isp->isp_nchan) {
506 			retval = -ENXIO;
507 			break;
508 		}
509 
510 		needmarker = retval = 0;
511 		nphdl = fct->loopid;
512 		ISP_LOCK(isp);
513 		fcp = FCPARAM(isp, chan);
514 
515 		for (i = 0; i < MAX_FC_TARG; i++) {
516 			lp = &fcp->portdb[i];
517 			if (lp->handle == nphdl) {
518 				break;
519 			}
520 		}
521 		if (i == MAX_FC_TARG) {
522 			retval = ENXIO;
523 			ISP_UNLOCK(isp);
524 			break;
525 		}
526 		ISP_MEMZERO(&tmf, sizeof(tmf));
527 		tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT;
528 		tmf.tmf_header.rqs_entry_count = 1;
529 		tmf.tmf_nphdl = lp->handle;
530 		tmf.tmf_delay = 2;
531 		tmf.tmf_timeout = 4;
532 		tmf.tmf_tidlo = lp->portid;
533 		tmf.tmf_tidhi = lp->portid >> 16;
534 		tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan);
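		/* Encode the LUN; flat addressing (0x40 in byte 0) is used for LUNs >= 256. */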
535 		tmf.tmf_lun[1] = fct->lun & 0xff;
536 		if (fct->lun >= 256) {
537 			tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8);
538 		}
539 		switch (fct->action) {
540 		case IPT_CLEAR_ACA:
541 			tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA;
542 			break;
543 		case IPT_TARGET_RESET:
544 			tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET;
545 			needmarker = 1;
546 			break;
547 		case IPT_LUN_RESET:
548 			tmf.tmf_flags = ISP24XX_TMF_LUN_RESET;
549 			needmarker = 1;
550 			break;
551 		case IPT_CLEAR_TASK_SET:
552 			tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET;
553 			needmarker = 1;
554 			break;
555 		case IPT_ABORT_TASK_SET:
556 			tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET;
557 			needmarker = 1;
558 			break;
559 		default:
560 			retval = EINVAL;
561 			break;
562 		}
563 		if (retval) {
564 			ISP_UNLOCK(isp);
565 			break;
566 		}
567 
568 		/* Prepare space for response in memory */
569 		memset(resp, 0xff, sizeof(resp));
570 		tmf.tmf_handle = isp_allocate_handle(isp, resp,
571 		    ISP_HANDLE_CTRL);
572 		if (tmf.tmf_handle == 0) {
573 			isp_prt(isp, ISP_LOGERR,
574 			    "%s: TMF of Chan %d out of handles",
575 			    __func__, chan);
576 			ISP_UNLOCK(isp);
577 			retval = ENOMEM;
578 			break;
579 		}
580 
581 		/* Send request and wait for response. */
582 		reqp = isp_getrqentry(isp);
583 		if (reqp == NULL) {
584 			isp_prt(isp, ISP_LOGERR,
585 			    "%s: TMF of Chan %d out of rqent",
586 			    __func__, chan);
587 			isp_destroy_handle(isp, tmf.tmf_handle);
588 			ISP_UNLOCK(isp);
589 			retval = EIO;
590 			break;
591 		}
592 		isp_put_24xx_tmf(isp, &tmf, (isp24xx_tmf_t *)reqp);
593 		if (isp->isp_dblev & ISP_LOGDEBUG1)
594 			isp_print_bytes(isp, "IOCB TMF", QENTRY_LEN, reqp);
595 		ISP_SYNC_REQUEST(isp);
596 		if (msleep(resp, &isp->isp_lock, 0, "TMF", 5*hz) == EWOULDBLOCK) {
597 			isp_prt(isp, ISP_LOGERR,
598 			    "%s: TMF of Chan %d timed out",
599 			    __func__, chan);
600 			isp_destroy_handle(isp, tmf.tmf_handle);
601 			ISP_UNLOCK(isp);
602 			retval = EIO;
603 			break;
604 		}
605 		if (isp->isp_dblev & ISP_LOGDEBUG1)
606 			isp_print_bytes(isp, "IOCB TMF response", QENTRY_LEN, resp);
607 		isp_get_24xx_response(isp, (isp24xx_statusreq_t *)resp, &sp);
608 
609 		if (sp.req_completion_status != 0)
610 			retval = EIO;
611 		else if (needmarker)
612 			fcp->sendmarker = 1;
613 		ISP_UNLOCK(isp);
614 		break;
615 	}
616 	default:
617 		break;
618 	}
619 	return (retval);
620 }
621 
622 /*
623  * Local Inlines
624  */
625 
626 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *);
627 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *);
628 
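/*
 * Per-command (pcmd) structures are kept on a singly linked free list in
 * the softc; these helpers pop an entry for a CCB and push it back.
 */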
629 static ISP_INLINE int
630 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb)
631 {
632 	ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free;
633 	if (ISP_PCMD(ccb) == NULL) {
634 		return (-1);
635 	}
636 	isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next;
637 	return (0);
638 }
639 
640 static ISP_INLINE void
641 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb)
642 {
643 	if (ISP_PCMD(ccb)) {
644 #ifdef	ISP_TARGET_MODE
645 		PISP_PCMD(ccb)->datalen = 0;
646 #endif
647 		PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free;
648 		isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb);
649 		ISP_PCMD(ccb) = NULL;
650 	}
651 }
652 
653 /*
654  * Put the target mode functions here, because some are inlines
655  */
656 #ifdef	ISP_TARGET_MODE
657 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
658 static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t);
659 static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t);
660 static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *);
661 static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int);
662 static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t);
663 static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *);
664 static tstate_t *create_lun_state(ispsoftc_t *, int, struct cam_path *);
665 static void destroy_lun_state(ispsoftc_t *, int, tstate_t *);
666 static void isp_enable_lun(ispsoftc_t *, union ccb *);
667 static void isp_disable_lun(ispsoftc_t *, union ccb *);
668 static callout_func_t isp_refire_notify_ack;
669 static void isp_complete_ctio(ispsoftc_t *isp, union ccb *);
670 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE };
671 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How);
672 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
673 static void isp_handle_platform_ctio(ispsoftc_t *, ct7_entry_t *);
674 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp);
675 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *);
676 static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t);
677 
678 static ISP_INLINE tstate_t *
679 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
680 {
681 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
682 	tstate_t *tptr;
683 
684 	SLIST_FOREACH(tptr, &fc->lun_hash[LUN_HASH_FUNC(lun)], next) {
685 		if (tptr->ts_lun == lun)
686 			return (tptr);
687 	}
688 	return (NULL);
689 }
690 
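/*
 * Retry ATIOs that were previously queued for lack of resources.  Returns
 * nonzero if entries remain on the LUN's restart queue.
 */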
691 static int
692 isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr)
693 {
694 	inot_private_data_t *ntp;
695 	struct ntpdlist rq;
696 
697 	if (STAILQ_EMPTY(&tptr->restart_queue))
698 		return (0);
699 	STAILQ_INIT(&rq);
700 	STAILQ_CONCAT(&rq, &tptr->restart_queue);
701 	while ((ntp = STAILQ_FIRST(&rq)) != NULL) {
702 		STAILQ_REMOVE_HEAD(&rq, next);
703 		isp_prt(isp, ISP_LOGTDEBUG0,
704 		    "%s: restarting resrc deprived %x", __func__,
705 		    ((at7_entry_t *)ntp->data)->at_rxid);
706 		isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data);
707 		isp_put_ntpd(isp, bus, ntp);
708 		if (!STAILQ_EMPTY(&tptr->restart_queue))
709 			break;
710 	}
711 	if (!STAILQ_EMPTY(&rq)) {
712 		STAILQ_CONCAT(&rq, &tptr->restart_queue);
713 		STAILQ_CONCAT(&tptr->restart_queue, &rq);
714 	}
715 	return (!STAILQ_EMPTY(&tptr->restart_queue));
716 }
717 
718 static void
719 isp_tmcmd_restart(ispsoftc_t *isp)
720 {
721 	struct isp_fc *fc;
722 	tstate_t *tptr;
723 	union ccb *ccb;
724 	int bus, i;
725 
726 	for (bus = 0; bus < isp->isp_nchan; bus++) {
727 		fc = ISP_FC_PC(isp, bus);
728 		for (i = 0; i < LUN_HASH_SIZE; i++) {
729 			SLIST_FOREACH(tptr, &fc->lun_hash[i], next)
730 				isp_atio_restart(isp, bus, tptr);
731 		}
732 
733 		/*
734 		 * We only need to do this once per channel.
735 		 */
736 		ccb = (union ccb *)TAILQ_FIRST(&fc->waitq);
737 		if (ccb != NULL) {
738 			TAILQ_REMOVE(&fc->waitq, &ccb->ccb_h, sim_links.tqe);
739 			isp_target_start_ctio(isp, ccb, FROM_TIMER);
740 		}
741 	}
742 	isp_rq_check_above(isp);
743 	isp_rq_check_below(isp);
744 }
745 
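/*
 * ATIO private data (atpd) pool management: allocate an entry for a new
 * tag, look up an active tag by hash, and return an entry to the free list.
 */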
746 static atio_private_data_t *
747 isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
748 {
749 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
750 	atio_private_data_t *atp;
751 
752 	atp = LIST_FIRST(&fc->atfree);
753 	if (atp) {
754 		LIST_REMOVE(atp, next);
755 		atp->tag = tag;
756 		LIST_INSERT_HEAD(&fc->atused[ATPDPHASH(tag)], atp, next);
757 	}
758 	return (atp);
759 }
760 
761 static atio_private_data_t *
762 isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
763 {
764 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
765 	atio_private_data_t *atp;
766 
767 	LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) {
768 		if (atp->tag == tag)
769 			return (atp);
770 	}
771 	return (NULL);
772 }
773 
774 static void
775 isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
776 {
777 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
778 
779 	if (atp->ests)
780 		isp_put_ecmd(isp, atp->ests);
781 	LIST_REMOVE(atp, next);
782 	memset(atp, 0, sizeof (*atp));
783 	LIST_INSERT_HEAD(&fc->atfree, atp, next);
784 }
785 
786 static void
787 isp_dump_atpd(ispsoftc_t *isp, int chan)
788 {
789 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
790 	atio_private_data_t *atp;
791 	const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" };
792 
793 	for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
794 		if (atp->state == ATPD_STATE_FREE)
795 			continue;
796 		isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s",
797 		    chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]);
798 	}
799 }
800 
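/*
 * Immediate notify private data (ntpd) pool management.
 */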
801 static inot_private_data_t *
802 isp_get_ntpd(ispsoftc_t *isp, int chan)
803 {
804 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
805 	inot_private_data_t *ntp;
806 
807 	ntp = STAILQ_FIRST(&fc->ntfree);
808 	if (ntp)
809 		STAILQ_REMOVE_HEAD(&fc->ntfree, next);
810 	return (ntp);
811 }
812 
813 static inot_private_data_t *
814 isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id)
815 {
816 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
817 	inot_private_data_t *ntp;
818 
819 	for (ntp = fc->ntpool; ntp < &fc->ntpool[ATPDPSIZE]; ntp++) {
820 		if (ntp->tag_id == tag_id && ntp->seq_id == seq_id)
821 			return (ntp);
822 	}
823 	return (NULL);
824 }
825 
826 static void
827 isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp)
828 {
829 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
830 
831 	ntp->tag_id = ntp->seq_id = 0;
832 	STAILQ_INSERT_HEAD(&fc->ntfree, ntp, next);
833 }
834 
835 static tstate_t *
836 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path)
837 {
838 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
839 	lun_id_t lun;
840 	tstate_t *tptr;
841 
842 	lun = xpt_path_lun_id(path);
843 	tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
844 	if (tptr == NULL)
845 		return (NULL);
846 	tptr->ts_lun = lun;
847 	SLIST_INIT(&tptr->atios);
848 	SLIST_INIT(&tptr->inots);
849 	STAILQ_INIT(&tptr->restart_queue);
850 	SLIST_INSERT_HEAD(&fc->lun_hash[LUN_HASH_FUNC(lun)], tptr, next);
851 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n");
852 	return (tptr);
853 }
854 
855 static void
856 destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr)
857 {
858 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
859 	union ccb *ccb;
860 	inot_private_data_t *ntp;
861 
862 	while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) {
863 		SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
864 		ccb->ccb_h.status = CAM_REQ_ABORTED;
865 		xpt_done(ccb);
866 	}
867 	while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) {
868 		SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
869 		ccb->ccb_h.status = CAM_REQ_ABORTED;
870 		xpt_done(ccb);
871 	}
872 	while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) {
873 		isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0);
874 		STAILQ_REMOVE_HEAD(&tptr->restart_queue, next);
875 		isp_put_ntpd(isp, bus, ntp);
876 	}
877 	SLIST_REMOVE(&fc->lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], tptr, tstate, next);
878 	free(tptr, M_DEVBUF);
879 }
880 
881 static void
882 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb)
883 {
884 	tstate_t *tptr;
885 	int bus = XS_CHANNEL(ccb);
886 	target_id_t target = ccb->ccb_h.target_id;
887 	lun_id_t lun = ccb->ccb_h.target_lun;
888 
889 	/*
890 	 * We only support either target and lun both wildcard
891 	 * or target and lun both non-wildcard.
892 	 */
893 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
894 	    "enabling lun %jx\n", (uintmax_t)lun);
895 	if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
896 		ccb->ccb_h.status = CAM_LUN_INVALID;
897 		xpt_done(ccb);
898 		return;
899 	}
900 
901 	/* Create the state pointer. It should not already exist. */
902 	tptr = get_lun_statep(isp, bus, lun);
903 	if (tptr) {
904 		ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
905 		xpt_done(ccb);
906 		return;
907 	}
908 	tptr = create_lun_state(isp, bus, ccb->ccb_h.path);
909 	if (tptr == NULL) {
910 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
911 		xpt_done(ccb);
912 		return;
913 	}
914 
915 	ccb->ccb_h.status = CAM_REQ_CMP;
916 	xpt_done(ccb);
917 }
918 
919 static void
920 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb)
921 {
922 	tstate_t *tptr;
923 	int bus = XS_CHANNEL(ccb);
924 	target_id_t target = ccb->ccb_h.target_id;
925 	lun_id_t lun = ccb->ccb_h.target_lun;
926 
927 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
928 	    "disabling lun %jx\n", (uintmax_t)lun);
929 	if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
930 		ccb->ccb_h.status = CAM_LUN_INVALID;
931 		xpt_done(ccb);
932 		return;
933 	}
934 
935 	/* Find the state pointer. */
936 	if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) {
937 		ccb->ccb_h.status = CAM_PATH_INVALID;
938 		xpt_done(ccb);
939 		return;
940 	}
941 
942 	destroy_lun_state(isp, bus, tptr);
943 	ccb->ccb_h.status = CAM_REQ_CMP;
944 	xpt_done(ccb);
945 }
946 
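/*
 * Build and queue CTIO7s for target mode CCBs.  CCBs that cannot be
 * started yet are parked on the per-channel wait queue and retried later
 * (from the timer, an SRR, or a CTIO completion).
 */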
947 static void
948 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
949 {
950 	int fctape, sendstatus, resid;
951 	fcparam *fcp;
952 	atio_private_data_t *atp;
953 	struct ccb_scsiio *cso;
954 	struct isp_ccbq *waitq;
955 	uint32_t dmaresult, handle, xfrlen, sense_length, tmp;
956 	ct7_entry_t local, *cto = &local;
957 
958 	isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len,
959 	    (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? ccb->csio.sense_len : 0));
960 
961 	waitq = &ISP_FC_PC(isp, XS_CHANNEL(ccb))->waitq;
962 	switch (how) {
963 	case FROM_CAM:
964 		/*
965 		 * Insert at the tail of the list, if any, waiting CTIO CCBs
966 		 */
967 		TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe);
968 		break;
969 	case FROM_TIMER:
970 	case FROM_SRR:
971 	case FROM_CTIO_DONE:
972 		TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
973 		break;
974 	}
975 
976 	while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) {
977 		TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe);
978 
979 		cso = &ccb->csio;
980 		xfrlen = cso->dxfer_len;
981 		if (xfrlen == 0) {
982 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
983 				ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n");
984 				ccb->ccb_h.status = CAM_REQ_INVALID;
985 				xpt_done(ccb);
986 				continue;
987 			}
988 		}
989 
990 		atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id);
991 		if (atp == NULL) {
992 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__);
993 			isp_dump_atpd(isp, XS_CHANNEL(ccb));
994 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
995 			xpt_done(ccb);
996 			continue;
997 		}
998 
999 		/*
1000 		 * Is this command a dead duck?
1001 		 */
1002 		if (atp->dead) {
1003 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id);
1004 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1005 			xpt_done(ccb);
1006 			continue;
1007 		}
1008 
1009 		/*
1010 		 * Check to make sure we're still in target mode.
1011 		 */
1012 		fcp = FCPARAM(isp, XS_CHANNEL(ccb));
1013 		if ((fcp->role & ISP_ROLE_TARGET) == 0) {
1014 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id);
1015 			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1016 			xpt_done(ccb);
1017 			continue;
1018 		}
1019 
1020 		/*
1021 		 * We only handle up to ATPD_CCB_OUTSTANDING outstanding CCBs at a time (each of
1022 		 * which could be split into two CTIOs to separate data and status).
1023 		 */
1024 		if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) {
1025 			isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags);
1026 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1027 			break;
1028 		}
1029 
1030 		/*
1031 		 * Does the initiator expect FC-Tape style responses?
1032 		 */
1033 		if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) {
1034 			fctape = 1;
1035 		} else {
1036 			fctape = 0;
1037 		}
1038 
1039 		/*
1040 		 * If we already did the data xfer portion of a CTIO that sends data
1041 		 * and status, don't do it again and do the status portion now.
1042 		 */
1043 		if (atp->sendst) {
1044 			isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u",
1045 			    cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit);
1046 			xfrlen = 0;	/* we already did the data transfer */
1047 			atp->sendst = 0;
1048 		}
1049 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1050 			sendstatus = 1;
1051 		} else {
1052 			sendstatus = 0;
1053 		}
1054 
1055 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
1056 			KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?"));
1057 			/*
1058 			 * Sense length is not the entire sense data structure size. Periph
1059 			 * drivers don't seem to be setting sense_len to reflect the actual
1060 			 * size. We'll peek inside to get the right amount.
1061 			 */
1062 			sense_length = cso->sense_len;
1063 
1064 			/*
1065 			 * This 'cannot' happen
1066 			 */
1067 			if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) {
1068 				sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE;
1069 			}
1070 		} else {
1071 			sense_length = 0;
1072 		}
1073 
1074 		/*
1075 		 * Check for overflow
1076 		 */
1077 		tmp = atp->bytes_xfered + atp->bytes_in_transit;
1078 		if (xfrlen > 0 && tmp > atp->orig_datalen) {
1079 			isp_prt(isp, ISP_LOGERR,
1080 			    "%s: [0x%x] data overflow by %u bytes", __func__,
1081 			    cso->tag_id, tmp + xfrlen - atp->orig_datalen);
1082 			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1083 			xpt_done(ccb);
1084 			continue;
1085 		}
1086 		if (xfrlen > atp->orig_datalen - tmp) {
1087 			xfrlen = atp->orig_datalen - tmp;
1088 			if (xfrlen == 0 && !sendstatus) {
1089 				cso->resid = cso->dxfer_len;
1090 				ccb->ccb_h.status = CAM_REQ_CMP;
1091 				xpt_done(ccb);
1092 				continue;
1093 			}
1094 		}
1095 
1096 		memset(cto, 0, QENTRY_LEN);
1097 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1098 		cto->ct_header.rqs_entry_count = 1;
1099 		cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM;
1100 		ATPD_SET_SEQNO(cto, atp);
1101 		cto->ct_nphdl = atp->nphdl;
1102 		cto->ct_rxid = atp->tag;
1103 		cto->ct_iid_lo = atp->sid;
1104 		cto->ct_iid_hi = atp->sid >> 16;
1105 		cto->ct_oxid = atp->oxid;
1106 		cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb));
1107 		cto->ct_timeout = XS_TIME(ccb);
1108 		cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT;
1109 
1110 		/*
1111 		 * Mode 1, status, no data. Only possible when we are sending status, have
1112 		 * no data to transfer, and any sense data can fit into a ct7_entry_t.
1113 		 *
1114 		 * Mode 2, status, no data. We have to use this in the case that
1115 		 * the sense data won't fit into a ct7_entry_t.
1116 		 *
1117 		 */
1118 		if (sendstatus && xfrlen == 0) {
1119 			cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA;
1120 			resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit;
1121 			if (sense_length <= MAXRESPLEN_24XX) {
1122 				cto->ct_flags |= CT7_FLAG_MODE1;
1123 				cto->ct_scsi_status = cso->scsi_status;
1124 				if (resid < 0) {
1125 					cto->ct_resid = -resid;
1126 					cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8);
1127 				} else if (resid > 0) {
1128 					cto->ct_resid = resid;
1129 					cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8);
1130 				}
1131 				if (fctape) {
1132 					cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1133 				}
1134 				if (sense_length) {
1135 					cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
1136 					cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length;
1137 					memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length);
1138 				}
1139 			} else {
1140 				bus_addr_t addr;
1141 				fcp_rsp_iu_t rp;
1142 
1143 				if (atp->ests == NULL) {
1144 					atp->ests = isp_get_ecmd(isp);
1145 					if (atp->ests == NULL) {
1146 						TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1147 						break;
1148 					}
1149 				}
1150 				memset(&rp, 0, sizeof(rp));
1151 				if (fctape) {
1152 					cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1153 					rp.fcp_rsp_bits |= FCP_CONF_REQ;
1154 				}
1155 				cto->ct_flags |= CT7_FLAG_MODE2;
1156 				rp.fcp_rsp_scsi_status = cso->scsi_status;
1157 				if (resid < 0) {
1158 					rp.fcp_rsp_resid = -resid;
1159 					rp.fcp_rsp_bits |= FCP_RESID_OVERFLOW;
1160 				} else if (resid > 0) {
1161 					rp.fcp_rsp_resid = resid;
1162 					rp.fcp_rsp_bits |= FCP_RESID_UNDERFLOW;
1163 				}
1164 				if (sense_length) {
1165 					rp.fcp_rsp_snslen = sense_length;
1166 					cto->ct_senselen = sense_length;
1167 					rp.fcp_rsp_bits |= FCP_SNSLEN_VALID;
1168 					isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1169 					memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length);
1170 				} else {
1171 					isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1172 				}
1173 				if (isp->isp_dblev & ISP_LOGTDEBUG1) {
1174 					isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests);
1175 				}
1176 				bus_dmamap_sync(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map, BUS_DMASYNC_PREWRITE);
1177 				addr = isp->isp_osinfo.ecmd_dma;
1178 				addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE);
1179 				isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests,
1180 				    (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length);
1181 				cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length;
1182 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr);
1183 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr);
1184 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
1185 			}
1186 			if (sense_length) {
1187 				isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__,
1188 				    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length,
1189 				    cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]);
1190 			} else {
1191 				isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__,
1192 				    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid);
1193 			}
1194 			atp->state = ATPD_STATE_LAST_CTIO;
1195 		}
1196 
1197 		/*
1198 		 * Mode 0 data transfers, *possibly* with status.
1199 		 */
1200 		if (xfrlen != 0) {
1201 			cto->ct_flags |= CT7_FLAG_MODE0;
1202 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1203 				cto->ct_flags |= CT7_DATA_IN;
1204 			} else {
1205 				cto->ct_flags |= CT7_DATA_OUT;
1206 			}
1207 
1208 			cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit;
1209 			cto->rsp.m0.ct_xfrlen = xfrlen;
1210 
1211 #ifdef	DEBUG
1212 			if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) {
1213 				isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2));
1214 				ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0;
1215 				cto->rsp.m0.ct_xfrlen -= xfrlen >> 2;
1216 			}
1217 #endif
1218 			if (sendstatus) {
1219 				resid = atp->orig_datalen - atp->bytes_xfered - xfrlen;
1220 				if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) {
1221 					cto->ct_flags |= CT7_SENDSTATUS;
1222 					atp->state = ATPD_STATE_LAST_CTIO;
1223 					if (fctape) {
1224 						cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1225 					}
1226 				} else {
1227 					atp->sendst = 1;	/* send status later */
1228 					cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM;
1229 					atp->state = ATPD_STATE_CTIO;
1230 				}
1231 			} else {
1232 				atp->state = ATPD_STATE_CTIO;
1233 			}
1234 			isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__,
1235 			    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered);
1236 		}
1237 
1238 		if (isp_get_pcmd(isp, ccb)) {
1239 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n");
1240 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1241 			break;
1242 		}
1243 		handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET);
1244 		if (handle == 0) {
1245 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__);
1246 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1247 			isp_free_pcmd(isp, ccb);
1248 			break;
1249 		}
1250 		atp->bytes_in_transit += xfrlen;
1251 		PISP_PCMD(ccb)->datalen = xfrlen;
1252 
1253 		/*
1254 		 * Call the dma setup routines for this entry (and any subsequent
1255 		 * CTIOs) if there's data to move, and then tell the f/w it's got
1256 		 * new things to play with. As with isp_start's usage of DMA setup,
1257 		 * any swizzling is done in the machine dependent layer. Because
1258 		 * of this, we put the request onto the queue area first in native
1259 		 * format.
1260 		 */
1261 		cto->ct_syshandle = handle;
1262 		dmaresult = ISP_DMASETUP(isp, cso, cto);
1263 		if (dmaresult != 0) {
1264 			isp_destroy_handle(isp, handle);
1265 			isp_free_pcmd(isp, ccb);
1266 			if (dmaresult == CMD_EAGAIN) {
1267 				TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1268 				break;
1269 			}
1270 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1271 			xpt_done(ccb);
1272 			continue;
1273 		}
1274 		ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED;
1275 		if (xfrlen) {
1276 			ccb->ccb_h.spriv_field0 = atp->bytes_xfered;
1277 		} else {
1278 			ccb->ccb_h.spriv_field0 = ~0;
1279 		}
1280 		atp->ctcnt++;
1281 		atp->seqno++;
1282 	}
1283 }
1284 
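/*
 * Callout handler that retries a notify ack until the firmware accepts
 * it, then frees the tracking structure.
 */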
1285 static void
1286 isp_refire_notify_ack(void *arg)
1287 {
1288 	isp_tna_t *tp = arg;
1289 	ispsoftc_t *isp = tp->isp;
1290 
1291 	ISP_ASSERT_LOCKED(isp);
1292 	if (isp_notify_ack(isp, tp->not)) {
1293 		callout_schedule(&tp->timer, 5);
1294 	} else {
1295 		free(tp, M_DEVBUF);
1296 	}
1297 }
1298 
1299 
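/*
 * Final completion of a target mode CCB: re-check request queue occupancy
 * and hand the CCB back to CAM.
 */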
1300 static void
1301 isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb)
1302 {
1303 
1304 	isp_rq_check_below(isp);
1305 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1306 	xpt_done(ccb);
1307 }
1308 
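/*
 * Handle a new ATIO7 (received FCP command): find the channel and the
 * initiator's port database entry, locate the LUN state (or the wildcard
 * LUN), attach private data and pass a CAM ATIO up, or queue the command
 * or respond with BUSY when resources are short.
 */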
1309 static void
1310 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
1311 {
1312 	int cdbxlen;
1313 	lun_id_t lun;
1314 	uint16_t chan, nphdl = NIL_HANDLE;
1315 	uint32_t did, sid;
1316 	fcportdb_t *lp;
1317 	tstate_t *tptr;
1318 	struct ccb_accept_tio *atiop;
1319 	atio_private_data_t *atp = NULL;
1320 	atio_private_data_t *oatp;
1321 	inot_private_data_t *ntp;
1322 
1323 	did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
1324 	sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1325 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun));
1326 
1327 	if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
1328 		/* Channel has to be derived from D_ID */
1329 		isp_find_chan_by_did(isp, did, &chan);
1330 		if (chan == ISP_NOCHAN) {
1331 			isp_prt(isp, ISP_LOGWARN,
1332 			    "%s: [RX_ID 0x%x] D_ID %x not found on any channel",
1333 			    __func__, aep->at_rxid, did);
1334 			isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN,
1335 			    ECMD_TERMINATE, 0);
1336 			return;
1337 		}
1338 	} else {
1339 		chan = 0;
1340 	}
1341 
1342 	/*
1343 	 * Find the PDB entry for this initiator
1344 	 */
1345 	if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) {
1346 		/*
1347 		 * If we're not in the port database terminate the exchange.
1348 		 */
1349 		isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already",
1350 		    __func__, aep->at_rxid, did, chan, sid);
1351 		isp_dump_portdb(isp, chan);
1352 		isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0);
1353 		return;
1354 	}
1355 	nphdl = lp->handle;
1356 
1357 	/*
1358 	 * Get the tstate pointer
1359 	 */
1360 	tptr = get_lun_statep(isp, chan, lun);
1361 	if (tptr == NULL) {
1362 		tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD);
1363 		if (tptr == NULL) {
1364 			isp_prt(isp, ISP_LOGWARN,
1365 			    "%s: [0x%x] no state pointer for lun %jx or wildcard",
1366 			    __func__, aep->at_rxid, (uintmax_t)lun);
1367 			if (lun == 0) {
1368 				isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1369 			} else {
1370 				isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
1371 			}
1372 			return;
1373 		}
1374 	}
1375 
1376 	/*
1377 	 * Start any commands pending resources first.
1378 	 */
1379 	if (isp_atio_restart(isp, chan, tptr))
1380 		goto noresrc;
1381 
1382 	/*
1383 	 * If the f/w is out of resources, just send a BUSY status back.
1384 	 */
1385 	if (aep->at_rxid == AT7_NORESRC_RXID) {
1386 		isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1387 		return;
1388 	}
1389 
1390 	/*
1391 	 * If we're out of resources, just send a BUSY status back.
1392 	 */
1393 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1394 	if (atiop == NULL) {
1395 		isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid);
1396 		goto noresrc;
1397 	}
1398 
1399 	oatp = isp_find_atpd(isp, chan, aep->at_rxid);
1400 	if (oatp) {
1401 		isp_prt(isp, oatp->state == ATPD_STATE_LAST_CTIO ? ISP_LOGTDEBUG0 :
1402 		    ISP_LOGWARN, "[0x%x] tag wraparound (N-Port Handle "
1403 		    "0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d",
1404 		    aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state);
1405 		/*
1406 		 * It's not a "no resource" condition- but we can treat it like one
1407 		 */
1408 		goto noresrc;
1409 	}
1410 	atp = isp_get_atpd(isp, chan, aep->at_rxid);
1411 	if (atp == NULL) {
1412 		isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid);
1413 		isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1414 		return;
1415 	}
1416 	atp->word3 = lp->prli_word3;
1417 	atp->state = ATPD_STATE_ATIO;
1418 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1419 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n");
1420 	atiop->init_id = FC_PORTDB_TGT(isp, chan, lp);
1421 	atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1422 	atiop->ccb_h.target_lun = lun;
1423 	atiop->sense_len = 0;
1424 	cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT;
1425 	if (cdbxlen) {
1426 		isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored");
1427 	}
1428 	cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb);
1429 	ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen);
1430 	atiop->cdb_len = cdbxlen;
1431 	atiop->ccb_h.status = CAM_CDB_RECVD;
1432 	atiop->tag_id = atp->tag;
1433 	switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) {
1434 	case FCP_CMND_TASK_ATTR_SIMPLE:
1435 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1436 		atiop->tag_action = MSG_SIMPLE_TASK;
1437 		break;
1438 	case FCP_CMND_TASK_ATTR_HEAD:
1439 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1440 		atiop->tag_action = MSG_HEAD_OF_QUEUE_TASK;
1441 		break;
1442 	case FCP_CMND_TASK_ATTR_ORDERED:
1443 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1444 		atiop->tag_action = MSG_ORDERED_TASK;
1445 		break;
1446 	case FCP_CMND_TASK_ATTR_ACA:
1447 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1448 		atiop->tag_action = MSG_ACA_TASK;
1449 		break;
1450 	case FCP_CMND_TASK_ATTR_UNTAGGED:
1451 	default:
1452 		atiop->tag_action = 0;
1453 		break;
1454 	}
1455 	atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute &
1456 	    FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT;
1457 	atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl;
1458 	atp->bytes_xfered = 0;
1459 	atp->lun = lun;
1460 	atp->nphdl = nphdl;
1461 	atp->sid = sid;
1462 	atp->did = did;
1463 	atp->oxid = aep->at_hdr.ox_id;
1464 	atp->rxid = aep->at_hdr.rx_id;
1465 	atp->cdb0 = atiop->cdb_io.cdb_bytes[0];
1466 	atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK;
1467 	atp->state = ATPD_STATE_CAM;
1468 	isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u",
1469 	    aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen);
1470 	xpt_done((union ccb *)atiop);
1471 	return;
1472 noresrc:
1473 	KASSERT(atp == NULL, ("%s: atp is not NULL on noresrc!\n", __func__));
1474 	ntp = isp_get_ntpd(isp, chan);
1475 	if (ntp == NULL) {
1476 		isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1477 		return;
1478 	}
1479 	memcpy(ntp->data, aep, QENTRY_LEN);
1480 	STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next);
1481 }
1482 
1483 
1484 /*
1485  * Handle starting an SRR (sequence retransmit request)
1486  * We get here when we've gotten the immediate notify
1487  * and the return of all outstanding CTIOs for this
1488  * transaction.
1489  */
1490 static void
1491 isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp)
1492 {
1493 	in_fcentry_24xx_t *inot;
1494 	uint32_t srr_off, ccb_off, ccb_len, ccb_end;
1495 	union ccb *ccb;
1496 
1497 	inot = (in_fcentry_24xx_t *)atp->srr;
1498 	srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16);
1499 	ccb = atp->srr_ccb;
1500 	atp->srr_ccb = NULL;
1501 	atp->nsrr++;
1502 	if (ccb == NULL) {
1503 		isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag);
1504 		goto fail;
1505 	}
1506 
1507 	ccb_off = ccb->ccb_h.spriv_field0;
1508 	ccb_len = ccb->csio.dxfer_len;
1509 	ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len;
1510 
1511 	switch (inot->in_srr_iu) {
1512 	case R_CTL_INFO_SOLICITED_DATA:
1513 		/*
1514 		 * We have to restart a FCP_DATA data out transaction
1515 		 */
1516 		atp->sendst = 0;
1517 		atp->bytes_xfered = srr_off;
1518 		if (ccb_len == 0) {
1519 			isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off);
1520 			goto mdp;
1521 		}
1522 		if (srr_off < ccb_off || srr_off > ccb_end) {
1523 			isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1524 			goto mdp;
1525 		}
1526 		isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1527 		break;
1528 	case R_CTL_INFO_COMMAND_STATUS:
1529 		isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag);
1530 		atp->sendst = 1;
1531 		/*
1532 		 * We have to restart a FCP_RSP IU transaction
1533 		 */
1534 		break;
1535 	case R_CTL_INFO_DATA_DESCRIPTOR:
1536 		/*
1537 		 * We have to restart an FCP DATA in transaction
1538 		 */
1539 		isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping");
1540 		goto fail;
1541 
1542 	default:
1543 		isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu);
1544 		goto fail;
1545 	}
1546 
1547 	/*
1548 	 * We can't do anything until this is acked, so we might as well start it now.
1549 	 * We aren't going to do the usual asynchronous ack issue because we need
1550 	 * to make sure this gets on the wire first.
1551 	 */
1552 	if (isp_notify_ack(isp, inot)) {
1553 		isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1554 		goto fail;
1555 	}
1556 	isp_target_start_ctio(isp, ccb, FROM_SRR);
1557 	return;
1558 fail:
1559 	inot->in_reserved = 1;
1560 	isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
	/* If there is no CCB to complete, the deferred ack above is all we can do. */
	if (ccb == NULL)
		return;
1561 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1562 	ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1563 	isp_complete_ctio(isp, ccb);
1564 	return;
1565 mdp:
1566 	if (isp_notify_ack(isp, inot)) {
1567 		isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1568 		goto fail;
1569 	}
1570 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1571 	ccb->ccb_h.status |= CAM_MESSAGE_RECV;
1572 	/*
1573 	 * This is not a strict interpretation of MDP, but it's close
1574 	 */
1575 	ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16];
1576 	ccb->csio.msg_len = 7;
1577 	ccb->csio.msg_ptr[0] = MSG_EXTENDED;
1578 	ccb->csio.msg_ptr[1] = 5;
1579 	ccb->csio.msg_ptr[2] = 0;	/* modify data pointer */
1580 	ccb->csio.msg_ptr[3] = srr_off >> 24;
1581 	ccb->csio.msg_ptr[4] = srr_off >> 16;
1582 	ccb->csio.msg_ptr[5] = srr_off >> 8;
1583 	ccb->csio.msg_ptr[6] = srr_off;
1584 	isp_complete_ctio(isp, ccb);
1585 }
1586 
1587 
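/*
 * Record an SRR immediate notify against the command's private data and,
 * if the corresponding CTIO has already come back, start SRR handling.
 */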
1588 static void
1589 isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify)
1590 {
1591 	in_fcentry_24xx_t *inot = notify->nt_lreserved;
1592 	atio_private_data_t *atp;
1593 	uint32_t tag = notify->nt_tagval & 0xffffffff;
1594 
1595 	atp = isp_find_atpd(isp, notify->nt_channel, tag);
1596 	if (atp == NULL) {
1597 		isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify",
1598 		    __func__, tag);
1599 		isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1600 		return;
1601 	}
1602 	atp->srr_notify_rcvd = 1;
1603 	memcpy(atp->srr, inot, sizeof (atp->srr));
1604 	isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x",
1605 	    inot->in_rxid, inot->in_flags, inot->in_srr_iu,
1606 	    ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo);
1607 	if (atp->srr_ccb)
1608 		isp_handle_srr_start(isp, atp);
1609 }
1610 
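/*
 * Handle completion of a CTIO7: update transfer accounting, translate the
 * firmware status into CAM status, and either continue the exchange (more
 * data or deferred status) or complete the CCB back to CAM.
 */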
1611 static void
1612 isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t *ct)
1613 {
1614 	union ccb *ccb;
1615 	int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0;
1616 	atio_private_data_t *atp = NULL;
1617 	int bus;
1618 	uint32_t handle, data_requested, resid;
1619 
1620 	handle = ct->ct_syshandle;
1621 	ccb = isp_find_xs(isp, handle);
1622 	if (ccb == NULL) {
1623 		isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, ct);
1624 		return;
1625 	}
1626 	isp_destroy_handle(isp, handle);
1627 	resid = data_requested = PISP_PCMD(ccb)->datalen;
1628 	isp_free_pcmd(isp, ccb);
1629 
1630 	bus = XS_CHANNEL(ccb);
1631 	atp = isp_find_atpd(isp, bus, ct->ct_rxid);
1632 	if (atp == NULL) {
1633 		/*
1634 		 * XXX: isp_clear_commands() generates fake CTIO with zero
1635 		 * ct_rxid value, filling only ct_syshandle.  Workaround
1636 		 * that using tag_id from the CCB, pointed by ct_syshandle.
1637 		 */
1638 		atp = isp_find_atpd(isp, bus, ccb->csio.tag_id);
1639 	}
1640 	if (atp == NULL) {
1641 		isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id);
1642 		return;
1643 	}
1644 	KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero"));
1645 	atp->bytes_in_transit -= data_requested;
1646 	atp->ctcnt -= 1;
1647 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1648 
1649 	if (ct->ct_nphdl == CT7_SRR) {
1650 		atp->srr_ccb = ccb;
1651 		if (atp->srr_notify_rcvd)
1652 			isp_handle_srr_start(isp, atp);
1653 		return;
1654 	}
1655 	if (ct->ct_nphdl == CT_HBA_RESET) {
1656 		sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1657 		    (atp->sendst == 0);
1658 		failure = CAM_UNREC_HBA_ERROR;
1659 	} else {
1660 		sentstatus = ct->ct_flags & CT7_SENDSTATUS;
1661 		ok = (ct->ct_nphdl == CT7_OK);
1662 		notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0;
1663 		if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA)
1664 			resid = ct->ct_resid;
1665 	}
1666 	isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct),
1667 	   notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID");
1668 	if (ok) {
1669 		if (data_requested > 0) {
1670 			atp->bytes_xfered += data_requested - resid;
1671 			ccb->csio.resid = ccb->csio.dxfer_len -
1672 			    (data_requested - resid);
1673 		}
1674 		if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE))
1675 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1676 		ccb->ccb_h.status |= CAM_REQ_CMP;
1677 	} else {
1678 		notify_cam = 1;
1679 		if (failure == CAM_UNREC_HBA_ERROR)
1680 			ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
1681 		else
1682 			ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1683 	}
1684 	atp->state = ATPD_STATE_PDON;
1685 
1686 	/*
1687 	 * We never *not* notify CAM when there has been any error (ok == 0),
1688 	 * so we never need to do an ATIO putback if we're not notifying CAM.
1689 	 */
1690 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)",
1691 	    (sentstatus)? "  FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0);
1692 	if (notify_cam == 0) {
1693 		if (atp->sendst) {
1694 			isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE);
1695 		}
1696 		return;
1697 	}
1698 
1699 	/*
1700 	 * We are done with this ATIO if we successfully sent status.
1701 	 * In all other cases expect either another CTIO or XPT_ABORT.
1702 	 */
1703 	if (ok && sentstatus)
1704 		isp_put_atpd(isp, bus, atp);
1705 
1706 	/*
1707 	 * We're telling CAM we're done with this CTIO transaction.
1708 	 *
1709 	 * 24XX cards never need an ATIO put back.
1710 	 */
1711 	isp_complete_ctio(isp, ccb);
1712 }
1713 
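/*
 * Acknowledge a target mode notify.  Task Management Functions (which
 * arrive as ATIO7 entries) are answered with a mode 1 CTIO7 carrying an
 * optional FCP response code, ABTS frames get the associated task
 * terminated if still required and are answered via isp_acknak_abts(),
 * and everything else gets a plain NOTIFY ACK.
 */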
1714 static int
1715 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp)
1716 {
1717 	ct7_entry_t local, *cto = &local;
1718 
1719 	if (isp->isp_state != ISP_RUNSTATE) {
1720 		isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL);
1721 		return (0);
1722 	}
1723 
1724 	/*
1725 	 * This case is for a Task Management Function, which shows up as an ATIO7 entry.
1726 	 */
1727 	if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) {
1728 		at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved;
1729 		fcportdb_t *lp;
1730 		uint32_t sid;
1731 		uint16_t nphdl;
1732 
1733 		sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1734 		if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) {
1735 			nphdl = lp->handle;
1736 		} else {
1737 			nphdl = NIL_HANDLE;
1738 		}
1739 		ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1740 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1741 		cto->ct_header.rqs_entry_count = 1;
1742 		cto->ct_nphdl = nphdl;
1743 		cto->ct_rxid = aep->at_rxid;
1744 		cto->ct_vpidx = mp->nt_channel;
1745 		cto->ct_iid_lo = sid;
1746 		cto->ct_iid_hi = sid >> 16;
1747 		cto->ct_oxid = aep->at_hdr.ox_id;
1748 		cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1;
1749 		cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT;
1750 		if (rsp != 0) {
1751 			cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
1752 			cto->rsp.m1.ct_resplen = 4;
1753 			ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
1754 			cto->rsp.m1.ct_resp[0] = rsp & 0xff;
1755 			cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff;
1756 			cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff;
1757 			cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff;
1758 		}
1759 		return (isp_target_put_entry(isp, cto));
1760 	}
1761 
1762 	/*
1763 	 * This case is for a responding to an ABTS frame
1764 	 */
1765 	if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1766 
1767 		/*
1768 		 * Overload nt_need_ack here to mark whether we've terminated the associated command.
1769 		 */
1770 		if (mp->nt_need_ack) {
1771 			abts_t *abts = (abts_t *)mp->nt_lreserved;
1772 
1773 			ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1774 			isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task);
1775 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1776 			cto->ct_header.rqs_entry_count = 1;
1777 			cto->ct_nphdl = mp->nt_nphdl;
1778 			cto->ct_rxid = abts->abts_rxid_task;
1779 			cto->ct_iid_lo = mp->nt_sid;
1780 			cto->ct_iid_hi = mp->nt_sid >> 16;
1781 			cto->ct_oxid = abts->abts_ox_id;
1782 			cto->ct_vpidx = mp->nt_channel;
1783 			cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
1784 			if (isp_target_put_entry(isp, cto)) {
1785 				return (ENOMEM);
1786 			}
1787 			mp->nt_need_ack = 0;
1788 		}
1789 		if (isp_acknak_abts(isp, mp->nt_lreserved, 0) == ENOMEM) {
1790 			return (ENOMEM);
1791 		} else {
1792 			return (0);
1793 		}
1794 	}
1795 
1796 	/*
1797 	 * General purpose acknowledgement
1798 	 */
1799 	if (mp->nt_need_ack) {
1800 		isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL);
1801 		/*
1802 		 * Don't need to use the guaranteed send because the caller can retry
1803 		 */
1804 		return (isp_notify_ack(isp, mp->nt_lreserved));
1805 	}
1806 	return (0);
1807 }
1808 
1809 /*
1810  * Handle task management functions.
1811  *
1812  * We show up here with a notify structure filled out.
1813  *
1814  * The nt_lreserved field points to the original queue entry.
1815  */
1816 static void
1817 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify)
1818 {
1819 	tstate_t *tptr;
1820 	fcportdb_t *lp;
1821 	struct ccb_immediate_notify *inot;
1822 	inot_private_data_t *ntp = NULL;
1823 	atio_private_data_t *atp;
1824 	lun_id_t lun;
1825 
1826 	isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid  0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode,
1827 	    notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun);
1828 	if (notify->nt_lun == LUN_ANY) {
1829 		if (notify->nt_tagval == TAG_ANY) {
1830 			lun = CAM_LUN_WILDCARD;
1831 		} else {
1832 			atp = isp_find_atpd(isp, notify->nt_channel,
1833 			    notify->nt_tagval & 0xffffffff);
1834 			lun = atp ? atp->lun : CAM_LUN_WILDCARD;
1835 		}
1836 	} else {
1837 		lun = notify->nt_lun;
1838 	}
1839 	tptr = get_lun_statep(isp, notify->nt_channel, lun);
1840 	if (tptr == NULL) {
1841 		tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD);
1842 		if (tptr == NULL) {
1843 			isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1844 			goto bad;
1845 		}
1846 	}
1847 	inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots);
1848 	if (inot == NULL) {
1849 		isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1850 		goto bad;
1851 	}
1852 
1853 	inot->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1854 	inot->ccb_h.target_lun = lun;
1855 	if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 &&
1856 	    isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) {
1857 		inot->initiator_id = CAM_TARGET_WILDCARD;
1858 	} else {
1859 		inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp);
1860 	}
1861 	inot->seq_id = notify->nt_tagval;
1862 	inot->tag_id = notify->nt_tagval >> 32;
1863 
1864 	switch (notify->nt_ncode) {
1865 	case NT_ABORT_TASK:
1866 		isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id);
1867 		inot->arg = MSG_ABORT_TASK;
1868 		break;
1869 	case NT_ABORT_TASK_SET:
1870 		isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY);
1871 		inot->arg = MSG_ABORT_TASK_SET;
1872 		break;
1873 	case NT_CLEAR_ACA:
1874 		inot->arg = MSG_CLEAR_ACA;
1875 		break;
1876 	case NT_CLEAR_TASK_SET:
1877 		inot->arg = MSG_CLEAR_TASK_SET;
1878 		break;
1879 	case NT_LUN_RESET:
1880 		inot->arg = MSG_LOGICAL_UNIT_RESET;
1881 		break;
1882 	case NT_TARGET_RESET:
1883 		inot->arg = MSG_TARGET_RESET;
1884 		break;
1885 	case NT_QUERY_TASK_SET:
1886 		inot->arg = MSG_QUERY_TASK_SET;
1887 		break;
1888 	case NT_QUERY_ASYNC_EVENT:
1889 		inot->arg = MSG_QUERY_ASYNC_EVENT;
1890 		break;
1891 	default:
1892 		isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun);
1893 		goto bad;
1894 	}
1895 
1896 	ntp = isp_get_ntpd(isp, notify->nt_channel);
1897 	if (ntp == NULL) {
1898 		isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__);
1899 		goto bad;
1900 	}
1901 	ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t));
1902 	if (notify->nt_lreserved) {
1903 		ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN);
1904 		ntp->nt.nt_lreserved = &ntp->data;
1905 	}
1906 	ntp->seq_id = notify->nt_tagval;
1907 	ntp->tag_id = notify->nt_tagval >> 32;
1908 
1909 	SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
1910 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n");
1911 	inot->ccb_h.status = CAM_MESSAGE_RECV;
1912 	xpt_done((union ccb *)inot);
1913 	return;
1914 bad:
1915 	if (notify->nt_need_ack) {
1916 		if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1917 			if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) {
1918 				isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK");
1919 			}
1920 		} else {
1921 			isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved);
1922 		}
1923 	}
1924 }
1925 
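/*
 * Mark commands as aborted before they are passed up to CAM.  ATIOs still
 * sitting on the restart queue that match the tag (or all of them for
 * TAG_ANY) are terminated and their private data freed; matching active
 * adjuncts for the LUN are flagged dead.
 */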
1926 static void
1927 isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id)
1928 {
1929 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
1930 	atio_private_data_t *atp;
1931 	inot_private_data_t *ntp, *tmp;
1932 	uint32_t this_tag_id;
1933 
1934 	/*
1935 	 * First, clean any commands pending restart
1936 	 */
1937 	STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) {
1938 		this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid;
1939 		if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) {
1940 			isp_endcmd(isp, ntp->data, NIL_HANDLE, chan,
1941 			    ECMD_TERMINATE, 0);
1942 			isp_put_ntpd(isp, chan, ntp);
1943 			STAILQ_REMOVE(&tptr->restart_queue, ntp,
1944 			    inot_private_data, next);
1945 		}
1946 	}
1947 
1948 	/*
1949 	 * Now mark other ones dead as well.
1950 	 */
1951 	for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
1952 		if (atp->lun != tptr->ts_lun)
1953 			continue;
1954 		if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id)
1955 			atp->dead = 1;
1956 	}
1957 }
1958 #endif
1959 
1960 static void
1961 isp_poll(struct cam_sim *sim)
1962 {
1963 	ispsoftc_t *isp = cam_sim_softc(sim);
1964 
1965 	ISP_RUN_ISR(isp);
1966 }
1967 
1968 
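/*
 * Per-command timeout handler.  Run the ISR once in case the completion is
 * merely pending, then try to abort the command; if the abort fails and the
 * command still exists, tear down its DMA mapping and handle and complete
 * it back to CAM with CAM_CMD_TIMEOUT.
 */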
1969 static void
1970 isp_watchdog(void *arg)
1971 {
1972 	struct ccb_scsiio *xs = arg;
1973 	ispsoftc_t *isp;
1974 	uint32_t ohandle = ISP_HANDLE_FREE, handle;
1975 
1976 	isp = XS_ISP(xs);
1977 
1978 	handle = isp_find_handle(isp, xs);
1979 
1980 	/*
1981 	 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere.
1982 	 */
1983 	if (handle != ISP_HANDLE_FREE) {
1984 		ISP_RUN_ISR(isp);
1985 		ohandle = handle;
1986 		handle = isp_find_handle(isp, xs);
1987 	}
1988 	if (handle != ISP_HANDLE_FREE) {
1989 		/*
1990 		 * Try and make sure the command is really dead before
1991 		 * we release the handle (and DMA resources) for reuse.
1992 		 *
1993 		 * If we are successful in aborting the command then
1994 		 * we're done here because we'll get the command returned
1995 		 * back separately.
1996 		 */
1997 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
1998 			return;
1999 		}
2000 
2001 		/*
2002 		 * Note that after calling the above, the command may in
2003 		 * fact have been completed.
2004 		 */
2005 		xs = isp_find_xs(isp, handle);
2006 
2007 		/*
2008 		 * If the command no longer exists, then we won't
2009 		 * be able to find the xs again with this handle.
2010 		 */
2011 		if (xs == NULL) {
2012 			return;
2013 		}
2014 
2015 		/*
2016 		 * After this point, the command is really dead.
2017 		 */
2018 		ISP_DMAFREE(isp, xs);
2019 		isp_destroy_handle(isp, handle);
2020 		isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
2021 		XS_SETERR(xs, CAM_CMD_TIMEOUT);
2022 		isp_done(xs);
2023 	} else {
2024 		if (ohandle != ISP_HANDLE_FREE) {
2025 			isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle);
2026 		} else {
2027 			isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__);
2028 		}
2029 	}
2030 }
2031 
2032 static void
2033 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2034 {
2035 	union ccb *ccb;
2036 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2037 
2038 	/*
2039 	 * Allocate a CCB, create a wildcard path for this target and schedule a rescan.
2040 	 */
2041 	ccb = xpt_alloc_ccb_nowait();
2042 	if (ccb == NULL) {
2043 		isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan);
2044 		return;
2045 	}
2046 	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim),
2047 	    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2048 		isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
2049 		xpt_free_ccb(ccb);
2050 		return;
2051 	}
2052 	xpt_rescan(ccb);
2053 }
2054 
2055 static void
2056 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2057 {
2058 	struct cam_path *tp;
2059 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2060 
2061 	if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2062 		xpt_async(AC_LOST_DEVICE, tp, NULL);
2063 		xpt_free_path(tp);
2064 	}
2065 }
2066 
2067 /*
2068  * Gone Device Timer Function- when we have decided that a device has gone
2069  * away, we wait a specific period of time prior to telling the OS it has
2070  * gone away.
2071  *
2072  * This timer function fires once a second and then scans the port database
2073  * for devices that are marked dead but still have a virtual target assigned.
2074  * We decrement a counter for that port database entry, and when it hits zero,
2075  * we tell the OS the device has gone away.
2076  */
2077 static void
2078 isp_gdt(void *arg)
2079 {
2080 	struct isp_fc *fc = arg;
2081 	taskqueue_enqueue(taskqueue_thread, &fc->gtask);
2082 }
2083 
2084 static void
2085 isp_gdt_task(void *arg, int pending)
2086 {
2087 	struct isp_fc *fc = arg;
2088 	ispsoftc_t *isp = fc->isp;
2089 	int chan = fc - ISP_FC_PC(isp, 0);
2090 	fcportdb_t *lp;
2091 	struct ac_contract ac;
2092 	struct ac_device_changed *adc;
2093 	int dbidx, more_to_do = 0;
2094 
2095 	ISP_LOCK(isp);
2096 	isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan);
2097 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2098 		lp = &FCPARAM(isp, chan)->portdb[dbidx];
2099 
2100 		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2101 			continue;
2102 		}
2103 		if (lp->gone_timer != 0) {
2104 			lp->gone_timer -= 1;
2105 			more_to_do++;
2106 			continue;
2107 		}
2108 		isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout");
2109 		if (lp->is_target) {
2110 			lp->is_target = 0;
2111 			isp_make_gone(isp, lp, chan, dbidx);
2112 		}
2113 		if (lp->is_initiator) {
2114 			lp->is_initiator = 0;
2115 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2116 			adc = (struct ac_device_changed *) ac.contract_data;
2117 			adc->wwpn = lp->port_wwn;
2118 			adc->port = lp->portid;
2119 			adc->target = dbidx;
2120 			adc->arrived = 0;
2121 			xpt_async(AC_CONTRACT, fc->path, &ac);
2122 		}
2123 		lp->state = FC_PORTDB_STATE_NIL;
2124 	}
2125 	if (fc->ready) {
2126 		if (more_to_do) {
2127 			callout_reset(&fc->gdt, hz, isp_gdt, fc);
2128 		} else {
2129 			callout_deactivate(&fc->gdt);
2130 			isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime);
2131 		}
2132 	}
2133 	ISP_UNLOCK(isp);
2134 }
2135 
2136 /*
2137  * When the loop goes down we remember the time and freeze the CAM command
2138  * queue.  For some period we keep trying to reprobe the loop, but if that
2139  * fails we tell the OS that the devices have gone away and drop the freeze.
2140  *
2141  * We don't clear the devices out of our port database because, when the loop
2142  * comes back up, we have to do some actual cleanup with the chip at that
2143  * point (implicit PLOGO, e.g., to get the chip's port database state right).
2144  */
2145 static void
2146 isp_loop_changed(ispsoftc_t *isp, int chan)
2147 {
2148 	fcparam *fcp = FCPARAM(isp, chan);
2149 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2150 
2151 	if (fc->loop_down_time)
2152 		return;
2153 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan);
2154 	if (fcp->role & ISP_ROLE_INITIATOR)
2155 		isp_freeze_loopdown(isp, chan);
2156 	fc->loop_down_time = time_uptime;
2157 	wakeup(fc);
2158 }
2159 
2160 static void
2161 isp_loop_up(ispsoftc_t *isp, int chan)
2162 {
2163 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2164 
2165 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan);
2166 	fc->loop_seen_once = 1;
2167 	fc->loop_down_time = 0;
2168 	isp_unfreeze_loopdown(isp, chan);
2169 }
2170 
2171 static void
2172 isp_loop_dead(ispsoftc_t *isp, int chan)
2173 {
2174 	fcparam *fcp = FCPARAM(isp, chan);
2175 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2176 	fcportdb_t *lp;
2177 	struct ac_contract ac;
2178 	struct ac_device_changed *adc;
2179 	int dbidx, i;
2180 
2181 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan);
2182 
2183 	/*
2184 	 * Notify the OS of all targets that we now consider to have departed.
2185 	 */
2186 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2187 		lp = &fcp->portdb[dbidx];
2188 
2189 		if (lp->state == FC_PORTDB_STATE_NIL)
2190 			continue;
2191 
2192 		for (i = 0; i < ISP_HANDLE_NUM(isp); i++) {
2193 			struct ccb_scsiio *xs;
2194 
2195 			if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) {
2196 				continue;
2197 			}
2198 			if ((xs = isp->isp_xflist[i].cmd) == NULL) {
2199 				continue;
2200 			}
2201 			if (dbidx != XS_TGT(xs)) {
2202 				continue;
2203 			}
2204 			isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout",
2205 			    isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2206 			    (uintmax_t)XS_LUN(xs));
2207 
2208 			/*
2209 			 * Just like in isp_watchdog, abort the outstanding
2210 			 * command or immediately free its resources if it is
2211 			 * not active
2212 			 */
2213 			if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
2214 				continue;
2215 			}
2216 
2217 			ISP_DMAFREE(isp, xs);
2218 			isp_destroy_handle(isp, isp->isp_xflist[i].handle);
2219 			isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed",
2220 			    isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2221 			    (uintmax_t)XS_LUN(xs));
2222 			XS_SETERR(xs, HBA_BUSRESET);
2223 			isp_done(xs);
2224 		}
2225 
2226 		isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout");
2227 		if (lp->is_target) {
2228 			lp->is_target = 0;
2229 			isp_make_gone(isp, lp, chan, dbidx);
2230 		}
2231 		if (lp->is_initiator) {
2232 			lp->is_initiator = 0;
2233 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2234 			adc = (struct ac_device_changed *) ac.contract_data;
2235 			adc->wwpn = lp->port_wwn;
2236 			adc->port = lp->portid;
2237 			adc->target = dbidx;
2238 			adc->arrived = 0;
2239 			xpt_async(AC_CONTRACT, fc->path, &ac);
2240 		}
2241 	}
2242 
2243 	isp_unfreeze_loopdown(isp, chan);
2244 	fc->loop_down_time = 0;
2245 }
2246 
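/*
 * Per-channel worker thread.  Repeatedly runs the FC state machine and,
 * based on how long the loop has been down, either sleeps with a growing
 * backoff or declares the loop up or dead.
 */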
2247 static void
2248 isp_kthread(void *arg)
2249 {
2250 	struct isp_fc *fc = arg;
2251 	ispsoftc_t *isp = fc->isp;
2252 	int chan = fc - ISP_FC_PC(isp, 0);
2253 	int slp = 0, d;
2254 	int lb, lim;
2255 
2256 	ISP_LOCK(isp);
2257 	while (isp->isp_osinfo.is_exiting == 0) {
2258 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2259 		    "Chan %d Checking FC state", chan);
2260 		lb = isp_fc_runstate(isp, chan, 250000);
2261 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2262 		    "Chan %d FC got to %s state", chan,
2263 		    isp_fc_loop_statename(lb));
2264 
2265 		/*
2266 		 * What we do next depends on whether we're supporting
2267 		 * initiator mode or not.  If we are, we may freeze the simq
2268 		 * while the loop is down and pick from several different
2269 		 * delays before checking again.
2270 		 *
2271 		 * If not, we simply wait for the loop to come up.
2272 		 */
2273 		if (lb == LOOP_READY || lb < 0) {
2274 			slp = 0;
2275 		} else {
2276 			/*
2277 			 * If we've never seen loop up and we've waited longer
2278 			 * than quickboot time, or we've seen loop up but we've
2279 			 * waited longer than loop_down_limit, give up and go
2280 			 * to sleep until loop comes up.
2281 			 */
2282 			if (fc->loop_seen_once == 0)
2283 				lim = isp_quickboot_time;
2284 			else
2285 				lim = fc->loop_down_limit;
2286 			d = time_uptime - fc->loop_down_time;
2287 			if (d >= lim)
2288 				slp = 0;
2289 			else if (d < 10)
2290 				slp = 1;
2291 			else if (d < 30)
2292 				slp = 5;
2293 			else if (d < 60)
2294 				slp = 10;
2295 			else if (d < 120)
2296 				slp = 20;
2297 			else
2298 				slp = 30;
2299 		}
2300 
2301 		if (slp == 0) {
2302 			if (lb == LOOP_READY)
2303 				isp_loop_up(isp, chan);
2304 			else
2305 				isp_loop_dead(isp, chan);
2306 		}
2307 
2308 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2309 		    "Chan %d sleep for %d seconds", chan, slp);
2310 		msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz);
2311 	}
2312 	fc->num_threads -= 1;
2313 	wakeup(&fc->num_threads);
2314 	ISP_UNLOCK(isp);
2315 	kthread_exit();
2316 }
2317 
2318 #ifdef	ISP_TARGET_MODE
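/*
 * Abort an ACCEPT_TARGET_IO CCB.  An ATIO still queued on the LUN's free
 * list is removed and returned as aborted; one that maps to a running
 * command gets a CTIO7 TERMINATE sent for it (unless the task is already
 * dead) and its adjunct freed.
 */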
2319 static void
2320 isp_abort_atio(ispsoftc_t *isp, union ccb *ccb)
2321 {
2322 	atio_private_data_t *atp;
2323 	union ccb *accb = ccb->cab.abort_ccb;
2324 	struct ccb_hdr *sccb;
2325 	tstate_t *tptr;
2326 
2327 	tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2328 	if (tptr != NULL) {
2329 		/* Search for the ATIO among queueued. */
2330 		SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) {
2331 			if (sccb != &accb->ccb_h)
2332 				continue;
2333 			SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle);
2334 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2335 			    "Abort FREE ATIO\n");
2336 			accb->ccb_h.status = CAM_REQ_ABORTED;
2337 			xpt_done(accb);
2338 			ccb->ccb_h.status = CAM_REQ_CMP;
2339 			return;
2340 		}
2341 	}
2342 
2343 	/* Search for the ATIO among running. */
2344 	atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id);
2345 	if (atp != NULL) {
2346 		/* Send TERMINATE to firmware. */
2347 		if (!atp->dead) {
2348 			uint8_t storage[QENTRY_LEN];
2349 			ct7_entry_t *cto = (ct7_entry_t *) storage;
2350 
2351 			ISP_MEMZERO(cto, sizeof (ct7_entry_t));
2352 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
2353 			cto->ct_header.rqs_entry_count = 1;
2354 			cto->ct_nphdl = atp->nphdl;
2355 			cto->ct_rxid = atp->tag;
2356 			cto->ct_iid_lo = atp->sid;
2357 			cto->ct_iid_hi = atp->sid >> 16;
2358 			cto->ct_oxid = atp->oxid;
2359 			cto->ct_vpidx = XS_CHANNEL(accb);
2360 			cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
2361 			isp_target_put_entry(isp, cto);
2362 		}
2363 		isp_put_atpd(isp, XS_CHANNEL(accb), atp);
2364 		ccb->ccb_h.status = CAM_REQ_CMP;
2365 	} else {
2366 		ccb->ccb_h.status = CAM_UA_ABORT;
2367 	}
2368 }
2369 
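/*
 * Abort an IMMEDIATE_NOTIFY CCB.  A still-queued INOT is removed and
 * returned as aborted; a running one has its pending notify acked (if an
 * ack is still owed) and its private data freed.
 */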
2370 static void
2371 isp_abort_inot(ispsoftc_t *isp, union ccb *ccb)
2372 {
2373 	inot_private_data_t *ntp;
2374 	union ccb *accb = ccb->cab.abort_ccb;
2375 	struct ccb_hdr *sccb;
2376 	tstate_t *tptr;
2377 
2378 	tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2379 	if (tptr != NULL) {
2380 		/* Search for the INOT among queueued. */
2381 		SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) {
2382 			if (sccb != &accb->ccb_h)
2383 				continue;
2384 			SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle);
2385 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2386 			    "Abort FREE INOT\n");
2387 			accb->ccb_h.status = CAM_REQ_ABORTED;
2388 			xpt_done(accb);
2389 			ccb->ccb_h.status = CAM_REQ_CMP;
2390 			return;
2391 		}
2392 	}
2393 
2394 	/* Search for the INOT among running. */
2395 	ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id);
2396 	if (ntp != NULL) {
2397 		if (ntp->nt.nt_need_ack) {
2398 			isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK,
2399 			    ntp->nt.nt_lreserved);
2400 		}
2401 		isp_put_ntpd(isp, XS_CHANNEL(accb), ntp);
2402 		ccb->ccb_h.status = CAM_REQ_CMP;
2403 	} else {
2404 		ccb->ccb_h.status = CAM_UA_ABORT;
2405 		return;
2406 	}
2407 }
2408 #endif
2409 
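/*
 * Main CAM action entry point for the SIM.  Dispatches XPT function codes:
 * SCSI I/O, target mode resources and notify acknowledgement, device and
 * bus resets, aborts, transfer settings, SIM knobs and path inquiry.
 */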
2410 static void
2411 isp_action(struct cam_sim *sim, union ccb *ccb)
2412 {
2413 	int bus, tgt, error;
2414 	ispsoftc_t *isp;
2415 	fcparam *fcp;
2416 	struct ccb_trans_settings *cts;
2417 	sbintime_t ts;
2418 
2419 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2420 
2421 	isp = (ispsoftc_t *)cam_sim_softc(sim);
2422 	ISP_ASSERT_LOCKED(isp);
2423 	bus = cam_sim_bus(sim);
2424 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2425 	ISP_PCMD(ccb) = NULL;
2426 
2427 	switch (ccb->ccb_h.func_code) {
2428 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2429 		/*
2430 		 * Do a couple of preliminary checks...
2431 		 */
2432 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2433 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2434 				ccb->ccb_h.status = CAM_REQ_INVALID;
2435 				isp_done((struct ccb_scsiio *) ccb);
2436 				break;
2437 			}
2438 		}
2439 #ifdef	DIAGNOSTIC
2440 		if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) {
2441 			xpt_print(ccb->ccb_h.path, "invalid target\n");
2442 			ccb->ccb_h.status = CAM_PATH_INVALID;
2443 		}
2444 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2445 			xpt_done(ccb);
2446 			break;
2447 		}
2448 #endif
2449 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2450 		if (isp_get_pcmd(isp, ccb)) {
2451 			isp_prt(isp, ISP_LOGWARN, "out of PCMDs");
2452 			cam_freeze_devq(ccb->ccb_h.path);
2453 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
2454 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2455 			xpt_done(ccb);
2456 			break;
2457 		}
2458 		error = isp_start((XS_T *) ccb);
2459 		isp_rq_check_above(isp);
2460 		switch (error) {
2461 		case 0:
2462 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2463 			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
2464 				break;
2465 			/* Give firmware extra 10s to handle timeout. */
2466 			ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S;
2467 			callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0,
2468 			    isp_watchdog, ccb, 0);
2469 			break;
2470 		case CMD_RQLATER:
2471 			isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later",
2472 			    XS_TGT(ccb), (uintmax_t)XS_LUN(ccb));
2473 			cam_freeze_devq(ccb->ccb_h.path);
2474 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2475 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2476 			isp_free_pcmd(isp, ccb);
2477 			xpt_done(ccb);
2478 			break;
2479 		case CMD_EAGAIN:
2480 			isp_free_pcmd(isp, ccb);
2481 			cam_freeze_devq(ccb->ccb_h.path);
2482 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2483 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2484 			xpt_done(ccb);
2485 			break;
2486 		case CMD_COMPLETE:
2487 			isp_done((struct ccb_scsiio *) ccb);
2488 			break;
2489 		default:
2490 			isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__);
2491 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2492 			isp_free_pcmd(isp, ccb);
2493 			xpt_done(ccb);
2494 		}
2495 		break;
2496 
2497 #ifdef	ISP_TARGET_MODE
2498 	case XPT_EN_LUN:		/* Enable/Disable LUN as a target */
2499 		if (ccb->cel.enable) {
2500 			isp_enable_lun(isp, ccb);
2501 		} else {
2502 			isp_disable_lun(isp, ccb);
2503 		}
2504 		break;
2505 	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
2506 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2507 	{
2508 		tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2509 		if (tptr == NULL) {
2510 			const char *str;
2511 
2512 			if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
2513 				str = "XPT_IMMEDIATE_NOTIFY";
2514 			else
2515 				str = "XPT_ACCEPT_TARGET_IO";
2516 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path,
2517 			    "%s: no state pointer found for %s\n",
2518 			    __func__, str);
2519 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2520 			xpt_done(ccb);
2521 			break;
2522 		}
2523 
2524 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2525 			ccb->atio.tag_id = 0;
2526 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle);
2527 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2528 			    "Put FREE ATIO\n");
2529 		} else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
2530 			ccb->cin1.seq_id = ccb->cin1.tag_id = 0;
2531 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
2532 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2533 			    "Put FREE INOT\n");
2534 		}
2535 		ccb->ccb_h.status = CAM_REQ_INPROG;
2536 		break;
2537 	}
2538 	case XPT_NOTIFY_ACKNOWLEDGE:		/* notify ack */
2539 	{
2540 		inot_private_data_t *ntp;
2541 
2542 		/*
2543 		 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb
2544 		 * XXX: matches that for the immediate notify, we have to *search* for the notify structure
2545 		 */
2546 		/*
2547 		 * All the relevant path information is in the associated immediate notify
2548 		 */
2549 		ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2550 		ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id);
2551 		if (ntp == NULL) {
2552 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__,
2553 			     ccb->cna2.tag_id, ccb->cna2.seq_id);
2554 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2555 			xpt_done(ccb);
2556 			break;
2557 		}
2558 		if (isp_handle_platform_target_notify_ack(isp, &ntp->nt,
2559 		    (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) {
2560 			cam_freeze_devq(ccb->ccb_h.path);
2561 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2562 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2563 			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2564 			break;
2565 		}
2566 		isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp);
2567 		ccb->ccb_h.status = CAM_REQ_CMP;
2568 		ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2569 		xpt_done(ccb);
2570 		break;
2571 	}
2572 	case XPT_CONT_TARGET_IO:
2573 		isp_target_start_ctio(isp, ccb, FROM_CAM);
2574 		isp_rq_check_above(isp);
2575 		break;
2576 #endif
2577 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2578 		tgt = ccb->ccb_h.target_id;
2579 		tgt |= (bus << 16);
2580 
2581 		error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt);
2582 		if (error) {
2583 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2584 		} else {
2585 			/*
2586 			 * If we have an FC device, reset the Command
2587 			 * Reference Number, because the target will expect
2588 			 * that we re-start the CRN at 1 after a reset.
2589 			 */
2590 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2591 
2592 			ccb->ccb_h.status = CAM_REQ_CMP;
2593 		}
2594 		xpt_done(ccb);
2595 		break;
2596 	case XPT_ABORT:			/* Abort the specified CCB */
2597 	{
2598 		union ccb *accb = ccb->cab.abort_ccb;
2599 		switch (accb->ccb_h.func_code) {
2600 #ifdef	ISP_TARGET_MODE
2601 		case XPT_ACCEPT_TARGET_IO:
2602 			isp_abort_atio(isp, ccb);
2603 			break;
2604 		case XPT_IMMEDIATE_NOTIFY:
2605 			isp_abort_inot(isp, ccb);
2606 			break;
2607 #endif
2608 		case XPT_SCSI_IO:
2609 			error = isp_control(isp, ISPCTL_ABORT_CMD, accb);
2610 			if (error) {
2611 				ccb->ccb_h.status = CAM_UA_ABORT;
2612 			} else {
2613 				ccb->ccb_h.status = CAM_REQ_CMP;
2614 			}
2615 			break;
2616 		default:
2617 			ccb->ccb_h.status = CAM_REQ_INVALID;
2618 			break;
2619 		}
2620 		/*
2621 		 * This is not a queued CCB, so the caller expects it to be
2622 		 * complete when control is returned.
2623 		 */
2624 		break;
2625 	}
2626 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2627 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2628 		cts = &ccb->cts;
2629 		if (!IS_CURRENT_SETTINGS(cts)) {
2630 			ccb->ccb_h.status = CAM_REQ_INVALID;
2631 			xpt_done(ccb);
2632 			break;
2633 		}
2634 		ccb->ccb_h.status = CAM_REQ_CMP;
2635 		xpt_done(ccb);
2636 		break;
2637 	case XPT_GET_TRAN_SETTINGS:
2638 	{
2639 		struct ccb_trans_settings_scsi *scsi;
2640 		struct ccb_trans_settings_fc *fc;
2641 
2642 		cts = &ccb->cts;
2643 		scsi = &cts->proto_specific.scsi;
2644 		fc = &cts->xport_specific.fc;
2645 		tgt = cts->ccb_h.target_id;
2646 		fcp = FCPARAM(isp, bus);
2647 
2648 		cts->protocol = PROTO_SCSI;
2649 		cts->protocol_version = SCSI_REV_2;
2650 		cts->transport = XPORT_FC;
2651 		cts->transport_version = 0;
2652 
2653 		scsi->valid = CTS_SCSI_VALID_TQ;
2654 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2655 		fc->valid = CTS_FC_VALID_SPEED;
2656 		fc->bitrate = fcp->isp_gbspeed * 100000;
2657 		if (tgt < MAX_FC_TARG) {
2658 			fcportdb_t *lp = &fcp->portdb[tgt];
2659 			fc->wwnn = lp->node_wwn;
2660 			fc->wwpn = lp->port_wwn;
2661 			fc->port = lp->portid;
2662 			fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2663 		}
2664 		ccb->ccb_h.status = CAM_REQ_CMP;
2665 		xpt_done(ccb);
2666 		break;
2667 	}
2668 	case XPT_CALC_GEOMETRY:
2669 		cam_calc_geometry(&ccb->ccg, 1);
2670 		xpt_done(ccb);
2671 		break;
2672 
2673 	case XPT_RESET_BUS:		/* Reset the specified bus */
2674 		error = isp_control(isp, ISPCTL_RESET_BUS, bus);
2675 		if (error) {
2676 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2677 			xpt_done(ccb);
2678 			break;
2679 		}
2680 		if (bootverbose) {
2681 			xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus);
2682 		}
2683 		xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0);
2684 		ccb->ccb_h.status = CAM_REQ_CMP;
2685 		xpt_done(ccb);
2686 		break;
2687 
2688 	case XPT_TERM_IO:		/* Terminate the I/O process */
2689 		ccb->ccb_h.status = CAM_REQ_INVALID;
2690 		xpt_done(ccb);
2691 		break;
2692 
2693 	case XPT_SET_SIM_KNOB:		/* Set SIM knobs */
2694 	{
2695 		struct ccb_sim_knob *kp = &ccb->knob;
2696 		fcparam *fcp = FCPARAM(isp, bus);
2697 
2698 		if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
2699 			fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn;
2700 			fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn;
2701 			isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn);
2702 		}
2703 		ccb->ccb_h.status = CAM_REQ_CMP;
2704 		if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) {
2705 			int rchange = 0;
2706 			int newrole = 0;
2707 
2708 			switch (kp->xport_specific.fc.role) {
2709 			case KNOB_ROLE_NONE:
2710 				if (fcp->role != ISP_ROLE_NONE) {
2711 					rchange = 1;
2712 					newrole = ISP_ROLE_NONE;
2713 				}
2714 				break;
2715 			case KNOB_ROLE_TARGET:
2716 				if (fcp->role != ISP_ROLE_TARGET) {
2717 					rchange = 1;
2718 					newrole = ISP_ROLE_TARGET;
2719 				}
2720 				break;
2721 			case KNOB_ROLE_INITIATOR:
2722 				if (fcp->role != ISP_ROLE_INITIATOR) {
2723 					rchange = 1;
2724 					newrole = ISP_ROLE_INITIATOR;
2725 				}
2726 				break;
2727 			case KNOB_ROLE_BOTH:
2728 				if (fcp->role != ISP_ROLE_BOTH) {
2729 					rchange = 1;
2730 					newrole = ISP_ROLE_BOTH;
2731 				}
2732 				break;
2733 			}
2734 			if (rchange) {
2735 				ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role from %d to %d\n", fcp->role, newrole);
2736 				if (isp_control(isp, ISPCTL_CHANGE_ROLE,
2737 				    bus, newrole) != 0) {
2738 					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2739 					xpt_done(ccb);
2740 					break;
2741 				}
2742 			}
2743 		}
2744 		xpt_done(ccb);
2745 		break;
2746 	}
2747 	case XPT_GET_SIM_KNOB_OLD:	/* Get SIM knobs -- compat value */
2748 	case XPT_GET_SIM_KNOB:		/* Get SIM knobs */
2749 	{
2750 		struct ccb_sim_knob *kp = &ccb->knob;
2751 		fcparam *fcp = FCPARAM(isp, bus);
2752 
2753 		kp->xport_specific.fc.wwnn = fcp->isp_wwnn;
2754 		kp->xport_specific.fc.wwpn = fcp->isp_wwpn;
2755 		switch (fcp->role) {
2756 		case ISP_ROLE_NONE:
2757 			kp->xport_specific.fc.role = KNOB_ROLE_NONE;
2758 			break;
2759 		case ISP_ROLE_TARGET:
2760 			kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
2761 			break;
2762 		case ISP_ROLE_INITIATOR:
2763 			kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
2764 			break;
2765 		case ISP_ROLE_BOTH:
2766 			kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
2767 			break;
2768 		}
2769 		kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
2770 		ccb->ccb_h.status = CAM_REQ_CMP;
2771 		xpt_done(ccb);
2772 		break;
2773 	}
2774 	case XPT_PATH_INQ:		/* Path routing inquiry */
2775 	{
2776 		struct ccb_pathinq *cpi = &ccb->cpi;
2777 
2778 		cpi->version_num = 1;
2779 #ifdef	ISP_TARGET_MODE
2780 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2781 #else
2782 		cpi->target_sprt = 0;
2783 #endif
2784 		cpi->hba_eng_cnt = 0;
2785 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2786 		cpi->max_lun = 255;
2787 		cpi->bus_id = cam_sim_bus(sim);
2788 		cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE;
2789 
2790 		fcp = FCPARAM(isp, bus);
2791 
2792 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
2793 		cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN;
2794 
2795 		/*
2796 		 * Because our loop ID can shift from time to time,
2797 		 * make our initiator ID out of range of our bus.
2798 		 */
2799 		cpi->initiator_id = cpi->max_target + 1;
2800 
2801 		/*
2802 		 * Set base transfer capabilities for Fibre Channel, for this HBA.
2803 		 */
2804 		if (IS_25XX(isp))
2805 			cpi->base_transfer_speed = 8000000;
2806 		else
2807 			cpi->base_transfer_speed = 4000000;
2808 		cpi->hba_inquiry = PI_TAG_ABLE;
2809 		cpi->transport = XPORT_FC;
2810 		cpi->transport_version = 0;
2811 		cpi->xport_specific.fc.wwnn = fcp->isp_wwnn;
2812 		cpi->xport_specific.fc.wwpn = fcp->isp_wwpn;
2813 		cpi->xport_specific.fc.port = fcp->isp_portid;
2814 		cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000;
2815 		cpi->protocol = PROTO_SCSI;
2816 		cpi->protocol_version = SCSI_REV_2;
2817 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2818 		strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2819 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2820 		cpi->unit_number = cam_sim_unit(sim);
2821 		cpi->ccb_h.status = CAM_REQ_CMP;
2822 		xpt_done(ccb);
2823 		break;
2824 	}
2825 	default:
2826 		ccb->ccb_h.status = CAM_REQ_INVALID;
2827 		xpt_done(ccb);
2828 		break;
2829 	}
2830 }
2831 
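/*
 * Common command completion path back to CAM.  Translates SCSI status into
 * CAM status, freezes the device queue on errors, stops the watchdog and
 * frees the per-command structure before calling xpt_done().
 */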
2832 void
2833 isp_done(XS_T *sccb)
2834 {
2835 	ispsoftc_t *isp = XS_ISP(sccb);
2836 	uint32_t status;
2837 
2838 	if (XS_NOERR(sccb))
2839 		XS_SETERR(sccb, CAM_REQ_CMP);
2840 
2841 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) {
2842 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2843 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2844 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2845 		} else {
2846 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2847 		}
2848 	}
2849 
2850 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2851 	status = sccb->ccb_h.status & CAM_STATUS_MASK;
2852 	if (status != CAM_REQ_CMP &&
2853 	    (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2854 		sccb->ccb_h.status |= CAM_DEV_QFRZN;
2855 		xpt_freeze_devq(sccb->ccb_h.path, 1);
2856 	}
2857 
2858 	if (ISP_PCMD(sccb)) {
2859 		if (callout_active(&PISP_PCMD(sccb)->wdog))
2860 			callout_stop(&PISP_PCMD(sccb)->wdog);
2861 		isp_free_pcmd(isp, (union ccb *) sccb);
2862 	}
2863 	isp_rq_check_below(isp);
2864 	xpt_done((union ccb *) sccb);
2865 }
2866 
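/*
 * Platform handler for asynchronous events reported by the core driver:
 * loop and name server changes, device arrival/departure/change, target
 * mode notifies and actions, and firmware crashes.
 */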
2867 void
2868 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
2869 {
2870 	int bus;
2871 	static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s";
2872 	char buf[64];
2873 	char *msg = NULL;
2874 	target_id_t tgt = 0;
2875 	fcportdb_t *lp;
2876 	struct isp_fc *fc;
2877 	struct ac_contract ac;
2878 	struct ac_device_changed *adc;
2879 	va_list ap;
2880 
2881 	switch (cmd) {
2882 	case ISPASYNC_LOOP_RESET:
2883 	{
2884 		uint16_t lipp;
2885 		fcparam *fcp;
2886 		va_start(ap, cmd);
2887 		bus = va_arg(ap, int);
2888 		va_end(ap);
2889 
2890 		lipp = ISP_READ(isp, OUTMAILBOX1);
2891 		fcp = FCPARAM(isp, bus);
2892 
2893 		isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp);
2894 		/*
2895 		 * Per FCP-4, a Reset LIP should result in a CRN reset. Other
2896 		 * LIPs and loop up/down events should never reset the CRN. For
2897 		 * an as-yet-unknown reason, 24xx series cards (and
2898 		 * potentially others) can interrupt with a LIP Reset status
2899 		 * when no LIP reset came down the wire. Additionally, the LIP
2900 		 * primitive accompanying this status would not be a valid LIP
2901 		 * Reset primitive, but some variation of an invalid AL_PA
2902 		 * LIP. As a result, we have to verify that the AL_PD in the
2903 		 * LIP addresses our port before blindly resetting the CRN.
2904 		 */
2905 		if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF)))
2906 			isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0);
2907 		isp_loop_changed(isp, bus);
2908 		break;
2909 	}
2910 	case ISPASYNC_LIP:
2911 		if (msg == NULL)
2912 			msg = "LIP Received";
2913 		/* FALLTHROUGH */
2914 	case ISPASYNC_LOOP_DOWN:
2915 		if (msg == NULL)
2916 			msg = "LOOP Down";
2917 		/* FALLTHROUGH */
2918 	case ISPASYNC_LOOP_UP:
2919 		if (msg == NULL)
2920 			msg = "LOOP Up";
2921 		va_start(ap, cmd);
2922 		bus = va_arg(ap, int);
2923 		va_end(ap);
2924 		isp_loop_changed(isp, bus);
2925 		isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
2926 		break;
2927 	case ISPASYNC_DEV_ARRIVED:
2928 		va_start(ap, cmd);
2929 		bus = va_arg(ap, int);
2930 		lp = va_arg(ap, fcportdb_t *);
2931 		va_end(ap);
2932 		fc = ISP_FC_PC(isp, bus);
2933 		tgt = FC_PORTDB_TGT(isp, bus, lp);
2934 		isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2935 		isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived");
2936 		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2937 		    (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) {
2938 			lp->is_target = 1;
2939 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2940 			isp_make_here(isp, lp, bus, tgt);
2941 		}
2942 		if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2943 		    (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) {
2944 			lp->is_initiator = 1;
2945 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2946 			adc = (struct ac_device_changed *) ac.contract_data;
2947 			adc->wwpn = lp->port_wwn;
2948 			adc->port = lp->portid;
2949 			adc->target = tgt;
2950 			adc->arrived = 1;
2951 			xpt_async(AC_CONTRACT, fc->path, &ac);
2952 		}
2953 		break;
2954 	case ISPASYNC_DEV_CHANGED:
2955 	case ISPASYNC_DEV_STAYED:
2956 	{
2957 		int crn_reset_done;
2958 
2959 		crn_reset_done = 0;
2960 		va_start(ap, cmd);
2961 		bus = va_arg(ap, int);
2962 		lp = va_arg(ap, fcportdb_t *);
2963 		va_end(ap);
2964 		fc = ISP_FC_PC(isp, bus);
2965 		tgt = FC_PORTDB_TGT(isp, bus, lp);
2966 		isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3);
2967 		if (cmd == ISPASYNC_DEV_CHANGED)
2968 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed");
2969 		else
2970 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed");
2971 
2972 		if (lp->is_target !=
2973 		    ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2974 		     (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) {
2975 			lp->is_target = !lp->is_target;
2976 			if (lp->is_target) {
2977 				if (cmd == ISPASYNC_DEV_CHANGED) {
2978 					isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2979 					crn_reset_done = 1;
2980 				}
2981 				isp_make_here(isp, lp, bus, tgt);
2982 			} else {
2983 				isp_make_gone(isp, lp, bus, tgt);
2984 				if (cmd == ISPASYNC_DEV_CHANGED) {
2985 					isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2986 					crn_reset_done = 1;
2987 				}
2988 			}
2989 		}
2990 		if (lp->is_initiator !=
2991 		    ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2992 		     (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) {
2993 			lp->is_initiator = !lp->is_initiator;
2994 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2995 			adc = (struct ac_device_changed *) ac.contract_data;
2996 			adc->wwpn = lp->port_wwn;
2997 			adc->port = lp->portid;
2998 			adc->target = tgt;
2999 			adc->arrived = lp->is_initiator;
3000 			xpt_async(AC_CONTRACT, fc->path, &ac);
3001 		}
3002 
3003 		if ((cmd == ISPASYNC_DEV_CHANGED) &&
3004 		    (crn_reset_done == 0))
3005 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
3006 
3007 		break;
3008 	}
3009 	case ISPASYNC_DEV_GONE:
3010 		va_start(ap, cmd);
3011 		bus = va_arg(ap, int);
3012 		lp = va_arg(ap, fcportdb_t *);
3013 		va_end(ap);
3014 		fc = ISP_FC_PC(isp, bus);
3015 		tgt = FC_PORTDB_TGT(isp, bus, lp);
3016 		/*
3017 		 * If this has a virtual target or initiator, set the isp_gdt
3018 		 * timer running on it to delay its departure.
3019 		 */
3020 		isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
3021 		if (lp->is_target || lp->is_initiator) {
3022 			lp->state = FC_PORTDB_STATE_ZOMBIE;
3023 			lp->gone_timer = fc->gone_device_time;
3024 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie");
3025 			if (fc->ready && !callout_active(&fc->gdt)) {
3026 				isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime);
3027 				callout_reset(&fc->gdt, hz, isp_gdt, fc);
3028 			}
3029 			break;
3030 		}
3031 		isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone");
3032 		break;
3033 	case ISPASYNC_CHANGE_NOTIFY:
3034 	{
3035 		char *msg;
3036 		int evt, nphdl, nlstate, portid, reason;
3037 
3038 		va_start(ap, cmd);
3039 		bus = va_arg(ap, int);
3040 		evt = va_arg(ap, int);
3041 		if (evt == ISPASYNC_CHANGE_PDB) {
3042 			nphdl = va_arg(ap, int);
3043 			nlstate = va_arg(ap, int);
3044 			reason = va_arg(ap, int);
3045 		} else if (evt == ISPASYNC_CHANGE_SNS) {
3046 			portid = va_arg(ap, int);
3047 		} else {
3048 			nphdl = NIL_HANDLE;
3049 			nlstate = reason = 0;
3050 		}
3051 		va_end(ap);
3052 
3053 		if (evt == ISPASYNC_CHANGE_PDB) {
3054 			int tgt_set = 0;
3055 			msg = "Port Database Changed";
3056 			isp_prt(isp, ISP_LOGINFO,
3057 			    "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)",
3058 			    bus, msg, nphdl, nlstate, reason);
3059 			/*
3060 			 * Port database syncs are not sufficient for
3061 			 * determining that logins or logouts are done on the
3062 			 * loop, but this information is directly available from
3063 			 * the reason code from the incoming mbox. We must reset
3064 			 * the fcp crn on these events according to FCP-4
3065 			 */
3066 			switch (reason) {
3067 			case PDB24XX_AE_IMPL_LOGO_1:
3068 			case PDB24XX_AE_IMPL_LOGO_2:
3069 			case PDB24XX_AE_IMPL_LOGO_3:
3070 			case PDB24XX_AE_PLOGI_RCVD:
3071 			case PDB24XX_AE_PRLI_RCVD:
3072 			case PDB24XX_AE_PRLO_RCVD:
3073 			case PDB24XX_AE_LOGO_RCVD:
3074 			case PDB24XX_AE_PLOGI_DONE:
3075 			case PDB24XX_AE_PRLI_DONE:
3076 				/*
3077 				 * If the event is not global, twiddle tgt and
3078 				 * tgt_set to nominate only the target
3079 				 * associated with the nphdl.
3080 				 */
3081 				if (nphdl != PDB24XX_AE_GLOBAL) {
3082 					/* Break if we don't yet have the pdb */
3083 					if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp))
3084 						break;
3085 					tgt = FC_PORTDB_TGT(isp, bus, lp);
3086 					tgt_set = 1;
3087 				}
3088 				isp_fcp_reset_crn(isp, bus, tgt, tgt_set);
3089 				break;
3090 			default:
3091 				break; /* NOP */
3092 			}
3093 		} else if (evt == ISPASYNC_CHANGE_SNS) {
3094 			msg = "Name Server Database Changed";
3095 			isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)",
3096 			    bus, msg, portid);
3097 		} else {
3098 			msg = "Other Change Notify";
3099 			isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
3100 		}
3101 		isp_loop_changed(isp, bus);
3102 		break;
3103 	}
3104 #ifdef	ISP_TARGET_MODE
3105 	case ISPASYNC_TARGET_NOTIFY:
3106 	{
3107 		isp_notify_t *notify;
3108 		va_start(ap, cmd);
3109 		notify = va_arg(ap, isp_notify_t *);
3110 		va_end(ap);
3111 		switch (notify->nt_ncode) {
3112 		case NT_ABORT_TASK:
3113 		case NT_ABORT_TASK_SET:
3114 		case NT_CLEAR_ACA:
3115 		case NT_CLEAR_TASK_SET:
3116 		case NT_LUN_RESET:
3117 		case NT_TARGET_RESET:
3118 		case NT_QUERY_TASK_SET:
3119 		case NT_QUERY_ASYNC_EVENT:
3120 			/*
3121 			 * These are task management functions.
3122 			 */
3123 			isp_handle_platform_target_tmf(isp, notify);
3124 			break;
3125 		case NT_LIP_RESET:
3126 		case NT_LINK_UP:
3127 		case NT_LINK_DOWN:
3128 		case NT_HBA_RESET:
3129 			/*
3130 			 * No action need be taken here.
3131 			 */
3132 			break;
3133 		case NT_SRR:
3134 			isp_handle_platform_srr(isp, notify);
3135 			break;
3136 		default:
3137 			isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode);
3138 			isp_handle_platform_target_notify_ack(isp, notify, 0);
3139 			break;
3140 		}
3141 		break;
3142 	}
3143 	case ISPASYNC_TARGET_NOTIFY_ACK:
3144 	{
3145 		void *inot;
3146 		va_start(ap, cmd);
3147 		inot = va_arg(ap, void *);
3148 		va_end(ap);
3149 		if (isp_notify_ack(isp, inot)) {
3150 			isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT);
3151 			if (tp) {
3152 				tp->isp = isp;
3153 				memcpy(tp->data, inot, sizeof (tp->data));
3154 				tp->not = tp->data;
3155 				callout_init_mtx(&tp->timer, &isp->isp_lock, 0);
3156 				callout_reset(&tp->timer, 5,
3157 				    isp_refire_notify_ack, tp);
3158 			} else {
3159 				isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire");
3160 			}
3161 		}
3162 		break;
3163 	}
3164 	case ISPASYNC_TARGET_ACTION:
3165 	{
3166 		isphdr_t *hp;
3167 
3168 		va_start(ap, cmd);
3169 		hp = va_arg(ap, isphdr_t *);
3170 		va_end(ap);
3171 		switch (hp->rqs_entry_type) {
3172 		case RQSTYPE_ATIO:
3173 			isp_handle_platform_atio7(isp, (at7_entry_t *)hp);
3174 			break;
3175 		case RQSTYPE_CTIO7:
3176 			isp_handle_platform_ctio(isp, (ct7_entry_t *)hp);
3177 			break;
3178 		default:
3179 			isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x",
3180 			    __func__, hp->rqs_entry_type);
3181 			break;
3182 		}
3183 		break;
3184 	}
3185 #endif
3186 	case ISPASYNC_FW_CRASH:
3187 	{
3188 		uint16_t mbox1;
3189 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3190 		isp_prt(isp, ISP_LOGERR, "Internal Firmware Error @ RISC Address 0x%x", mbox1);
3191 #if 0
3192 		isp_reinit(isp, 1);
3193 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3194 #endif
3195 		break;
3196 	}
3197 	default:
3198 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3199 		break;
3200 	}
3201 }
3202 
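/*
 * Pick a default WWNN or WWPN for a channel: explicit configuration first,
 * then NVRAM, then channel 0's values, and finally a value synthesized from
 * the device unit number (tweaked per channel for WWPNs).
 */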
3203 uint64_t
3204 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
3205 {
3206 	uint64_t seed;
3207 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3208 
3209 	/* First try to use explicitly configured WWNs. */
3210 	seed = iswwnn ? fc->def_wwnn : fc->def_wwpn;
3211 	if (seed)
3212 		return (seed);
3213 
3214 	/* Otherwise try to use WWNs from NVRAM. */
3215 	if (isactive) {
3216 		seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram :
3217 		    FCPARAM(isp, chan)->isp_wwpn_nvram;
3218 		if (seed)
3219 			return (seed);
3220 	}
3221 
3222 	/* If still no WWNs, try to steal them from the first channel. */
3223 	if (chan > 0) {
3224 		seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn :
3225 		    ISP_FC_PC(isp, 0)->def_wwpn;
3226 		if (seed == 0) {
3227 			seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram :
3228 			    FCPARAM(isp, 0)->isp_wwpn_nvram;
3229 		}
3230 	}
3231 
3232 	/* If still nothing -- improvise. */
3233 	if (seed == 0) {
3234 		seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev);
3235 		if (!iswwnn)
3236 			seed ^= 0x0100000000000000ULL;
3237 	}
3238 
3239 	/* For additional channels we have to improvise even more. */
3240 	if (!iswwnn && chan > 0) {
3241 		/*
3242 		 * We'll stick our channel number plus one first into bits
3243 		 * 56..59 and thence into bits 52..55, which allows for 8 bits
3244 		 * of channel, enough for our maximum of 255 channels.
3245 		 */
3246 		seed ^= 0x0100000000000000ULL;
3247 		seed ^= ((uint64_t) (chan + 1) & 0xf) << 56;
3248 		seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52;
3249 	}
3250 	return (seed);
3251 }
3252 
3253 void
3254 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
3255 {
3256 	int loc;
3257 	char lbuf[200];
3258 	va_list ap;
3259 
3260 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3261 		return;
3262 	}
3263 	snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev));
3264 	loc = strlen(lbuf);
3265 	va_start(ap, fmt);
3266 	vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap);
3267 	va_end(ap);
3268 	printf("%s\n", lbuf);
3269 }
3270 
3271 void
3272 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
3273 {
3274 	va_list ap;
3275 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3276 		return;
3277 	}
3278 	xpt_print_path(xs->ccb_h.path);
3279 	va_start(ap, fmt);
3280 	vprintf(fmt, ap);
3281 	va_end(ap);
3282 	printf("\n");
3283 }
3284 
3285 uint64_t
3286 isp_nanotime_sub(struct timespec *b, struct timespec *a)
3287 {
3288 	uint64_t elapsed;
3289 	struct timespec x;
3290 
3291 	timespecsub(b, a, &x);
3292 	elapsed = GET_NANOSEC(&x);
3293 	if (elapsed == 0)
3294 		elapsed++;
3295 	return (elapsed);
3296 }
3297 
3298 int
3299 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan)
3300 {
3301 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3302 
3303 	if (fc->fcbsy)
3304 		return (-1);
3305 	fc->fcbsy = 1;
3306 	return (0);
3307 }
3308 
3309 void
3310 isp_platform_intr(void *arg)
3311 {
3312 	ispsoftc_t *isp = arg;
3313 
3314 	ISP_LOCK(isp);
3315 	ISP_RUN_ISR(isp);
3316 	ISP_UNLOCK(isp);
3317 }
3318 
3319 void
3320 isp_platform_intr_resp(void *arg)
3321 {
3322 	ispsoftc_t *isp = arg;
3323 
3324 	ISP_LOCK(isp);
3325 	isp_intr_respq(isp);
3326 	ISP_UNLOCK(isp);
3327 
3328 	/* We have handshake enabled, so explicitly complete interrupt */
3329 	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3330 }
3331 
3332 void
3333 isp_platform_intr_atio(void *arg)
3334 {
3335 	ispsoftc_t *isp = arg;
3336 
3337 	ISP_LOCK(isp);
3338 #ifdef	ISP_TARGET_MODE
3339 	isp_intr_atioq(isp);
3340 #endif
3341 	ISP_UNLOCK(isp);
3342 
3343 	/* We have handshake enabled, so explicitly complete interrupt */
3344 	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3345 }
3346 
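/*
 * DMA mapping support for outgoing commands.  The mush_t carries context
 * into the bus_dmamap_load_ccb() callback below, which syncs the map and
 * hands the segment list to ISP_SEND_CMD().
 */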
3347 typedef struct {
3348 	ispsoftc_t		*isp;
3349 	struct ccb_scsiio	*csio;
3350 	void			*qe;
3351 	int			error;
3352 } mush_t;
3353 
3354 static void
3355 isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
3356 {
3357 	mush_t *mp = (mush_t *) arg;
3358 	ispsoftc_t *isp= mp->isp;
3359 	struct ccb_scsiio *csio = mp->csio;
3360 	bus_dmasync_op_t op;
3361 
3362 	if (error) {
3363 		mp->error = error;
3364 		return;
3365 	}
3366 	if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3367 	    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3368 		op = BUS_DMASYNC_PREREAD;
3369 	else
3370 		op = BUS_DMASYNC_PREWRITE;
3371 	bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3372 
3373 	mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg);
3374 	if (mp->error)
3375 		isp_dmafree(isp, csio);
3376 }
3377 
3378 int
3379 isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe)
3380 {
3381 	mush_t mp;
3382 	int error;
3383 
3384 	if (XS_XFRLEN(csio)) {
3385 		mp.isp = isp;
3386 		mp.csio = csio;
3387 		mp.qe = qe;
3388 		mp.error = 0;
3389 		error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
3390 		    (union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT);
3391 		if (error == 0)
3392 			error = mp.error;
3393 	} else {
3394 		error = ISP_SEND_CMD(isp, qe, NULL, 0);
3395 	}
3396 	switch (error) {
3397 	case 0:
3398 	case CMD_COMPLETE:
3399 	case CMD_EAGAIN:
3400 	case CMD_RQLATER:
3401 		break;
3402 	case ENOMEM:
3403 		error = CMD_EAGAIN;
3404 		break;
3405 	case EINVAL:
3406 	case EFBIG:
3407 		csio->ccb_h.status = CAM_REQ_INVALID;
3408 		error = CMD_COMPLETE;
3409 		break;
3410 	default:
3411 		csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
3412 		error = CMD_COMPLETE;
3413 		break;
3414 	}
3415 	return (error);
3416 }
3417 
3418 void
3419 isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio)
3420 {
3421 	bus_dmasync_op_t op;
3422 
3423 	if (XS_XFRLEN(csio) == 0)
3424 		return;
3425 
3426 	if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3427 	    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3428 		op = BUS_DMASYNC_POSTREAD;
3429 	else
3430 		op = BUS_DMASYNC_POSTWRITE;
3431 	bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3432 	bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
3433 }
3434 
3435 /*
3436  * Reset the command reference number for all LUNs on a specific target
3437  * (needed when a target arrives again) or for all targets on a port
3438  * (needed for events like a LIP).
3439  */
3440 void
3441 isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set)
3442 {
3443 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3444 	struct isp_nexus *nxp;
3445 	int i;
3446 
3447 	if (tgt_set == 0)
3448 		isp_prt(isp, ISP_LOGDEBUG0,
3449 		    "Chan %d resetting CRN on all targets", chan);
3450 	else
3451 		isp_prt(isp, ISP_LOGDEBUG0,
3452 		    "Chan %d resetting CRN on target %u", chan, tgt);
3453 
3454 	for (i = 0; i < NEXUS_HASH_WIDTH; i++) {
3455 		for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) {
3456 			if (tgt_set == 0 || tgt == nxp->tgt)
3457 				nxp->crnseed = 0;
3458 		}
3459 	}
3460 }
3461 
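/*
 * Provide the next Command Reference Number for the I_T_L nexus of a
 * command, allocating a nexus tracking entry on first use.  Returns -1 if
 * no entry can be allocated.  A seed of zero restarts the CRN at 1.
 */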
3462 int
3463 isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd)
3464 {
3465 	lun_id_t lun;
3466 	uint32_t chan, tgt;
3467 	struct isp_fc *fc;
3468 	struct isp_nexus *nxp;
3469 	int idx;
3470 
3471 	chan = XS_CHANNEL(cmd);
3472 	tgt = XS_TGT(cmd);
3473 	lun = XS_LUN(cmd);
3474 	fc = ISP_FC_PC(isp, chan);
3475 	idx = NEXUS_HASH(tgt, lun);
3476 	nxp = fc->nexus_hash[idx];
3477 
3478 	while (nxp) {
3479 		if (nxp->tgt == tgt && nxp->lun == lun)
3480 			break;
3481 		nxp = nxp->next;
3482 	}
3483 	if (nxp == NULL) {
3484 		nxp = fc->nexus_free_list;
3485 		if (nxp == NULL) {
3486 			nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT);
3487 			if (nxp == NULL) {
3488 				return (-1);
3489 			}
3490 		} else {
3491 			fc->nexus_free_list = nxp->next;
3492 		}
3493 		nxp->tgt = tgt;
3494 		nxp->lun = lun;
3495 		nxp->next = fc->nexus_hash[idx];
3496 		fc->nexus_hash[idx] = nxp;
3497 	}
3498 	if (nxp->crnseed == 0)
3499 		nxp->crnseed = 1;
3500 	*crnp = nxp->crnseed++;
3501 	return (0);
3502 }
3503 
3504 /*
3505  * We enter with the lock held
3506  */
3507 void
3508 isp_timer(void *arg)
3509 {
3510 	ispsoftc_t *isp = arg;
3511 #ifdef	ISP_TARGET_MODE
3512 	isp_tmcmd_restart(isp);
3513 #endif
3514 	callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
3515 }
3516 
3517 #ifdef	ISP_TARGET_MODE
3518 isp_ecmd_t *
3519 isp_get_ecmd(ispsoftc_t *isp)
3520 {
3521 	isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free;
3522 	if (ecmd) {
3523 		isp->isp_osinfo.ecmd_free = ecmd->next;
3524 	}
3525 	return (ecmd);
3526 }
3527 
3528 void
3529 isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd)
3530 {
3531 	ecmd->next = isp->isp_osinfo.ecmd_free;
3532 	isp->isp_osinfo.ecmd_free = ecmd;
3533 }
3534 #endif
3535