xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision dd21556857e8d40f66bf5ad54754d9d52669ebf7)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 1997-2009 by Matthew Jacob
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
32  */
33 #include <sys/cdefs.h>
34 #include <dev/isp/isp_freebsd.h>
35 #include <sys/unistd.h>
36 #include <sys/kthread.h>
37 #include <sys/conf.h>
38 #include <sys/module.h>
39 #include <sys/ioccom.h>
40 #include <dev/isp/isp_ioctl.h>
41 #include <sys/devicestat.h>
42 #include <cam/cam_periph.h>
43 #include <cam/cam_xpt_periph.h>
44 
45 MODULE_VERSION(isp, 1);
46 MODULE_DEPEND(isp, cam, 1, 1, 1);
47 int isp_announced = 0;
48 int isp_loop_down_limit = 60;	/* default loop down limit */
49 int isp_quickboot_time = 7;	/* don't wait more than N secs for loop up */
50 int isp_gone_device_time = 30;	/* grace time before reporting device lost */
51 static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s";
52 
53 static void isp_freeze_loopdown(ispsoftc_t *, int);
54 static void isp_loop_changed(ispsoftc_t *isp, int chan);
55 static void isp_rq_check_above(ispsoftc_t *);
56 static void isp_rq_check_below(ispsoftc_t *);
57 static d_ioctl_t ispioctl;
58 static void isp_poll(struct cam_sim *);
59 static callout_func_t isp_watchdog;
60 static callout_func_t isp_gdt;
61 static task_fn_t isp_gdt_task;
62 static void isp_kthread(void *);
63 static void isp_action(struct cam_sim *, union ccb *);
64 static int isp_timer_count;
65 static void isp_timer(void *);
66 
67 static struct cdevsw isp_cdevsw = {
68 	.d_version =	D_VERSION,
69 	.d_ioctl =	ispioctl,
70 	.d_name =	"isp",
71 };
72 
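/*
 * Sysctl handler for the per-channel "role" node.  Reports the current
 * role and allows changing the initiator role at runtime; the target
 * role bit is preserved as-is, since target mode cannot be switched here.
 */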
73 static int
74 isp_role_sysctl(SYSCTL_HANDLER_ARGS)
75 {
76 	ispsoftc_t *isp = (ispsoftc_t *)arg1;
77 	int chan = arg2;
78 	int error, old, value;
79 
80 	value = FCPARAM(isp, chan)->role;
81 
82 	error = sysctl_handle_int(oidp, &value, 0, req);
83 	if ((error != 0) || (req->newptr == NULL))
84 		return (error);
85 
86 	if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH)
87 		return (EINVAL);
88 
89 	ISP_LOCK(isp);
90 	old = FCPARAM(isp, chan)->role;
91 
92 	/* We don't allow target mode switch from here. */
93 	value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR);
94 
95 	/* If nothing has changed -- we are done. */
96 	if (value == old) {
97 		ISP_UNLOCK(isp);
98 		return (0);
99 	}
100 
101 	/* Actually change the role. */
102 	error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value);
103 	ISP_UNLOCK(isp);
104 	return (error);
105 }
106 
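/*
 * Attach one channel: allocate and register a CAM SIM and wildcard path,
 * initialize per-channel state (including the target-mode ATIO/INOT pools),
 * spawn the channel's kthread and hang the per-channel sysctl nodes off the
 * device's sysctl tree.
 */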
107 static int
108 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
109 {
110 	fcparam *fcp = FCPARAM(isp, chan);
111 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
112 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev);
113 	struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev);
114 	char name[16];
115 	struct cam_sim *sim;
116 	struct cam_path *path;
117 #ifdef	ISP_TARGET_MODE
118 	int i;
119 #endif
120 
121 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
122 	    device_get_unit(isp->isp_dev), &isp->isp_lock,
123 	    isp->isp_maxcmds, isp->isp_maxcmds, devq);
124 	if (sim == NULL)
125 		return (ENOMEM);
126 
127 	if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) {
128 		cam_sim_free(sim, FALSE);
129 		return (EIO);
130 	}
131 	if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
132 		xpt_bus_deregister(cam_sim_path(sim));
133 		cam_sim_free(sim, FALSE);
134 		return (ENXIO);
135 	}
136 
137 	ISP_LOCK(isp);
138 	fc->sim = sim;
139 	fc->path = path;
140 	fc->isp = isp;
141 	fc->ready = 1;
142 	fcp->isp_use_gft_id = 1;
143 	fcp->isp_use_gff_id = 1;
144 
145 	callout_init_mtx(&fc->gdt, &isp->isp_lock, 0);
146 	TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc);
147 #ifdef	ISP_TARGET_MODE
148 	TAILQ_INIT(&fc->waitq);
149 	STAILQ_INIT(&fc->ntfree);
150 	for (i = 0; i < ATPDPSIZE; i++)
151 		STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next);
152 	LIST_INIT(&fc->atfree);
153 	for (i = ATPDPSIZE-1; i >= 0; i--)
154 		LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next);
155 	for (i = 0; i < ATPDPHASHSIZE; i++)
156 		LIST_INIT(&fc->atused[i]);
157 #endif
158 	isp_loop_changed(isp, chan);
159 	ISP_UNLOCK(isp);
160 	if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0,
161 	    "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
162 		xpt_free_path(fc->path);
163 		xpt_bus_deregister(cam_sim_path(fc->sim));
164 		cam_sim_free(fc->sim, FALSE);
165 		return (ENOMEM);
166 	}
167 	fc->num_threads += 1;
168 	if (chan > 0) {
169 		snprintf(name, sizeof(name), "chan%d", chan);
170 		tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree),
171 		    OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
172 		    "Virtual channel");
173 	}
174 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
175 	    "wwnn", CTLFLAG_RD, &fcp->isp_wwnn,
176 	    "World Wide Node Name");
177 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
178 	    "wwpn", CTLFLAG_RD, &fcp->isp_wwpn,
179 	    "World Wide Port Name");
180 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
181 	    "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0,
182 	    "Loop Down Limit");
183 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
184 	    "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0,
185 	    "Gone Device Time");
186 #if defined(ISP_TARGET_MODE) && defined(DEBUG)
187 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
188 	    "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0,
189 	    "Cause a Lost Frame on a Read");
190 #endif
191 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
192 	    "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
193 	    isp, chan, isp_role_sysctl, "I", "Current role");
194 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
195 	    "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0,
196 	    "Connection speed in gigabits");
197 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
198 	    "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0,
199 	    "Link state");
200 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
201 	    "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0,
202 	    "Firmware state");
203 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
204 	    "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0,
205 	    "Loop state");
206 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
207 	    "topo", CTLFLAG_RD, &fcp->isp_topo, 0,
208 	    "Connection topology");
209 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
210 	    "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0,
211 	    "Use GFT_ID during fabric scan");
212 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
213 	    "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0,
214 	    "Use GFF_ID during fabric scan");
215 	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
216 	    "fw_version_flash", CTLFLAG_RD, fcp->fw_version_flash, 0,
217 	    "Firmware version in (active) flash region");
218 	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
219 	    "fw_version_ispfw", CTLFLAG_RD, fcp->fw_version_ispfw, 0,
220 	    "Firmware version loaded from ispfw(4)");
221 	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
222 	    "fw_version_run", CTLFLAG_RD, fcp->fw_version_run, 0,
223 	    "Firmware version currently running");
224 	return (0);
225 }
226 
227 static void
228 isp_detach_chan(ispsoftc_t *isp, int chan)
229 {
230 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
231 
232 	xpt_free_path(fc->path);
233 	xpt_bus_deregister(cam_sim_path(fc->sim));
234 	cam_sim_free(fc->sim, FALSE);
235 
236 	/* Wait for the channel's spawned threads to exit. */
237 	wakeup(fc);
238 	while (fc->num_threads != 0)
239 		mtx_sleep(&fc->num_threads, &isp->isp_lock, PRIBIO, "isp_reap", 0);
240 }
241 
242 int
243 isp_attach(ispsoftc_t *isp)
244 {
245 	const char *nu = device_get_nameunit(isp->isp_osinfo.dev);
246 	int du = device_get_unit(isp->isp_dev);
247 	int chan;
248 
249 	/*
250 	 * Create the device queue for our SIM(s).
251 	 */
252 	isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds);
253 	if (isp->isp_osinfo.devq == NULL) {
254 		return (EIO);
255 	}
256 
257 	for (chan = 0; chan < isp->isp_nchan; chan++) {
258 		if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) {
259 			goto unwind;
260 		}
261 	}
262 
263 	callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0);
264 	isp_timer_count = hz >> 2;
265 	callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
266 
267 	isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu);
268 	if (isp->isp_osinfo.cdev) {
269 		isp->isp_osinfo.cdev->si_drv1 = isp;
270 	}
271 	return (0);
272 
273 unwind:
274 	ISP_LOCK(isp);
275 	isp->isp_osinfo.is_exiting = 1;
276 	while (--chan >= 0)
277 		isp_detach_chan(isp, chan);
278 	ISP_UNLOCK(isp);
279 	cam_simq_free(isp->isp_osinfo.devq);
280 	isp->isp_osinfo.devq = NULL;
281 	return (-1);
282 }
283 
284 int
285 isp_detach(ispsoftc_t *isp)
286 {
287 	int chan;
288 
289 	if (isp->isp_osinfo.cdev) {
290 		destroy_dev(isp->isp_osinfo.cdev);
291 		isp->isp_osinfo.cdev = NULL;
292 	}
293 	ISP_LOCK(isp);
294 	/* Tell spawned threads that we're exiting. */
295 	isp->isp_osinfo.is_exiting = 1;
296 	for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1)
297 		isp_detach_chan(isp, chan);
298 	ISP_UNLOCK(isp);
299 	callout_drain(&isp->isp_osinfo.tmo);
300 	cam_simq_free(isp->isp_osinfo.devq);
301 	return (0);
302 }
303 
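/*
 * Freeze the channel's SIM queue while the loop/link is down so that CAM
 * queues new commands instead of failing them.  isp_unfreeze_loopdown()
 * below is the counterpart that releases the queue again.
 */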
304 static void
305 isp_freeze_loopdown(ispsoftc_t *isp, int chan)
306 {
307 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
308 
309 	if (fc->sim == NULL)
310 		return;
311 	if (fc->simqfrozen == 0) {
312 		isp_prt(isp, ISP_LOGDEBUG0,
313 		    "Chan %d Freeze simq (loopdown)", chan);
314 		fc->simqfrozen = SIMQFRZ_LOOPDOWN;
315 		xpt_hold_boot();
316 		xpt_freeze_simq(fc->sim, 1);
317 	} else {
318 		isp_prt(isp, ISP_LOGDEBUG0,
319 		    "Chan %d Mark simq frozen (loopdown)", chan);
320 		fc->simqfrozen |= SIMQFRZ_LOOPDOWN;
321 	}
322 }
323 
324 static void
325 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan)
326 {
327 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
328 
329 	if (fc->sim == NULL)
330 		return;
331 	int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN;
332 	fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN;
333 	if (wasfrozen && fc->simqfrozen == 0) {
334 		isp_prt(isp, ISP_LOGDEBUG0,
335 		    "Chan %d Release simq", chan);
336 		xpt_release_simq(fc->sim, 1);
337 		xpt_release_boot();
338 	}
339 }
340 
341 /*
342  * Functions to protect from request queue overflow by freezing SIM queue.
343  * XXX: freezing only one arbitrary SIM, since they all share the queue.
344  */
345 static void
346 isp_rq_check_above(ispsoftc_t *isp)
347 {
348 	struct isp_fc *fc = ISP_FC_PC(isp, 0);
349 
350 	if (isp->isp_rqovf || fc->sim == NULL)
351 		return;
352 	if (!isp_rqentry_avail(isp, QENTRY_MAX)) {
353 		xpt_freeze_simq(fc->sim, 1);
354 		isp->isp_rqovf = 1;
355 	}
356 }
357 
358 static void
359 isp_rq_check_below(ispsoftc_t *isp)
360 {
361 	struct isp_fc *fc = ISP_FC_PC(isp, 0);
362 
363 	if (!isp->isp_rqovf || fc->sim == NULL)
364 		return;
365 	if (isp_rqentry_avail(isp, QENTRY_MAX)) {
366 		xpt_release_simq(fc->sim, 0);
367 		isp->isp_rqovf = 0;
368 	}
369 }
370 
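/*
 * ioctl handler for the management device node created in isp_attach().
 * Implements the ISP_* commands from isp_ioctl.h: debug level, role get/set,
 * HBA reset, rescan, LIP, port database queries and task management.
 */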
371 static int
372 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
373 {
374 	ispsoftc_t *isp;
375 	int nr, chan, retval = ENOTTY;
376 
377 	isp = dev->si_drv1;
378 
379 	switch (c) {
380 	case ISP_SDBLEV:
381 	{
382 		int olddblev = isp->isp_dblev;
383 		isp->isp_dblev = *(int *)addr;
384 		*(int *)addr = olddblev;
385 		retval = 0;
386 		break;
387 	}
388 	case ISP_GETROLE:
389 		chan = *(int *)addr;
390 		if (chan < 0 || chan >= isp->isp_nchan) {
391 			retval = ENXIO;
392 			break;
393 		}
394 		*(int *)addr = FCPARAM(isp, chan)->role;
395 		retval = 0;
396 		break;
397 	case ISP_SETROLE:
398 		nr = *(int *)addr;
399 		chan = nr >> 8;
400 		if (chan < 0 || chan >= isp->isp_nchan) {
401 			retval = ENXIO;
402 			break;
403 		}
404 		nr &= 0xff;
405 		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
406 			retval = EINVAL;
407 			break;
408 		}
409 		ISP_LOCK(isp);
410 		*(int *)addr = FCPARAM(isp, chan)->role;
411 		retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr);
412 		ISP_UNLOCK(isp);
413 		break;
414 
415 	case ISP_RESETHBA:
416 		ISP_LOCK(isp);
417 		isp_reinit(isp, 0);
418 		ISP_UNLOCK(isp);
419 		retval = 0;
420 		break;
421 
422 	case ISP_RESCAN:
423 		chan = *(intptr_t *)addr;
424 		if (chan < 0 || chan >= isp->isp_nchan) {
425 			retval = ENXIO;
426 			break;
427 		}
428 		ISP_LOCK(isp);
429 		if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) {
430 			retval = EIO;
431 		} else {
432 			retval = 0;
433 		}
434 		ISP_UNLOCK(isp);
435 		break;
436 
437 	case ISP_FC_LIP:
438 		chan = *(intptr_t *)addr;
439 		if (chan < 0 || chan >= isp->isp_nchan) {
440 			retval = ENXIO;
441 			break;
442 		}
443 		ISP_LOCK(isp);
444 		if (isp_control(isp, ISPCTL_SEND_LIP, chan)) {
445 			retval = EIO;
446 		} else {
447 			retval = 0;
448 		}
449 		ISP_UNLOCK(isp);
450 		break;
451 	case ISP_FC_GETDINFO:
452 	{
453 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
454 		fcportdb_t *lp;
455 
456 		if (ifc->loopid >= MAX_FC_TARG) {
457 			retval = EINVAL;
458 			break;
459 		}
460 		lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
461 		if (lp->state != FC_PORTDB_STATE_NIL) {
462 			ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
463 			ifc->loopid = lp->handle;
464 			ifc->portid = lp->portid;
465 			ifc->node_wwn = lp->node_wwn;
466 			ifc->port_wwn = lp->port_wwn;
467 			retval = 0;
468 		} else {
469 			retval = ENODEV;
470 		}
471 		break;
472 	}
473 	case ISP_FC_GETHINFO:
474 	{
475 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
476 		int chan = hba->fc_channel;
477 
478 		if (chan < 0 || chan >= isp->isp_nchan) {
479 			retval = ENXIO;
480 			break;
481 		}
482 		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
483 		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
484 		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
485 		hba->fc_nchannels = isp->isp_nchan;
486 		hba->fc_nports = MAX_FC_TARG;
487 		hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed;
488 		hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1;
489 		hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid;
490 		hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram;
491 		hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram;
492 		hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn;
493 		hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn;
494 		retval = 0;
495 		break;
496 	}
497 	case ISP_TSK_MGMT:
498 	{
499 		int needmarker;
500 		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
501 		uint16_t nphdl;
502 		isp24xx_tmf_t tmf;
503 		isp24xx_statusreq_t sp;
504 		fcparam *fcp;
505 		fcportdb_t *lp;
506 		int i;
507 
508 		chan = fct->chan;
509 		if (chan < 0 || chan >= isp->isp_nchan) {
510 			retval = ENXIO;
511 			break;
512 		}
513 
514 		needmarker = retval = 0;
515 		nphdl = fct->loopid;
516 		ISP_LOCK(isp);
517 		fcp = FCPARAM(isp, chan);
518 
519 		for (i = 0; i < MAX_FC_TARG; i++) {
520 			lp = &fcp->portdb[i];
521 			if (lp->handle == nphdl) {
522 				break;
523 			}
524 		}
525 		if (i == MAX_FC_TARG) {
526 			retval = ENXIO;
527 			ISP_UNLOCK(isp);
528 			break;
529 		}
530 		ISP_MEMZERO(&tmf, sizeof(tmf));
531 		tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT;
532 		tmf.tmf_header.rqs_entry_count = 1;
533 		tmf.tmf_nphdl = lp->handle;
534 		tmf.tmf_delay = 2;
535 		tmf.tmf_timeout = 4;
536 		tmf.tmf_tidlo = lp->portid;
537 		tmf.tmf_tidhi = lp->portid >> 16;
538 		tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan);
539 		tmf.tmf_lun[1] = fct->lun & 0xff;
540 		if (fct->lun >= 256) {
541 			tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8);
542 		}
543 		switch (fct->action) {
544 		case IPT_CLEAR_ACA:
545 			tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA;
546 			break;
547 		case IPT_TARGET_RESET:
548 			tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET;
549 			needmarker = 1;
550 			break;
551 		case IPT_LUN_RESET:
552 			tmf.tmf_flags = ISP24XX_TMF_LUN_RESET;
553 			needmarker = 1;
554 			break;
555 		case IPT_CLEAR_TASK_SET:
556 			tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET;
557 			needmarker = 1;
558 			break;
559 		case IPT_ABORT_TASK_SET:
560 			tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET;
561 			needmarker = 1;
562 			break;
563 		default:
564 			retval = EINVAL;
565 			break;
566 		}
567 		if (retval) {
568 			ISP_UNLOCK(isp);
569 			break;
570 		}
571 
572 		retval = isp_exec_entry_queue(isp, &tmf, &sp, 5);
573 		if (retval != 0) {
574 			isp_prt(isp, ISP_LOGERR, "%s: TMF of chan %d error %d",
575 			    __func__, chan, retval);
576 			ISP_UNLOCK(isp);
577 			break;
578 		}
579 
580 		if (sp.req_completion_status != 0)
581 			retval = EIO;
582 		else if (needmarker)
583 			fcp->sendmarker = 1;
584 		ISP_UNLOCK(isp);
585 		break;
586 	}
587 	default:
588 		break;
589 	}
590 	return (retval);
591 }
592 
593 /*
594  * Local Inlines
595  */
596 
597 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *);
598 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *);
599 
600 static ISP_INLINE int
601 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb)
602 {
603 	ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free;
604 	if (ISP_PCMD(ccb) == NULL) {
605 		return (-1);
606 	}
607 	isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next;
608 	return (0);
609 }
610 
611 static ISP_INLINE void
612 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb)
613 {
614 	if (ISP_PCMD(ccb)) {
615 #ifdef	ISP_TARGET_MODE
616 		PISP_PCMD(ccb)->datalen = 0;
617 #endif
618 		PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free;
619 		isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb);
620 		ISP_PCMD(ccb) = NULL;
621 	}
622 }
623 
624 /*
625  * Put the target mode functions here, because some are inlines
626  */
627 #ifdef	ISP_TARGET_MODE
628 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
629 static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t, void *);
630 static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t);
631 static atio_private_data_t *isp_find_atpd_ccb(ispsoftc_t *, int, uint32_t, void *);
632 static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *);
633 static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int);
634 static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t);
635 static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *);
636 static tstate_t *create_lun_state(ispsoftc_t *, int, struct cam_path *);
637 static void destroy_lun_state(ispsoftc_t *, int, tstate_t *);
638 static void isp_enable_lun(ispsoftc_t *, union ccb *);
639 static void isp_disable_lun(ispsoftc_t *, union ccb *);
640 static callout_func_t isp_refire_notify_ack;
641 static void isp_complete_ctio(ispsoftc_t *isp, union ccb *);
642 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE };
643 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How);
644 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
645 static void isp_handle_platform_ctio(ispsoftc_t *, ct7_entry_t *);
646 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp);
647 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *);
648 static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t);
649 
650 static ISP_INLINE tstate_t *
651 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
652 {
653 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
654 	tstate_t *tptr;
655 
656 	SLIST_FOREACH(tptr, &fc->lun_hash[LUN_HASH_FUNC(lun)], next) {
657 		if (tptr->ts_lun == lun)
658 			return (tptr);
659 	}
660 	return (NULL);
661 }
662 
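/*
 * Replay ATIOs that were parked on the lun's restart queue for lack of
 * resources.  Stops early if new entries get requeued while replaying and
 * returns non-zero if the restart queue is still non-empty.
 */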
663 static int
664 isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr)
665 {
666 	inot_private_data_t *ntp;
667 	struct ntpdlist rq;
668 
669 	if (STAILQ_EMPTY(&tptr->restart_queue))
670 		return (0);
671 	STAILQ_INIT(&rq);
672 	STAILQ_CONCAT(&rq, &tptr->restart_queue);
673 	while ((ntp = STAILQ_FIRST(&rq)) != NULL) {
674 		STAILQ_REMOVE_HEAD(&rq, next);
675 		isp_prt(isp, ISP_LOGTDEBUG0,
676 		    "%s: restarting resrc deprived %x", __func__,
677 		    ((at7_entry_t *)ntp->data)->at_rxid);
678 		isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data);
679 		isp_put_ntpd(isp, bus, ntp);
680 		if (!STAILQ_EMPTY(&tptr->restart_queue))
681 			break;
682 	}
683 	if (!STAILQ_EMPTY(&rq)) {
684 		STAILQ_CONCAT(&rq, &tptr->restart_queue);
685 		STAILQ_CONCAT(&tptr->restart_queue, &rq);
686 	}
687 	return (!STAILQ_EMPTY(&tptr->restart_queue));
688 }
689 
690 static void
691 isp_tmcmd_restart(ispsoftc_t *isp)
692 {
693 	struct isp_fc *fc;
694 	tstate_t *tptr;
695 	union ccb *ccb;
696 	int bus, i;
697 
698 	for (bus = 0; bus < isp->isp_nchan; bus++) {
699 		fc = ISP_FC_PC(isp, bus);
700 		for (i = 0; i < LUN_HASH_SIZE; i++) {
701 			SLIST_FOREACH(tptr, &fc->lun_hash[i], next)
702 				isp_atio_restart(isp, bus, tptr);
703 		}
704 
705 		/*
706 		 * We only need to do this once per channel.
707 		 */
708 		ccb = (union ccb *)TAILQ_FIRST(&fc->waitq);
709 		if (ccb != NULL) {
710 			TAILQ_REMOVE(&fc->waitq, &ccb->ccb_h, sim_links.tqe);
711 			isp_target_start_ctio(isp, ccb, FROM_TIMER);
712 		}
713 	}
714 	isp_rq_check_above(isp);
715 	isp_rq_check_below(isp);
716 }
717 
718 static atio_private_data_t *
719 isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag, void *ccb)
720 {
721 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
722 	atio_private_data_t *atp;
723 
724 	atp = LIST_FIRST(&fc->atfree);
725 	if (atp) {
726 		LIST_REMOVE(atp, next);
727 		atp->ccb = ccb;
728 		atp->tag = tag;
729 		LIST_INSERT_HEAD(&fc->atused[ATPDPHASH(tag)], atp, next);
730 	}
731 	return (atp);
732 }
733 
734 static atio_private_data_t *
735 isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
736 {
737 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
738 	atio_private_data_t *atp;
739 
740 	LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) {
741 		if (atp->tag == tag)
742 			return (atp);
743 	}
744 	return (NULL);
745 }
746 
747 /*
748  * Similar to the above, but also matches the opaque CCB pointer in addition
749  * to the tag.  Useful when the tag alone may already have been reused.
750  */
751 static atio_private_data_t *
752 isp_find_atpd_ccb(ispsoftc_t *isp, int chan, uint32_t tag, void *ccb)
753 {
754 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
755 	atio_private_data_t *atp;
756 
757 	LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) {
758 		if (atp->tag == tag && atp->ccb == ccb)
759 			return (atp);
760 	}
761 	return (NULL);
762 }
763 
764 static void
765 isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
766 {
767 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
768 
769 	if (atp->ests)
770 		isp_put_ecmd(isp, atp->ests);
771 	LIST_REMOVE(atp, next);
772 	memset(atp, 0, sizeof (*atp));
773 	LIST_INSERT_HEAD(&fc->atfree, atp, next);
774 }
775 
776 static void
777 isp_dump_atpd(ispsoftc_t *isp, int chan)
778 {
779 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
780 	atio_private_data_t *atp;
781 	const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" };
782 
783 	for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
784 		if (atp->state == ATPD_STATE_FREE)
785 			continue;
786 		isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s",
787 		    chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]);
788 	}
789 }
790 
791 static inot_private_data_t *
792 isp_get_ntpd(ispsoftc_t *isp, int chan)
793 {
794 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
795 	inot_private_data_t *ntp;
796 
797 	ntp = STAILQ_FIRST(&fc->ntfree);
798 	if (ntp)
799 		STAILQ_REMOVE_HEAD(&fc->ntfree, next);
800 	return (ntp);
801 }
802 
803 static inot_private_data_t *
804 isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id)
805 {
806 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
807 	inot_private_data_t *ntp;
808 
809 	for (ntp = fc->ntpool; ntp < &fc->ntpool[ATPDPSIZE]; ntp++) {
810 		if (ntp->tag_id == tag_id && ntp->seq_id == seq_id)
811 			return (ntp);
812 	}
813 	return (NULL);
814 }
815 
816 static void
817 isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp)
818 {
819 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
820 
821 	ntp->tag_id = ntp->seq_id = 0;
822 	STAILQ_INSERT_HEAD(&fc->ntfree, ntp, next);
823 }
824 
825 static tstate_t *
826 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path)
827 {
828 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
829 	lun_id_t lun;
830 	tstate_t *tptr;
831 
832 	lun = xpt_path_lun_id(path);
833 	tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
834 	if (tptr == NULL)
835 		return (NULL);
836 	tptr->ts_lun = lun;
837 	SLIST_INIT(&tptr->atios);
838 	SLIST_INIT(&tptr->inots);
839 	STAILQ_INIT(&tptr->restart_queue);
840 	SLIST_INSERT_HEAD(&fc->lun_hash[LUN_HASH_FUNC(lun)], tptr, next);
841 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n");
842 	return (tptr);
843 }
844 
845 static void
846 destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr)
847 {
848 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
849 	union ccb *ccb;
850 	inot_private_data_t *ntp;
851 
852 	while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) {
853 		SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
854 		ccb->ccb_h.status = CAM_REQ_ABORTED;
855 		xpt_done(ccb);
856 	}
857 	while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) {
858 		SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
859 		ccb->ccb_h.status = CAM_REQ_ABORTED;
860 		xpt_done(ccb);
861 	}
862 	while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) {
863 		isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0);
864 		STAILQ_REMOVE_HEAD(&tptr->restart_queue, next);
865 		isp_put_ntpd(isp, bus, ntp);
866 	}
867 	SLIST_REMOVE(&fc->lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], tptr, tstate, next);
868 	free(tptr, M_DEVBUF);
869 }
870 
871 static void
872 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb)
873 {
874 	tstate_t *tptr;
875 	int bus = XS_CHANNEL(ccb);
876 	target_id_t target = ccb->ccb_h.target_id;
877 	lun_id_t lun = ccb->ccb_h.target_lun;
878 
879 	/*
880 	 * We only support target and lun either both being wildcards or both
881 	 * being non-wildcards.
882 	 */
883 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
884 	    "enabling lun %jx\n", (uintmax_t)lun);
885 	if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
886 		ccb->ccb_h.status = CAM_LUN_INVALID;
887 		xpt_done(ccb);
888 		return;
889 	}
890 
891 	/* Create the state pointer. It should not already exist. */
892 	tptr = get_lun_statep(isp, bus, lun);
893 	if (tptr) {
894 		ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
895 		xpt_done(ccb);
896 		return;
897 	}
898 	tptr = create_lun_state(isp, bus, ccb->ccb_h.path);
899 	if (tptr == NULL) {
900 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
901 		xpt_done(ccb);
902 		return;
903 	}
904 
905 	ccb->ccb_h.status = CAM_REQ_CMP;
906 	xpt_done(ccb);
907 }
908 
909 static void
910 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb)
911 {
912 	tstate_t *tptr;
913 	int bus = XS_CHANNEL(ccb);
914 	target_id_t target = ccb->ccb_h.target_id;
915 	lun_id_t lun = ccb->ccb_h.target_lun;
916 
917 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
918 	    "disabling lun %jx\n", (uintmax_t)lun);
919 	if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
920 		ccb->ccb_h.status = CAM_LUN_INVALID;
921 		xpt_done(ccb);
922 		return;
923 	}
924 
925 	/* Find the state pointer. */
926 	if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) {
927 		ccb->ccb_h.status = CAM_PATH_INVALID;
928 		xpt_done(ccb);
929 		return;
930 	}
931 
932 	destroy_lun_state(isp, bus, tptr);
933 	ccb->ccb_h.status = CAM_REQ_CMP;
934 	xpt_done(ccb);
935 }
936 
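/*
 * Queue a target-mode CCB on the channel's wait queue (at the tail when it
 * comes from CAM, at the head otherwise) and then try to turn waiting CCBs
 * into CTIO7 requests: build the mode 0/1/2 entry, set up DMA and hand it
 * to the firmware.  CCBs are requeued when adjuncts, handles or request
 * queue space run out.
 */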
937 static void
938 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
939 {
940 	int fctape, sendstatus, resid;
941 	fcparam *fcp;
942 	atio_private_data_t *atp;
943 	struct ccb_scsiio *cso;
944 	struct isp_ccbq *waitq;
945 	uint32_t dmaresult, handle, xfrlen, sense_length, tmp;
946 	ct7_entry_t local, *cto = &local;
947 
948 	isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len,
949 	    (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? ccb->csio.sense_len : 0));
950 
951 	waitq = &ISP_FC_PC(isp, XS_CHANNEL(ccb))->waitq;
952 	switch (how) {
953 	case FROM_CAM:
954 		/*
955 		 * Insert at the tail of the list, if any, waiting CTIO CCBs
956 		 */
957 		TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe);
958 		break;
959 	case FROM_TIMER:
960 	case FROM_SRR:
961 	case FROM_CTIO_DONE:
962 		TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
963 		break;
964 	}
965 
966 	while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) {
967 		TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe);
968 
969 		cso = &ccb->csio;
970 		xfrlen = cso->dxfer_len;
971 		if (xfrlen == 0) {
972 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
973 				ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n");
974 				ccb->ccb_h.status = CAM_REQ_INVALID;
975 				xpt_done(ccb);
976 				continue;
977 			}
978 		}
979 
980 		atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id);
981 		if (atp == NULL) {
982 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__);
983 			isp_dump_atpd(isp, XS_CHANNEL(ccb));
984 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
985 			xpt_done(ccb);
986 			continue;
987 		}
988 
989 		/*
990 		 * Is this command a dead duck?
991 		 */
992 		if (atp->dead) {
993 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id);
994 			ccb->ccb_h.status = CAM_REQ_ABORTED;
995 			xpt_done(ccb);
996 			continue;
997 		}
998 
999 		/*
1000 		 * Check to make sure we're still in target mode.
1001 		 */
1002 		fcp = FCPARAM(isp, XS_CHANNEL(ccb));
1003 		if ((fcp->role & ISP_ROLE_TARGET) == 0) {
1004 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id);
1005 			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1006 			xpt_done(ccb);
1007 			continue;
1008 		}
1009 
1010 		/*
1011 		 * We only handle up to ATPD_CCB_OUTSTANDING outstanding CCBs at a time (one of
1012 		 * which could be split into two CTIOs to separate data and status).
1013 		 */
1014 		if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) {
1015 			isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags);
1016 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1017 			break;
1018 		}
1019 
1020 		/*
1021 		 * Does the initiator expect FC-Tape style responses?
1022 		 */
1023 		if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) {
1024 			fctape = 1;
1025 		} else {
1026 			fctape = 0;
1027 		}
1028 
1029 		/*
1030 		 * If we already did the data xfer portion of a CTIO that sends data
1031 		 * and status, don't do it again and do the status portion now.
1032 		 */
1033 		if (atp->sendst) {
1034 			isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u",
1035 			    cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit);
1036 			xfrlen = 0;	/* we already did the data transfer */
1037 			atp->sendst = 0;
1038 		}
1039 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1040 			sendstatus = 1;
1041 		} else {
1042 			sendstatus = 0;
1043 		}
1044 
1045 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
1046 			KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?"));
1047 			/*
1048 			 * Sense length is not the entire sense data structure size. Periph
1049 			 * drivers don't seem to be setting sense_len to reflect the actual
1050 			 * size. We'll peek inside to get the right amount.
1051 			 */
1052 			sense_length = cso->sense_len;
1053 
1054 			/*
1055 			 * This 'cannot' happen
1056 			 */
1057 			if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) {
1058 				sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE;
1059 			}
1060 		} else {
1061 			sense_length = 0;
1062 		}
1063 
1064 		/*
1065 		 * Check for overflow
1066 		 */
1067 		tmp = atp->bytes_xfered + atp->bytes_in_transit;
1068 		if (xfrlen > 0 && tmp > atp->orig_datalen) {
1069 			isp_prt(isp, ISP_LOGERR,
1070 			    "%s: [0x%x] data overflow by %u bytes", __func__,
1071 			    cso->tag_id, tmp + xfrlen - atp->orig_datalen);
1072 			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1073 			xpt_done(ccb);
1074 			continue;
1075 		}
1076 		if (xfrlen > atp->orig_datalen - tmp) {
1077 			xfrlen = atp->orig_datalen - tmp;
1078 			if (xfrlen == 0 && !sendstatus) {
1079 				cso->resid = cso->dxfer_len;
1080 				ccb->ccb_h.status = CAM_REQ_CMP;
1081 				xpt_done(ccb);
1082 				continue;
1083 			}
1084 		}
1085 
1086 		memset(cto, 0, QENTRY_LEN);
1087 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1088 		cto->ct_header.rqs_entry_count = 1;
1089 		cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM;
1090 		ATPD_SET_SEQNO(cto, atp);
1091 		cto->ct_nphdl = atp->nphdl;
1092 		cto->ct_rxid = atp->tag;
1093 		cto->ct_iid_lo = atp->sid;
1094 		cto->ct_iid_hi = atp->sid >> 16;
1095 		cto->ct_oxid = atp->oxid;
1096 		cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb));
1097 		cto->ct_timeout = XS_TIME(ccb);
1098 		cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT;
1099 
1100 		/*
1101 		 * Mode 1, status, no data. Only possible when we are sending status, have
1102 		 * no data to transfer, and any sense data can fit into a ct7_entry_t.
1103 		 *
1104 		 * Mode 2, status, no data. We have to use this in the case that
1105 		 * the sense data won't fit into a ct7_entry_t.
1106 		 *
1107 		 */
1108 		if (sendstatus && xfrlen == 0) {
1109 			cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA;
1110 			resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit;
1111 			if (sense_length <= MAXRESPLEN_24XX) {
1112 				cto->ct_flags |= CT7_FLAG_MODE1;
1113 				cto->ct_scsi_status = cso->scsi_status;
1114 				if (resid < 0) {
1115 					cto->ct_resid = -resid;
1116 					cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8);
1117 				} else if (resid > 0) {
1118 					cto->ct_resid = resid;
1119 					cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8);
1120 				}
1121 				if (fctape) {
1122 					cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1123 				}
1124 				if (sense_length) {
1125 					cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
1126 					cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length;
1127 					memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length);
1128 				}
1129 			} else {
1130 				bus_addr_t addr;
1131 				fcp_rsp_iu_t rp;
1132 
1133 				if (atp->ests == NULL) {
1134 					atp->ests = isp_get_ecmd(isp);
1135 					if (atp->ests == NULL) {
1136 						TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1137 						break;
1138 					}
1139 				}
1140 				memset(&rp, 0, sizeof(rp));
1141 				if (fctape) {
1142 					cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1143 					rp.fcp_rsp_bits |= FCP_CONF_REQ;
1144 				}
1145 				cto->ct_flags |= CT7_FLAG_MODE2;
1146 				rp.fcp_rsp_scsi_status = cso->scsi_status;
1147 				if (resid < 0) {
1148 					rp.fcp_rsp_resid = -resid;
1149 					rp.fcp_rsp_bits |= FCP_RESID_OVERFLOW;
1150 				} else if (resid > 0) {
1151 					rp.fcp_rsp_resid = resid;
1152 					rp.fcp_rsp_bits |= FCP_RESID_UNDERFLOW;
1153 				}
1154 				if (sense_length) {
1155 					rp.fcp_rsp_snslen = sense_length;
1156 					cto->ct_senselen = sense_length;
1157 					rp.fcp_rsp_bits |= FCP_SNSLEN_VALID;
1158 					isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1159 					memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length);
1160 				} else {
1161 					isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1162 				}
1163 				if (isp->isp_dblev & ISP_LOGTDEBUG1) {
1164 					isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests);
1165 				}
1166 				bus_dmamap_sync(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map, BUS_DMASYNC_PREWRITE);
1167 				addr = isp->isp_osinfo.ecmd_dma;
1168 				addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE);
1169 				isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests,
1170 				    (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length);
1171 				cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length;
1172 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr);
1173 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr);
1174 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
1175 			}
1176 			if (sense_length) {
1177 				isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__,
1178 				    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length,
1179 				    cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]);
1180 			} else {
1181 				isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__,
1182 				    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid);
1183 			}
1184 			atp->state = ATPD_STATE_LAST_CTIO;
1185 		}
1186 
1187 		/*
1188 		 * Mode 0 data transfers, *possibly* with status.
1189 		 */
1190 		if (xfrlen != 0) {
1191 			cto->ct_flags |= CT7_FLAG_MODE0;
1192 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1193 				cto->ct_flags |= CT7_DATA_IN;
1194 			} else {
1195 				cto->ct_flags |= CT7_DATA_OUT;
1196 			}
1197 
1198 			cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit;
1199 			cto->rsp.m0.ct_xfrlen = xfrlen;
1200 
1201 #ifdef	DEBUG
1202 			if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) {
1203 				isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2));
1204 				ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0;
1205 				cto->rsp.m0.ct_xfrlen -= xfrlen >> 2;
1206 			}
1207 #endif
1208 			if (sendstatus) {
1209 				resid = atp->orig_datalen - atp->bytes_xfered - xfrlen;
1210 				if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) {
1211 					cto->ct_flags |= CT7_SENDSTATUS;
1212 					atp->state = ATPD_STATE_LAST_CTIO;
1213 					if (fctape) {
1214 						cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1215 					}
1216 				} else {
1217 					atp->sendst = 1;	/* send status later */
1218 					cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM;
1219 					atp->state = ATPD_STATE_CTIO;
1220 				}
1221 			} else {
1222 				atp->state = ATPD_STATE_CTIO;
1223 			}
1224 			isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__,
1225 			    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered);
1226 		}
1227 
1228 		if (isp_get_pcmd(isp, ccb)) {
1229 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n");
1230 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1231 			break;
1232 		}
1233 		handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET);
1234 		if (handle == 0) {
1235 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__);
1236 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1237 			isp_free_pcmd(isp, ccb);
1238 			break;
1239 		}
1240 		atp->bytes_in_transit += xfrlen;
1241 		PISP_PCMD(ccb)->datalen = xfrlen;
1242 
1243 		/*
1244 		 * Call the dma setup routines for this entry (and any subsequent
1245 		 * CTIOs) if there's data to move, and then tell the f/w it's got
1246 		 * new things to play with. As with isp_start's usage of DMA setup,
1247 		 * any swizzling is done in the machine dependent layer. Because
1248 		 * of this, we put the request onto the queue area first in native
1249 		 * format.
1250 		 */
1251 		cto->ct_syshandle = handle;
1252 		dmaresult = ISP_DMASETUP(isp, cso, cto);
1253 		if (dmaresult != 0) {
1254 			isp_destroy_handle(isp, handle);
1255 			isp_free_pcmd(isp, ccb);
1256 			if (dmaresult == CMD_EAGAIN) {
1257 				TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1258 				break;
1259 			}
1260 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1261 			xpt_done(ccb);
1262 			continue;
1263 		}
1264 		ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED;
1265 		if (xfrlen) {
1266 			ccb->ccb_h.spriv_field0 = atp->bytes_xfered;
1267 		} else {
1268 			ccb->ccb_h.spriv_field0 = ~0;
1269 		}
1270 		atp->ctcnt++;
1271 		atp->seqno++;
1272 	}
1273 }
1274 
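/*
 * Callout handler that retries a NOTIFY ACK until the firmware accepts it,
 * then frees the retry state.
 */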
1275 static void
1276 isp_refire_notify_ack(void *arg)
1277 {
1278 	isp_tna_t *tp  = arg;
1279 	ispsoftc_t *isp = tp->isp;
1280 
1281 	ISP_ASSERT_LOCKED(isp);
1282 	if (isp_notify_ack(isp, tp->not)) {
1283 		callout_schedule(&tp->timer, 5);
1284 	} else {
1285 		free(tp, M_DEVBUF);
1286 	}
1287 }
1288 
1289 
1290 static void
1291 isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb)
1292 {
1293 
1294 	isp_rq_check_below(isp);
1295 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1296 	xpt_done(ccb);
1297 }
1298 
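/*
 * Handle a new ATIO7 from the firmware: derive the channel from the D_ID,
 * look the initiator up in the port database, find the lun state, allocate
 * an ATIO adjunct and complete a queued accept-TIO CCB back to CAM.  If the
 * lun is unknown the exchange is ended with BUSY or CHECK CONDITION; other
 * resource shortages park the request on the restart queue.
 */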
1299 static void
1300 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
1301 {
1302 	int cdbxlen;
1303 	lun_id_t lun;
1304 	uint16_t chan, nphdl = NIL_HANDLE;
1305 	uint32_t did, sid;
1306 	fcportdb_t *lp;
1307 	tstate_t *tptr;
1308 	struct ccb_accept_tio *atiop;
1309 	atio_private_data_t *atp = NULL;
1310 	atio_private_data_t *oatp;
1311 	inot_private_data_t *ntp;
1312 
1313 	did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
1314 	sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1315 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun));
1316 
1317 	if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
1318 		/* Channel has to be derived from D_ID */
1319 		isp_find_chan_by_did(isp, did, &chan);
1320 		if (chan == ISP_NOCHAN) {
1321 			isp_prt(isp, ISP_LOGWARN,
1322 			    "%s: [RX_ID 0x%x] D_ID %x not found on any channel",
1323 			    __func__, aep->at_rxid, did);
1324 			isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN,
1325 			    ECMD_TERMINATE, 0);
1326 			return;
1327 		}
1328 	} else {
1329 		chan = 0;
1330 	}
1331 
1332 	/*
1333 	 * Find the PDB entry for this initiator
1334 	 */
1335 	if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) {
1336 		/*
1337 		 * If we're not in the port database terminate the exchange.
1338 		 */
1339 		isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already",
1340 		    __func__, aep->at_rxid, did, chan, sid);
1341 		isp_dump_portdb(isp, chan);
1342 		isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0);
1343 		return;
1344 	}
1345 	nphdl = lp->handle;
1346 
1347 	/*
1348 	 * Get the tstate pointer
1349 	 */
1350 	tptr = get_lun_statep(isp, chan, lun);
1351 	if (tptr == NULL) {
1352 		tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD);
1353 		if (tptr == NULL) {
1354 			isp_prt(isp, ISP_LOGWARN,
1355 			    "%s: [0x%x] no state pointer for lun %jx or wildcard",
1356 			    __func__, aep->at_rxid, (uintmax_t)lun);
1357 			if (lun == 0) {
1358 				isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1359 			} else {
1360 				isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
1361 			}
1362 			return;
1363 		}
1364 	}
1365 
1366 	/*
1367 	 * Start any commands pending resources first.
1368 	 */
1369 	if (isp_atio_restart(isp, chan, tptr))
1370 		goto noresrc;
1371 
1372 	/*
1373 	 * If the f/w is out of resources, just send a BUSY status back.
1374 	 */
1375 	if (aep->at_rxid == AT7_NORESRC_RXID) {
1376 		isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1377 		return;
1378 	}
1379 
1380 	/*
1381 	 * If we're out of resources, just send a BUSY status back.
1382 	 */
1383 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1384 	if (atiop == NULL) {
1385 		isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid);
1386 		goto noresrc;
1387 	}
1388 
1389 	oatp = isp_find_atpd(isp, chan, aep->at_rxid);
1390 	if (oatp) {
1391 		isp_prt(isp, oatp->state == ATPD_STATE_LAST_CTIO ? ISP_LOGTDEBUG0 :
1392 		    ISP_LOGWARN, "[0x%x] tag wraparound (N-Port Handle "
1393 		    "0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d",
1394 		    aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state);
1395 		/*
1396 		 * It's not a "no resource" condition- but we can treat it like one
1397 		 */
1398 		goto noresrc;
1399 	}
1400 	atp = isp_get_atpd(isp, chan, aep->at_rxid, atiop);
1401 	if (atp == NULL) {
1402 		isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid);
1403 		isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1404 		return;
1405 	}
1406 	atp->word3 = lp->prli_word3;
1407 	atp->state = ATPD_STATE_ATIO;
1408 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1409 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n");
1410 	atiop->init_id = FC_PORTDB_TGT(isp, chan, lp);
1411 	atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1412 	atiop->ccb_h.target_lun = lun;
1413 	atiop->sense_len = 0;
1414 	cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT;
1415 	if (cdbxlen) {
1416 		isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored");
1417 	}
1418 	cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb);
1419 	ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen);
1420 	atiop->cdb_len = cdbxlen;
1421 	atiop->ccb_h.status = CAM_CDB_RECVD;
1422 	atiop->tag_id = atp->tag;
1423 	switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) {
1424 	case FCP_CMND_TASK_ATTR_SIMPLE:
1425 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1426 		atiop->tag_action = MSG_SIMPLE_TASK;
1427 		break;
1428 	case FCP_CMND_TASK_ATTR_HEAD:
1429 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1430 		atiop->tag_action = MSG_HEAD_OF_QUEUE_TASK;
1431 		break;
1432 	case FCP_CMND_TASK_ATTR_ORDERED:
1433 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1434 		atiop->tag_action = MSG_ORDERED_TASK;
1435 		break;
1436 	case FCP_CMND_TASK_ATTR_ACA:
1437 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1438 		atiop->tag_action = MSG_ACA_TASK;
1439 		break;
1440 	case FCP_CMND_TASK_ATTR_UNTAGGED:
1441 	default:
1442 		atiop->tag_action = 0;
1443 		break;
1444 	}
1445 	atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute &
1446 	    FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT;
1447 	atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl;
1448 	atp->bytes_xfered = 0;
1449 	atp->lun = lun;
1450 	atp->nphdl = nphdl;
1451 	atp->sid = sid;
1452 	atp->did = did;
1453 	atp->oxid = aep->at_hdr.ox_id;
1454 	atp->rxid = aep->at_hdr.rx_id;
1455 	atp->cdb0 = atiop->cdb_io.cdb_bytes[0];
1456 	atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK;
1457 	atp->state = ATPD_STATE_CAM;
1458 	isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u",
1459 	    aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen);
1460 	xpt_done((union ccb *)atiop);
1461 	return;
1462 noresrc:
1463 	KASSERT(atp == NULL, ("%s: atp is not NULL on noresrc!\n", __func__));
1464 	ntp = isp_get_ntpd(isp, chan);
1465 	if (ntp == NULL) {
1466 		isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1467 		return;
1468 	}
1469 	memcpy(ntp->data, aep, QENTRY_LEN);
1470 	STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next);
1471 }
1472 
1473 
1474 /*
1475  * Handle starting an SRR (sequence retransmit request)
1476  * We get here when we've gotten the immediate notify
1477  * and the return of all outstanding CTIOs for this
1478  * transaction.
1479  */
1480 static void
1481 isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp)
1482 {
1483 	in_fcentry_24xx_t *inot;
1484 	uint32_t srr_off, ccb_off, ccb_len, ccb_end;
1485 	union ccb *ccb;
1486 
1487 	inot = (in_fcentry_24xx_t *)atp->srr;
1488 	srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16);
1489 	ccb = atp->srr_ccb;
1490 	atp->srr_ccb = NULL;
1491 	atp->nsrr++;
1492 	if (ccb == NULL) {
1493 		isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag);
1494 		goto fail;
1495 	}
1496 
1497 	ccb_off = ccb->ccb_h.spriv_field0;
1498 	ccb_len = ccb->csio.dxfer_len;
1499 	ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len;
1500 
1501 	switch (inot->in_srr_iu) {
1502 	case R_CTL_INFO_SOLICITED_DATA:
1503 		/*
1504 		 * We have to restart a FCP_DATA data out transaction
1505 		 */
1506 		atp->sendst = 0;
1507 		atp->bytes_xfered = srr_off;
1508 		if (ccb_len == 0) {
1509 			isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off);
1510 			goto mdp;
1511 		}
1512 		if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) {
1513 			isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1514 			goto mdp;
1515 		}
1516 		isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1517 		break;
1518 	case R_CTL_INFO_COMMAND_STATUS:
1519 		isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag);
1520 		atp->sendst = 1;
1521 		/*
1522 		 * We have to restart a FCP_RSP IU transaction
1523 		 */
1524 		break;
1525 	case R_CTL_INFO_DATA_DESCRIPTOR:
1526 		/*
1527 		 * We have to restart an FCP DATA in transaction
1528 		 */
1529 		isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping");
1530 		goto fail;
1531 
1532 	default:
1533 		isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu);
1534 		goto fail;
1535 	}
1536 
1537 	/*
1538 	 * We can't do anything until this is acked, so we might as well start it now.
1539 	 * We aren't going to do the usual asynchronous ack issue because we need
1540 	 * to make sure this gets on the wire first.
1541 	 */
1542 	if (isp_notify_ack(isp, inot)) {
1543 		isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1544 		goto fail;
1545 	}
1546 	isp_target_start_ctio(isp, ccb, FROM_SRR);
1547 	return;
1548 fail:
1549 	inot->in_reserved = 1;
1550 	isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1551 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1552 	ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1553 	isp_complete_ctio(isp, ccb);
1554 	return;
1555 mdp:
1556 	if (isp_notify_ack(isp, inot)) {
1557 		isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1558 		goto fail;
1559 	}
1560 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1561 	ccb->ccb_h.status |= CAM_MESSAGE_RECV;
1562 	/*
1563 	 * This is not a strict interpretation of MDP, but it's close
1564 	 */
1565 	ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16];
1566 	ccb->csio.msg_len = 7;
1567 	ccb->csio.msg_ptr[0] = MSG_EXTENDED;
1568 	ccb->csio.msg_ptr[1] = 5;
1569 	ccb->csio.msg_ptr[2] = 0;	/* modify data pointer */
1570 	ccb->csio.msg_ptr[3] = srr_off >> 24;
1571 	ccb->csio.msg_ptr[4] = srr_off >> 16;
1572 	ccb->csio.msg_ptr[5] = srr_off >> 8;
1573 	ccb->csio.msg_ptr[6] = srr_off;
1574 	isp_complete_ctio(isp, ccb);
1575 }
1576 
1577 
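/*
 * Handle an SRR (sequence retransmit request) notify: stash it in the
 * command's adjunct and, if the outstanding CTIO has already come back,
 * start the actual SRR processing.
 */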
1578 static void
1579 isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify)
1580 {
1581 	in_fcentry_24xx_t *inot = notify->nt_lreserved;
1582 	atio_private_data_t *atp;
1583 	uint32_t tag = notify->nt_tagval & 0xffffffff;
1584 
1585 	atp = isp_find_atpd(isp, notify->nt_channel, tag);
1586 	if (atp == NULL) {
1587 		isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify",
1588 		    __func__, tag);
1589 		isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1590 		return;
1591 	}
1592 	atp->srr_notify_rcvd = 1;
1593 	memcpy(atp->srr, inot, sizeof (atp->srr));
1594 	isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x",
1595 	    inot->in_rxid, inot->in_flags, inot->in_srr_iu,
1596 	    ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo);
1597 	if (atp->srr_ccb)
1598 		isp_handle_srr_start(isp, atp);
1599 }
1600 
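/*
 * Completion handler for target-mode CTIO7 entries: find the owning CCB and
 * adjunct, account for transferred data, handle the SRR and HBA-reset
 * special cases, and either notify CAM or continue with a status-only CTIO.
 */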
1601 static void
1602 isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t *ct)
1603 {
1604 	union ccb *ccb;
1605 	int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0;
1606 	atio_private_data_t *atp = NULL;
1607 	int bus;
1608 	uint32_t handle, data_requested, resid;
1609 
1610 	handle = ct->ct_syshandle;
1611 	ccb = isp_find_xs(isp, handle);
1612 	if (ccb == NULL) {
1613 		isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, ct);
1614 		return;
1615 	}
1616 	isp_destroy_handle(isp, handle);
1617 	resid = data_requested = PISP_PCMD(ccb)->datalen;
1618 	isp_free_pcmd(isp, ccb);
1619 
1620 	bus = XS_CHANNEL(ccb);
1621 	atp = isp_find_atpd(isp, bus, ct->ct_rxid);
1622 	if (atp == NULL) {
1623 		/*
1624 		 * XXX: isp_clear_commands() generates fake CTIO with zero
1625 		 * ct_rxid value, filling only ct_syshandle.  Workaround
1626 		 * that using tag_id from the CCB, pointed by ct_syshandle.
1627 		 */
1628 		atp = isp_find_atpd(isp, bus, ccb->csio.tag_id);
1629 	}
1630 	if (atp == NULL) {
1631 		isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id);
1632 		return;
1633 	}
1634 	KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero"));
1635 	atp->bytes_in_transit -= data_requested;
1636 	atp->ctcnt -= 1;
1637 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1638 
1639 	if (ct->ct_nphdl == CT7_SRR) {
1640 		atp->srr_ccb = ccb;
1641 		if (atp->srr_notify_rcvd)
1642 			isp_handle_srr_start(isp, atp);
1643 		return;
1644 	}
1645 	if (ct->ct_nphdl == CT_HBA_RESET) {
1646 		sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1647 		    (atp->sendst == 0);
1648 		failure = CAM_UNREC_HBA_ERROR;
1649 	} else {
1650 		sentstatus = ct->ct_flags & CT7_SENDSTATUS;
1651 		ok = (ct->ct_nphdl == CT7_OK);
1652 		notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0;
1653 		if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA)
1654 			resid = ct->ct_resid;
1655 	}
1656 	isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct),
1657 	   notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID");
1658 	if (ok) {
1659 		if (data_requested > 0) {
1660 			atp->bytes_xfered += data_requested - resid;
1661 			ccb->csio.resid = ccb->csio.dxfer_len -
1662 			    (data_requested - resid);
1663 		}
1664 		if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE))
1665 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1666 		ccb->ccb_h.status |= CAM_REQ_CMP;
1667 	} else {
1668 		notify_cam = 1;
1669 		if (failure == CAM_UNREC_HBA_ERROR)
1670 			ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
1671 		else
1672 			ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1673 	}
1674 	atp->state = ATPD_STATE_PDON;
1675 
1676 	/*
1677 	 * We never *not* notify CAM when there has been any error (ok == 0),
1678 	 * so we never need to do an ATIO putback if we're not notifying CAM.
1679 	 */
1680 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)",
1681 	    (sentstatus)? "  FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0);
1682 	if (notify_cam == 0) {
1683 		if (atp->sendst) {
1684 			isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE);
1685 		}
1686 		return;
1687 	}
1688 
1689 	/*
1690 	 * We are done with this ATIO if we successfully sent status.
1691 	 * In all other cases expect either another CTIO or XPT_ABORT.
1692 	 */
1693 	if (ok && sentstatus)
1694 		isp_put_atpd(isp, bus, atp);
1695 
1696 	/*
1697 	 * We're telling CAM we're done with this CTIO transaction.
1698 	 *
1699 	 * 24XX cards never need an ATIO put back.
1700 	 */
1701 	isp_complete_ctio(isp, ccb);
1702 }
1703 
1704 static int
1705 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp)
1706 {
1707 	ct7_entry_t local, *cto = &local;
1708 
1709 	if (isp->isp_state != ISP_RUNSTATE) {
1710 		isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL);
1711 		return (0);
1712 	}
1713 
1714 	/*
1715 	 * This case is for a Task Management Function, which shows up as an ATIO7 entry.
1716 	 */
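	/*
	 * The acknowledgement is a status-only MODE 1 CTIO7 addressed back to
	 * the originator; a non-zero rsp is carried as a 4-byte FCP response
	 * code with FCP_RSPLEN_VALID set in the SCSI status.
	 */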
1717 	if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) {
1718 		at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved;
1719 		fcportdb_t *lp;
1720 		uint32_t sid;
1721 		uint16_t nphdl;
1722 
1723 		sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1724 		if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) {
1725 			nphdl = lp->handle;
1726 		} else {
1727 			nphdl = NIL_HANDLE;
1728 		}
1729 		ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1730 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1731 		cto->ct_header.rqs_entry_count = 1;
1732 		cto->ct_nphdl = nphdl;
1733 		cto->ct_rxid = aep->at_rxid;
1734 		cto->ct_vpidx = mp->nt_channel;
1735 		cto->ct_iid_lo = sid;
1736 		cto->ct_iid_hi = sid >> 16;
1737 		cto->ct_oxid = aep->at_hdr.ox_id;
1738 		cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1;
1739 		cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT;
1740 		if (rsp != 0) {
1741 			cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
1742 			cto->rsp.m1.ct_resplen = 4;
1743 			ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
1744 			cto->rsp.m1.ct_resp[0] = rsp & 0xff;
1745 			cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff;
1746 			cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff;
1747 			cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff;
1748 		}
1749 		return (isp_send_entry(isp, cto));
1750 	}
1751 
1752 	/*
1753 	 * This case is for a responding to an ABTS frame
1754 	 */
1755 	if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD)
1756 		return (isp_acknak_abts(isp, mp->nt_lreserved, (rsp == 0) ? 0 : EINVAL));
1757 
1758 	/*
1759 	 * General purpose acknowledgement
1760 	 */
1761 	if (mp->nt_need_ack) {
1762 		isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL);
1763 		/*
1764 		 * Don't need to use the guaranteed send because the caller can retry
1765 		 */
1766 		return (isp_notify_ack(isp, mp->nt_lreserved));
1767 	}
1768 	return (0);
1769 }
1770 
1771 /*
1772  * Handle task management functions.
1773  *
1774  * We show up here with a notify structure filled out.
1775  *
1776  * The nt_lreserved tag points to the original queue entry
1777  */
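/*
 * The flow: map the notify onto a LUN state (falling back to the wildcard
 * LUN), take a free immediate notify CCB from that LUN, translate nt_ncode
 * into the corresponding CAM task management message, stash a copy of the
 * notify so it can be acknowledged later, and hand the CCB to CAM.
 */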
1778 static void
1779 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify)
1780 {
1781 	tstate_t *tptr;
1782 	fcportdb_t *lp;
1783 	struct ccb_immediate_notify *inot;
1784 	inot_private_data_t *ntp = NULL;
1785 	atio_private_data_t *atp;
1786 	lun_id_t lun;
1787 
1788 	isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode,
1789 	    notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun);
1790 	if (notify->nt_lun == LUN_ANY) {
1791 		if (notify->nt_tagval == TAG_ANY) {
1792 			lun = CAM_LUN_WILDCARD;
1793 		} else {
1794 			atp = isp_find_atpd(isp, notify->nt_channel,
1795 			    notify->nt_tagval & 0xffffffff);
1796 			lun = atp ? atp->lun : CAM_LUN_WILDCARD;
1797 		}
1798 	} else {
1799 		lun = notify->nt_lun;
1800 	}
1801 	tptr = get_lun_statep(isp, notify->nt_channel, lun);
1802 	if (tptr == NULL) {
1803 		tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD);
1804 		if (tptr == NULL) {
1805 			isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1806 			goto bad;
1807 		}
1808 	}
1809 	inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots);
1810 	if (inot == NULL) {
1811 		isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1812 		goto bad;
1813 	}
1814 
1815 	inot->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1816 	inot->ccb_h.target_lun = lun;
1817 	if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 &&
1818 	    isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) {
1819 		inot->initiator_id = CAM_TARGET_WILDCARD;
1820 	} else {
1821 		inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp);
1822 	}
1823 	inot->seq_id = notify->nt_tagval;
1824 	inot->tag_id = notify->nt_tagval >> 32;
1825 
1826 	switch (notify->nt_ncode) {
1827 	case NT_ABORT_TASK:
1828 		isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id);
1829 		inot->arg = MSG_ABORT_TASK;
1830 		break;
1831 	case NT_ABORT_TASK_SET:
1832 		isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY);
1833 		inot->arg = MSG_ABORT_TASK_SET;
1834 		break;
1835 	case NT_CLEAR_ACA:
1836 		inot->arg = MSG_CLEAR_ACA;
1837 		break;
1838 	case NT_CLEAR_TASK_SET:
1839 		inot->arg = MSG_CLEAR_TASK_SET;
1840 		break;
1841 	case NT_LUN_RESET:
1842 		inot->arg = MSG_LOGICAL_UNIT_RESET;
1843 		break;
1844 	case NT_TARGET_RESET:
1845 		inot->arg = MSG_TARGET_RESET;
1846 		break;
1847 	case NT_QUERY_TASK_SET:
1848 		inot->arg = MSG_QUERY_TASK_SET;
1849 		break;
1850 	case NT_QUERY_ASYNC_EVENT:
1851 		inot->arg = MSG_QUERY_ASYNC_EVENT;
1852 		break;
1853 	default:
1854 		isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun);
1855 		goto bad;
1856 	}
1857 
1858 	ntp = isp_get_ntpd(isp, notify->nt_channel);
1859 	if (ntp == NULL) {
1860 		isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__);
1861 		goto bad;
1862 	}
1863 	ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t));
1864 	if (notify->nt_lreserved) {
1865 		ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN);
1866 		ntp->nt.nt_lreserved = &ntp->data;
1867 	}
1868 	ntp->seq_id = notify->nt_tagval;
1869 	ntp->tag_id = notify->nt_tagval >> 32;
1870 
1871 	SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
1872 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n");
1873 	inot->ccb_h.status = CAM_MESSAGE_RECV;
1874 	xpt_done((union ccb *)inot);
1875 	return;
1876 bad:
1877 	if (notify->nt_need_ack) {
1878 		if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1879 			if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) {
1880 				isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK");
1881 			}
1882 		} else {
1883 			isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved);
1884 		}
1885 	}
1886 }
1887 
1888 /*
1889  * Clean aborted commands pending restart
1890  */
1891 static void
1892 isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id)
1893 {
1894 	inot_private_data_t *ntp, *tmp;
1895 	uint32_t this_tag_id;
1896 
1897 	STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) {
1898 		this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid;
1899 		if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) {
1900 			STAILQ_REMOVE(&tptr->restart_queue, ntp,
1901 			    inot_private_data, next);
1902 			isp_endcmd(isp, ntp->data, NIL_HANDLE, chan,
1903 			    ECMD_TERMINATE, 0);
1904 			isp_put_ntpd(isp, chan, ntp);
1905 		}
1906 	}
1907 }
1908 #endif
1909 
1910 static void
1911 isp_poll(struct cam_sim *sim)
1912 {
1913 	ispsoftc_t *isp = cam_sim_softc(sim);
1914 
1915 	ISP_RUN_ISR(isp);
1916 }
1917 
1918 
1919 static void
1920 isp_watchdog(void *arg)
1921 {
1922 	struct ccb_scsiio *xs = arg;
1923 	ispsoftc_t *isp;
1924 	uint32_t ohandle = ISP_HANDLE_FREE, handle;
1925 
1926 	isp = XS_ISP(xs);
1927 
1928 	handle = isp_find_handle(isp, xs);
1929 
1930 	/*
1931 	 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere.
1932 	 */
1933 	if (handle != ISP_HANDLE_FREE) {
1934 		ISP_RUN_ISR(isp);
1935 		ohandle = handle;
1936 		handle = isp_find_handle(isp, xs);
1937 	}
1938 	if (handle != ISP_HANDLE_FREE) {
1939 		/*
1940 		 * Try and make sure the command is really dead before
1941 		 * we release the handle (and DMA resources) for reuse.
1942 		 *
1943 		 * If we are successful in aborting the command then
1944 		 * we're done here because we'll get the command returned
1945 		 * back separately.
1946 		 */
1947 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
1948 			return;
1949 		}
1950 
1951 		/*
1952 		 * Note that after calling the above, the command may in
1953 		 * fact have been completed.
1954 		 */
1955 		xs = isp_find_xs(isp, handle);
1956 
1957 		/*
1958 		 * If the command no longer exists, then we won't
1959 		 * be able to find the xs again with this handle.
1960 		 */
1961 		if (xs == NULL) {
1962 			return;
1963 		}
1964 
1965 		/*
1966 		 * After this point, the command is really dead.
1967 		 */
1968 		ISP_DMAFREE(isp, xs);
1969 		isp_destroy_handle(isp, handle);
1970 		isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
1971 		XS_SETERR(xs, CAM_CMD_TIMEOUT);
1972 		isp_done(xs);
1973 	} else {
1974 		if (ohandle != ISP_HANDLE_FREE) {
1975 			isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle);
1976 		} else {
1977 			isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__);
1978 		}
1979 	}
1980 }
1981 
1982 static void
1983 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
1984 {
1985 	union ccb *ccb;
1986 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
1987 
1988 	/*
1989 	 * Allocate a CCB, create a wildcard path for this target and schedule a rescan.
1990 	 */
1991 	ccb = xpt_alloc_ccb_nowait();
1992 	if (ccb == NULL) {
1993 		isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan);
1994 		return;
1995 	}
1996 	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim),
1997 	    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1998 		isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
1999 		xpt_free_ccb(ccb);
2000 		return;
2001 	}
2002 	xpt_rescan(ccb);
2003 }
2004 
2005 static void
2006 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2007 {
2008 	struct cam_path *tp;
2009 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2010 
2011 	if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2012 		xpt_async(AC_LOST_DEVICE, tp, NULL);
2013 		xpt_free_path(tp);
2014 	}
2015 }
2016 
2017 /*
2018  * Gone Device Timer Function- when we have decided that a device has gone
2019  * away, we wait a specific period of time prior to telling the OS it has
2020  * gone away.
2021  *
2022  * This timer function fires once a second and then scans the port database
2023  * for devices that are marked dead but still have a virtual target assigned.
2024  * We decrement a counter for that port database entry, and when it hits zero,
2025  * we tell the OS the device has gone away.
2026  */
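/*
 * The callout handler itself only queues isp_gdt_task() on the system
 * taskqueue; the port database scan and any xpt_async() notifications are
 * done from the task in thread context with the ISP lock held.
 */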
2027 static void
2028 isp_gdt(void *arg)
2029 {
2030 	struct isp_fc *fc = arg;
2031 	taskqueue_enqueue(taskqueue_thread, &fc->gtask);
2032 }
2033 
2034 static void
2035 isp_gdt_task(void *arg, int pending)
2036 {
2037 	struct isp_fc *fc = arg;
2038 	ispsoftc_t *isp = fc->isp;
2039 	int chan = fc - ISP_FC_PC(isp, 0);
2040 	fcportdb_t *lp;
2041 	struct ac_contract ac;
2042 	struct ac_device_changed *adc;
2043 	int dbidx, more_to_do = 0;
2044 
2045 	ISP_LOCK(isp);
2046 	isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan);
2047 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2048 		lp = &FCPARAM(isp, chan)->portdb[dbidx];
2049 
2050 		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2051 			continue;
2052 		}
2053 		if (lp->gone_timer != 0) {
2054 			lp->gone_timer -= 1;
2055 			more_to_do++;
2056 			continue;
2057 		}
2058 		isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout");
2059 		if (lp->is_target) {
2060 			lp->is_target = 0;
2061 			isp_make_gone(isp, lp, chan, dbidx);
2062 		}
2063 		if (lp->is_initiator) {
2064 			lp->is_initiator = 0;
2065 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2066 			adc = (struct ac_device_changed *) ac.contract_data;
2067 			adc->wwpn = lp->port_wwn;
2068 			adc->port = lp->portid;
2069 			adc->target = dbidx;
2070 			adc->arrived = 0;
2071 			xpt_async(AC_CONTRACT, fc->path, &ac);
2072 		}
2073 		lp->state = FC_PORTDB_STATE_NIL;
2074 	}
2075 	if (fc->ready) {
2076 		if (more_to_do) {
2077 			callout_reset(&fc->gdt, hz, isp_gdt, fc);
2078 		} else {
2079 			callout_deactivate(&fc->gdt);
2080 			isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime);
2081 		}
2082 	}
2083 	ISP_UNLOCK(isp);
2084 }
2085 
2086 /*
2087  * When the loop goes down we remember the time and freeze the CAM command
2088  * queue.  For some period of time we keep trying to reprobe the loop, but
2089  * if we fail we tell the OS that the devices have gone away and drop the freeze.
2090  *
2091  * We don't clear the devices out of our port database because, when the loop
2092  * comes back up, we have to do some actual cleanup with the chip at that
2093  * point (implicit PLOGO, e.g., to get the chip's port database state right).
2094  */
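/*
 * Only the first transition is recorded here: if loop_down_time is already
 * set we are in the middle of a reprobe and just let the kthread keep
 * working.  Otherwise we freeze the queues (initiator role only), stamp the
 * time and wake isp_kthread() to start reprobing.
 */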
2095 static void
2096 isp_loop_changed(ispsoftc_t *isp, int chan)
2097 {
2098 	fcparam *fcp = FCPARAM(isp, chan);
2099 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2100 
2101 	if (fc->loop_down_time)
2102 		return;
2103 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan);
2104 	if (fcp->role & ISP_ROLE_INITIATOR)
2105 		isp_freeze_loopdown(isp, chan);
2106 	fc->loop_down_time = time_uptime;
2107 	wakeup(fc);
2108 }
2109 
2110 static void
2111 isp_loop_up(ispsoftc_t *isp, int chan)
2112 {
2113 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2114 
2115 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan);
2116 	fc->loop_seen_once = 1;
2117 	fc->loop_down_time = 0;
2118 	isp_unfreeze_loopdown(isp, chan);
2119 }
2120 
2121 static void
2122 isp_loop_dead(ispsoftc_t *isp, int chan)
2123 {
2124 	fcparam *fcp = FCPARAM(isp, chan);
2125 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2126 	fcportdb_t *lp;
2127 	struct ac_contract ac;
2128 	struct ac_device_changed *adc;
2129 	int dbidx, i;
2130 
2131 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan);
2132 
2133 	/*
2134 	 * Notify the OS of all targets that we now consider to have departed.
2135 	 */
2136 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2137 		lp = &fcp->portdb[dbidx];
2138 
2139 		if (lp->state == FC_PORTDB_STATE_NIL)
2140 			continue;
2141 
2142 		for (i = 0; i < ISP_HANDLE_NUM(isp); i++) {
2143 			struct ccb_scsiio *xs;
2144 
2145 			if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) {
2146 				continue;
2147 			}
2148 			if ((xs = isp->isp_xflist[i].cmd) == NULL) {
2149 				continue;
2150 			}
2151 			if (dbidx != XS_TGT(xs)) {
2152 				continue;
2153 			}
2154 			isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout",
2155 			    isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2156 			    (uintmax_t)XS_LUN(xs));
2157 
2158 			/*
2159 			 * Just like in isp_watchdog, abort the outstanding
2160 			 * command or immediately free its resources if it is
2161 			 * not active
2162 			 */
2163 			if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
2164 				continue;
2165 			}
2166 
2167 			ISP_DMAFREE(isp, xs);
2168 			isp_destroy_handle(isp, isp->isp_xflist[i].handle);
2169 			isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed",
2170 			    isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2171 			    (uintmax_t)XS_LUN(xs));
2172 			XS_SETERR(xs, HBA_BUSRESET);
2173 			isp_done(xs);
2174 		}
2175 
2176 		isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout");
2177 		if (lp->is_target) {
2178 			lp->is_target = 0;
2179 			isp_make_gone(isp, lp, chan, dbidx);
2180 		}
2181 		if (lp->is_initiator) {
2182 			lp->is_initiator = 0;
2183 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2184 			adc = (struct ac_device_changed *) ac.contract_data;
2185 			adc->wwpn = lp->port_wwn;
2186 			adc->port = lp->portid;
2187 			adc->target = dbidx;
2188 			adc->arrived = 0;
2189 			xpt_async(AC_CONTRACT, fc->path, &ac);
2190 		}
2191 	}
2192 
2193 	isp_unfreeze_loopdown(isp, chan);
2194 	fc->loop_down_time = 0;
2195 }
2196 
2197 static void
2198 isp_kthread(void *arg)
2199 {
2200 	struct isp_fc *fc = arg;
2201 	ispsoftc_t *isp = fc->isp;
2202 	int chan = fc - ISP_FC_PC(isp, 0);
2203 	int slp = 0, d;
2204 	int lb, lim;
2205 
2206 	ISP_LOCK(isp);
2207 	while (isp->isp_osinfo.is_exiting == 0) {
2208 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2209 		    "Chan %d Checking FC state", chan);
2210 		lb = isp_fc_runstate(isp, chan, 250000);
2211 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2212 		    "Chan %d FC got to %s state", chan,
2213 		    isp_fc_loop_statename(lb));
2214 
2215 		/*
2216 		 * Our action is different based upon whether we're supporting
2217 		 * Initiator mode or not. If we are, we might freeze the simq
2218 		 * when the loop is down and set all sorts of different delays to
2219 		 * check again.
2220 		 *
2221 		 * If not, we simply wait for the loop to come up.
2222 		 */
2223 		if (lb == LOOP_READY || lb < 0) {
2224 			slp = 0;
2225 		} else {
2226 			/*
2227 			 * If we've never seen loop up and we've waited longer
2228 			 * than quickboot time, or we've seen loop up but we've
2229 			 * waited longer than loop_down_limit, give up and go
2230 			 * to sleep until loop comes up.
2231 			 */
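			/*
			 * Back off the reprobe interval as the loop stays
			 * down: every second for the first 10 seconds, then
			 * every 5, 10, 20 and finally 30 seconds, until the
			 * limit above expires (slp == 0) and the loop is
			 * declared up or dead below.
			 */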
2232 			if (fc->loop_seen_once == 0)
2233 				lim = isp_quickboot_time;
2234 			else
2235 				lim = fc->loop_down_limit;
2236 			d = time_uptime - fc->loop_down_time;
2237 			if (d >= lim)
2238 				slp = 0;
2239 			else if (d < 10)
2240 				slp = 1;
2241 			else if (d < 30)
2242 				slp = 5;
2243 			else if (d < 60)
2244 				slp = 10;
2245 			else if (d < 120)
2246 				slp = 20;
2247 			else
2248 				slp = 30;
2249 		}
2250 
2251 		if (slp == 0) {
2252 			if (lb == LOOP_READY)
2253 				isp_loop_up(isp, chan);
2254 			else
2255 				isp_loop_dead(isp, chan);
2256 		}
2257 
2258 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2259 		    "Chan %d sleep for %d seconds", chan, slp);
2260 		msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz);
2261 	}
2262 	fc->num_threads -= 1;
2263 	wakeup(&fc->num_threads);
2264 	ISP_UNLOCK(isp);
2265 	kthread_exit();
2266 }
2267 
2268 #ifdef	ISP_TARGET_MODE
2269 static int
2270 isp_abort_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
2271 {
2272 	uint8_t storage[QENTRY_LEN];
2273 	ct7_entry_t *cto = (ct7_entry_t *) storage;
2274 
2275 	ISP_MEMZERO(cto, sizeof (ct7_entry_t));
2276 	cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
2277 	cto->ct_header.rqs_entry_count = 1;
2278 	cto->ct_nphdl = atp->nphdl;
2279 	cto->ct_vpidx = chan;
2280 	cto->ct_iid_lo = atp->sid;
2281 	cto->ct_iid_hi = atp->sid >> 16;
2282 	cto->ct_rxid = atp->tag;
2283 	cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
2284 	cto->ct_oxid = atp->oxid;
2285 	return (isp_send_entry(isp, cto));
2286 }
2287 
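/*
 * Aborting an ACCEPT TARGET IO CCB has two cases: the CCB may still be
 * sitting on the LUN's free ATIO list (just unlink it and complete it as
 * aborted), or it may describe a command already in progress, in which case
 * the exchange is terminated on the chip with a CTIO7 before the ATIO
 * private data is released.
 */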
2288 static void
2289 isp_abort_atio(ispsoftc_t *isp, union ccb *ccb)
2290 {
2291 	atio_private_data_t *atp;
2292 	union ccb *accb = ccb->cab.abort_ccb;
2293 	struct ccb_hdr *sccb;
2294 	tstate_t *tptr;
2295 
2296 	tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2297 	if (tptr != NULL) {
2298 		/* Search for the ATIO among queueued. */
2299 		SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) {
2300 			if (sccb != &accb->ccb_h)
2301 				continue;
2302 			SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle);
2303 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2304 			    "Abort FREE ATIO\n");
2305 			accb->ccb_h.status = CAM_REQ_ABORTED;
2306 			xpt_done(accb);
2307 			ccb->ccb_h.status = CAM_REQ_CMP;
2308 			return;
2309 		}
2310 	}
2311 
2312 	/* Search for the ATIO among running. */
2313 	atp = isp_find_atpd_ccb(isp, XS_CHANNEL(accb), accb->atio.tag_id, accb);
2314 	if (atp != NULL) {
2315 		if (isp_abort_atpd(isp, XS_CHANNEL(accb), atp)) {
2316 			ccb->ccb_h.status = CAM_UA_ABORT;
2317 			return;
2318 		}
2319 		isp_put_atpd(isp, XS_CHANNEL(accb), atp);
2320 	}
2321 
2322 	ccb->ccb_h.status = CAM_REQ_CMP;
2323 }
2324 
2325 static void
2326 isp_abort_inot(ispsoftc_t *isp, union ccb *ccb)
2327 {
2328 	inot_private_data_t *ntp;
2329 	union ccb *accb = ccb->cab.abort_ccb;
2330 	struct ccb_hdr *sccb;
2331 	tstate_t *tptr;
2332 
2333 	tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2334 	if (tptr != NULL) {
2335 		/* Search for the INOT among queueued. */
2336 		SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) {
2337 			if (sccb != &accb->ccb_h)
2338 				continue;
2339 			SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle);
2340 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2341 			    "Abort FREE INOT\n");
2342 			accb->ccb_h.status = CAM_REQ_ABORTED;
2343 			xpt_done(accb);
2344 			ccb->ccb_h.status = CAM_REQ_CMP;
2345 			return;
2346 		}
2347 	}
2348 
2349 	/* Search for the INOT among running. */
2350 	ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id);
2351 	if (ntp != NULL) {
2352 		if (ntp->nt.nt_need_ack) {
2353 			isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK,
2354 			    ntp->nt.nt_lreserved);
2355 		}
2356 		isp_put_ntpd(isp, XS_CHANNEL(accb), ntp);
2357 		ccb->ccb_h.status = CAM_REQ_CMP;
2358 	} else {
2359 		ccb->ccb_h.status = CAM_UA_ABORT;
2360 		return;
2361 	}
2362 }
2363 #endif
2364 
2365 static void
2366 isp_action(struct cam_sim *sim, union ccb *ccb)
2367 {
2368 	int bus, tgt, error;
2369 	ispsoftc_t *isp;
2370 	fcparam *fcp;
2371 	struct ccb_trans_settings *cts;
2372 	sbintime_t ts;
2373 
2374 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2375 
2376 	isp = (ispsoftc_t *)cam_sim_softc(sim);
2377 	ISP_ASSERT_LOCKED(isp);
2378 	bus = cam_sim_bus(sim);
2379 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2380 	ISP_PCMD(ccb) = NULL;
2381 
2382 	switch (ccb->ccb_h.func_code) {
2383 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2384 		/*
2385 		 * Do a couple of preliminary checks...
2386 		 */
2387 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2388 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2389 				ccb->ccb_h.status = CAM_REQ_INVALID;
2390 				isp_done((struct ccb_scsiio *) ccb);
2391 				break;
2392 			}
2393 		}
2394 #ifdef	DIAGNOSTIC
2395 		if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) {
2396 			xpt_print(ccb->ccb_h.path, "invalid target\n");
2397 			ccb->ccb_h.status = CAM_PATH_INVALID;
2398 		}
2399 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2400 			xpt_done(ccb);
2401 			break;
2402 		}
2403 #endif
2404 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2405 		if (isp_get_pcmd(isp, ccb)) {
2406 			isp_prt(isp, ISP_LOGWARN, "out of PCMDs");
2407 			cam_freeze_devq(ccb->ccb_h.path);
2408 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
2409 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2410 			xpt_done(ccb);
2411 			break;
2412 		}
2413 		error = isp_start((XS_T *) ccb);
2414 		isp_rq_check_above(isp);
2415 		switch (error) {
2416 		case 0:
2417 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2418 			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
2419 				break;
2420 			/* Give firmware extra 10s to handle timeout. */
2421 			ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S;
2422 			callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0,
2423 			    isp_watchdog, ccb, 0);
2424 			break;
2425 		case CMD_RQLATER:
2426 			isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later",
2427 			    XS_TGT(ccb), (uintmax_t)XS_LUN(ccb));
2428 			cam_freeze_devq(ccb->ccb_h.path);
2429 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2430 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2431 			isp_free_pcmd(isp, ccb);
2432 			xpt_done(ccb);
2433 			break;
2434 		case CMD_EAGAIN:
2435 			isp_free_pcmd(isp, ccb);
2436 			cam_freeze_devq(ccb->ccb_h.path);
2437 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2438 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2439 			xpt_done(ccb);
2440 			break;
2441 		case CMD_COMPLETE:
2442 			isp_done((struct ccb_scsiio *) ccb);
2443 			break;
2444 		default:
2445 			isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__);
2446 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2447 			isp_free_pcmd(isp, ccb);
2448 			xpt_done(ccb);
2449 		}
2450 		break;
2451 
2452 #ifdef	ISP_TARGET_MODE
2453 	case XPT_EN_LUN:		/* Enable/Disable LUN as a target */
2454 		if (ccb->cel.enable) {
2455 			isp_enable_lun(isp, ccb);
2456 		} else {
2457 			isp_disable_lun(isp, ccb);
2458 		}
2459 		break;
2460 	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
2461 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2462 	{
2463 		tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2464 		if (tptr == NULL) {
2465 			const char *str;
2466 
2467 			if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
2468 				str = "XPT_IMMEDIATE_NOTIFY";
2469 			else
2470 				str = "XPT_ACCEPT_TARGET_IO";
2471 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path,
2472 			    "%s: no state pointer found for %s\n",
2473 			    __func__, str);
2474 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2475 			xpt_done(ccb);
2476 			break;
2477 		}
2478 
2479 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2480 			ccb->atio.tag_id = 0;
2481 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle);
2482 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2483 			    "Put FREE ATIO\n");
2484 		} else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
2485 			ccb->cin1.seq_id = ccb->cin1.tag_id = 0;
2486 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
2487 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2488 			    "Put FREE INOT\n");
2489 		}
2490 		ccb->ccb_h.status = CAM_REQ_INPROG;
2491 		break;
2492 	}
2493 	case XPT_NOTIFY_ACKNOWLEDGE:		/* notify ack */
2494 	{
2495 		atio_private_data_t *atp;
2496 		inot_private_data_t *ntp;
2497 
2498 		/*
2499 		 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb
2500 		 * XXX: matches that for the immediate notify, we have to *search* for the notify structure
2501 		 */
2502 		/*
2503 		 * All the relevant path information is in the associated immediate notify
2504 		 */
2505 		ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2506 		ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id);
2507 		if (ntp == NULL) {
2508 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__,
2509 			     ccb->cna2.tag_id, ccb->cna2.seq_id);
2510 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2511 			xpt_done(ccb);
2512 			break;
2513 		}
2514 
2515 		/*
2516 		 * Target should abort all affected tasks before ACK-ing INOT,
2517 		 * but if/since it doesn't, add this hack to allow tag reuse.
2518 		 * We cannot do it if some CTIOs are in progress, or we won't
2519 		 * be able to handle their completions.  In that case just block new ones.
2520 		 */
2521 		uint32_t rsp = (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0;
2522 		if (ntp->nt.nt_ncode == NT_ABORT_TASK && (rsp & 0xff) == 0 &&
2523 		    (atp = isp_find_atpd(isp, XS_CHANNEL(ccb), ccb->cna2.seq_id)) != NULL) {
2524 			if (atp->ctcnt == 0 &&
2525 			    isp_abort_atpd(isp, XS_CHANNEL(ccb), atp) == 0)
2526 				isp_put_atpd(isp, XS_CHANNEL(ccb), atp);
2527 			else
2528 				atp->dead = 1;
2529 		}
2530 
2531 		if (isp_handle_platform_target_notify_ack(isp, &ntp->nt, rsp)) {
2532 			cam_freeze_devq(ccb->ccb_h.path);
2533 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2534 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2535 			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2536 			break;
2537 		}
2538 		isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp);
2539 		ccb->ccb_h.status = CAM_REQ_CMP;
2540 		ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2541 		xpt_done(ccb);
2542 		break;
2543 	}
2544 	case XPT_CONT_TARGET_IO:
2545 		isp_target_start_ctio(isp, ccb, FROM_CAM);
2546 		isp_rq_check_above(isp);
2547 		break;
2548 #endif
2549 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2550 		tgt = ccb->ccb_h.target_id;
2551 		tgt |= (bus << 16);
2552 
2553 		error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt);
2554 		if (error) {
2555 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2556 		} else {
2557 			/*
2558 			 * If we have a FC device, reset the Command
2559 			 * Reference Number, because the target will expect
2560 			 * that we re-start the CRN at 1 after a reset.
2561 			 */
2562 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2563 
2564 			ccb->ccb_h.status = CAM_REQ_CMP;
2565 		}
2566 		xpt_done(ccb);
2567 		break;
2568 	case XPT_ABORT:			/* Abort the specified CCB */
2569 	{
2570 		union ccb *accb = ccb->cab.abort_ccb;
2571 		switch (accb->ccb_h.func_code) {
2572 #ifdef	ISP_TARGET_MODE
2573 		case XPT_ACCEPT_TARGET_IO:
2574 			isp_abort_atio(isp, ccb);
2575 			break;
2576 		case XPT_IMMEDIATE_NOTIFY:
2577 			isp_abort_inot(isp, ccb);
2578 			break;
2579 #endif
2580 		case XPT_SCSI_IO:
2581 			error = isp_control(isp, ISPCTL_ABORT_CMD, accb);
2582 			if (error) {
2583 				ccb->ccb_h.status = CAM_UA_ABORT;
2584 			} else {
2585 				ccb->ccb_h.status = CAM_REQ_CMP;
2586 			}
2587 			break;
2588 		default:
2589 			ccb->ccb_h.status = CAM_REQ_INVALID;
2590 			break;
2591 		}
2592 		/*
2593 		 * This is not a queued CCB, so the caller expects it to be
2594 		 * complete when control is returned.
2595 		 */
2596 		break;
2597 	}
2598 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2599 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2600 		cts = &ccb->cts;
2601 		if (!IS_CURRENT_SETTINGS(cts)) {
2602 			ccb->ccb_h.status = CAM_REQ_INVALID;
2603 			xpt_done(ccb);
2604 			break;
2605 		}
2606 		ccb->ccb_h.status = CAM_REQ_CMP;
2607 		xpt_done(ccb);
2608 		break;
2609 	case XPT_GET_TRAN_SETTINGS:
2610 	{
2611 		struct ccb_trans_settings_scsi *scsi;
2612 		struct ccb_trans_settings_fc *fc;
2613 
2614 		cts = &ccb->cts;
2615 		scsi = &cts->proto_specific.scsi;
2616 		fc = &cts->xport_specific.fc;
2617 		tgt = cts->ccb_h.target_id;
2618 		fcp = FCPARAM(isp, bus);
2619 
2620 		cts->protocol = PROTO_SCSI;
2621 		cts->protocol_version = SCSI_REV_2;
2622 		cts->transport = XPORT_FC;
2623 		cts->transport_version = 0;
2624 
2625 		scsi->valid = CTS_SCSI_VALID_TQ;
2626 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2627 		fc->valid = CTS_FC_VALID_SPEED;
2628 		fc->bitrate = fcp->isp_gbspeed * 100000;
2629 		if (tgt < MAX_FC_TARG) {
2630 			fcportdb_t *lp = &fcp->portdb[tgt];
2631 			fc->wwnn = lp->node_wwn;
2632 			fc->wwpn = lp->port_wwn;
2633 			fc->port = lp->portid;
2634 			fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2635 		}
2636 		ccb->ccb_h.status = CAM_REQ_CMP;
2637 		xpt_done(ccb);
2638 		break;
2639 	}
2640 	case XPT_CALC_GEOMETRY:
2641 		cam_calc_geometry(&ccb->ccg, 1);
2642 		xpt_done(ccb);
2643 		break;
2644 
2645 	case XPT_RESET_BUS:		/* Reset the specified bus */
2646 		error = isp_control(isp, ISPCTL_RESET_BUS, bus);
2647 		if (error) {
2648 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2649 			xpt_done(ccb);
2650 			break;
2651 		}
2652 		if (bootverbose) {
2653 			xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus);
2654 		}
2655 		xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0);
2656 		ccb->ccb_h.status = CAM_REQ_CMP;
2657 		xpt_done(ccb);
2658 		break;
2659 
2660 	case XPT_TERM_IO:		/* Terminate the I/O process */
2661 		ccb->ccb_h.status = CAM_REQ_INVALID;
2662 		xpt_done(ccb);
2663 		break;
2664 
2665 	case XPT_SET_SIM_KNOB:		/* Set SIM knobs */
2666 	{
2667 		struct ccb_sim_knob *kp = &ccb->knob;
2668 		fcparam *fcp = FCPARAM(isp, bus);
2669 
2670 		if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
2671 			fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn;
2672 			fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn;
2673 			isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn);
2674 		}
2675 		ccb->ccb_h.status = CAM_REQ_CMP;
2676 		if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) {
2677 			int rchange = 0;
2678 			int newrole = 0;
2679 
2680 			switch (kp->xport_specific.fc.role) {
2681 			case KNOB_ROLE_NONE:
2682 				if (fcp->role != ISP_ROLE_NONE) {
2683 					rchange = 1;
2684 					newrole = ISP_ROLE_NONE;
2685 				}
2686 				break;
2687 			case KNOB_ROLE_TARGET:
2688 				if (fcp->role != ISP_ROLE_TARGET) {
2689 					rchange = 1;
2690 					newrole = ISP_ROLE_TARGET;
2691 				}
2692 				break;
2693 			case KNOB_ROLE_INITIATOR:
2694 				if (fcp->role != ISP_ROLE_INITIATOR) {
2695 					rchange = 1;
2696 					newrole = ISP_ROLE_INITIATOR;
2697 				}
2698 				break;
2699 			case KNOB_ROLE_BOTH:
2700 				if (fcp->role != ISP_ROLE_BOTH) {
2701 					rchange = 1;
2702 					newrole = ISP_ROLE_BOTH;
2703 				}
2704 				break;
2705 			}
2706 			if (rchange) {
2707 				ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role from %d to %d\n", fcp->role, newrole);
2708 				if (isp_control(isp, ISPCTL_CHANGE_ROLE,
2709 				    bus, newrole) != 0) {
2710 					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2711 					xpt_done(ccb);
2712 					break;
2713 				}
2714 			}
2715 		}
2716 		xpt_done(ccb);
2717 		break;
2718 	}
2719 	case XPT_GET_SIM_KNOB_OLD:	/* Get SIM knobs -- compat value */
2720 	case XPT_GET_SIM_KNOB:		/* Get SIM knobs */
2721 	{
2722 		struct ccb_sim_knob *kp = &ccb->knob;
2723 		fcparam *fcp = FCPARAM(isp, bus);
2724 
2725 		kp->xport_specific.fc.wwnn = fcp->isp_wwnn;
2726 		kp->xport_specific.fc.wwpn = fcp->isp_wwpn;
2727 		switch (fcp->role) {
2728 		case ISP_ROLE_NONE:
2729 			kp->xport_specific.fc.role = KNOB_ROLE_NONE;
2730 			break;
2731 		case ISP_ROLE_TARGET:
2732 			kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
2733 			break;
2734 		case ISP_ROLE_INITIATOR:
2735 			kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
2736 			break;
2737 		case ISP_ROLE_BOTH:
2738 			kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
2739 			break;
2740 		}
2741 		kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
2742 		ccb->ccb_h.status = CAM_REQ_CMP;
2743 		xpt_done(ccb);
2744 		break;
2745 	}
2746 	case XPT_PATH_INQ:		/* Path routing inquiry */
2747 	{
2748 		struct ccb_pathinq *cpi = &ccb->cpi;
2749 
2750 		cpi->version_num = 1;
2751 #ifdef	ISP_TARGET_MODE
2752 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2753 #else
2754 		cpi->target_sprt = 0;
2755 #endif
2756 		cpi->hba_eng_cnt = 0;
2757 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2758 		cpi->max_lun = 255;
2759 		cpi->bus_id = cam_sim_bus(sim);
2760 		cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE;
2761 
2762 		fcp = FCPARAM(isp, bus);
2763 
2764 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
2765 		cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN;
2766 
2767 		/*
2768 		 * Because our loop ID can shift from time to time,
2769 		 * make our initiator ID out of range of our bus.
2770 		 */
2771 		cpi->initiator_id = cpi->max_target + 1;
2772 
2773 		/*
2774 		 * Set base transfer capabilities for Fibre Channel, for this HBA.
2775 		 */
2776 		if (IS_25XX(isp))
2777 			cpi->base_transfer_speed = 8000000;
2778 		else
2779 			cpi->base_transfer_speed = 4000000;
2780 		cpi->hba_inquiry = PI_TAG_ABLE;
2781 		cpi->transport = XPORT_FC;
2782 		cpi->transport_version = 0;
2783 		cpi->xport_specific.fc.wwnn = fcp->isp_wwnn;
2784 		cpi->xport_specific.fc.wwpn = fcp->isp_wwpn;
2785 		cpi->xport_specific.fc.port = fcp->isp_portid;
2786 		cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000;
2787 		cpi->protocol = PROTO_SCSI;
2788 		cpi->protocol_version = SCSI_REV_2;
2789 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2790 		strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2791 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2792 		cpi->unit_number = cam_sim_unit(sim);
2793 		cpi->ccb_h.status = CAM_REQ_CMP;
2794 		xpt_done(ccb);
2795 		break;
2796 	}
2797 	default:
2798 		ccb->ccb_h.status = CAM_REQ_INVALID;
2799 		xpt_done(ccb);
2800 		break;
2801 	}
2802 }
2803 
2804 void
2805 isp_done(XS_T *sccb)
2806 {
2807 	ispsoftc_t *isp = XS_ISP(sccb);
2808 	uint32_t status;
2809 
2810 	if (XS_NOERR(sccb))
2811 		XS_SETERR(sccb, CAM_REQ_CMP);
2812 
2813 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) {
2814 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2815 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2816 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2817 		} else {
2818 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2819 		}
2820 	}
2821 
2822 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2823 	status = sccb->ccb_h.status & CAM_STATUS_MASK;
2824 	if (status != CAM_REQ_CMP &&
2825 	    (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2826 		sccb->ccb_h.status |= CAM_DEV_QFRZN;
2827 		xpt_freeze_devq(sccb->ccb_h.path, 1);
2828 	}
2829 
2830 	if (ISP_PCMD(sccb)) {
2831 		if (callout_active(&PISP_PCMD(sccb)->wdog))
2832 			callout_stop(&PISP_PCMD(sccb)->wdog);
2833 		isp_free_pcmd(isp, (union ccb *) sccb);
2834 	}
2835 	isp_rq_check_below(isp);
2836 	xpt_done((union ccb *) sccb);
2837 }
2838 
2839 void
2840 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
2841 {
2842 	int bus;
2843 	static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s";
2844 	char buf[64];
2845 	char *msg = NULL;
2846 	target_id_t tgt = 0;
2847 	fcportdb_t *lp;
2848 	struct isp_fc *fc;
2849 	struct ac_contract ac;
2850 	struct ac_device_changed *adc;
2851 	va_list ap;
2852 
2853 	switch (cmd) {
2854 	case ISPASYNC_LOOP_RESET:
2855 	{
2856 		uint16_t lipp;
2857 		fcparam *fcp;
2858 		va_start(ap, cmd);
2859 		bus = va_arg(ap, int);
2860 		va_end(ap);
2861 
2862 		lipp = ISP_READ(isp, OUTMAILBOX1);
2863 		fcp = FCPARAM(isp, bus);
2864 
2865 		isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp);
2866 		/*
2867 		 * Per FCP-4, a Reset LIP should result in a CRN reset. Other
2868 		 * LIPs and loop up/down events should never reset the CRN. For
2869 		 * an as of yet unknown reason, 24xx series cards (and
2870 		 * potentially others) can interrupt with a LIP Reset status
2871 		 * when no LIP reset came down the wire. Additionally, the LIP
2872 		 * primitive accompanying this status would not be a valid LIP
2873 		 * Reset primitive, but some variation of an invalid AL_PA
2874 		 * LIP. As a result, we have to verify that the AL_PD in the
2875 		 * LIP addresses our port before blindly resetting.
2876 		 */
2877 		if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF)))
2878 			isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0);
2879 		isp_loop_changed(isp, bus);
2880 		break;
2881 	}
2882 	case ISPASYNC_LIP:
2883 		if (msg == NULL)
2884 			msg = "LIP Received";
2885 		/* FALLTHROUGH */
2886 	case ISPASYNC_LOOP_DOWN:
2887 		if (msg == NULL)
2888 			msg = "LOOP Down";
2889 		/* FALLTHROUGH */
2890 	case ISPASYNC_LOOP_UP:
2891 		if (msg == NULL)
2892 			msg = "LOOP Up";
2893 		va_start(ap, cmd);
2894 		bus = va_arg(ap, int);
2895 		va_end(ap);
2896 		isp_loop_changed(isp, bus);
2897 		isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
2898 		break;
2899 	case ISPASYNC_DEV_ARRIVED:
2900 		va_start(ap, cmd);
2901 		bus = va_arg(ap, int);
2902 		lp = va_arg(ap, fcportdb_t *);
2903 		va_end(ap);
2904 		fc = ISP_FC_PC(isp, bus);
2905 		tgt = FC_PORTDB_TGT(isp, bus, lp);
2906 		isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2907 		isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived");
2908 		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2909 		    (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) {
2910 			lp->is_target = 1;
2911 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2912 			isp_make_here(isp, lp, bus, tgt);
2913 		}
2914 		if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2915 		    (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) {
2916 			lp->is_initiator = 1;
2917 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2918 			adc = (struct ac_device_changed *) ac.contract_data;
2919 			adc->wwpn = lp->port_wwn;
2920 			adc->port = lp->portid;
2921 			adc->target = tgt;
2922 			adc->arrived = 1;
2923 			xpt_async(AC_CONTRACT, fc->path, &ac);
2924 		}
2925 		break;
2926 	case ISPASYNC_DEV_CHANGED:
2927 	case ISPASYNC_DEV_STAYED:
2928 	{
2929 		int crn_reset_done;
2930 
2931 		crn_reset_done = 0;
2932 		va_start(ap, cmd);
2933 		bus = va_arg(ap, int);
2934 		lp = va_arg(ap, fcportdb_t *);
2935 		va_end(ap);
2936 		fc = ISP_FC_PC(isp, bus);
2937 		tgt = FC_PORTDB_TGT(isp, bus, lp);
2938 		isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3);
2939 		if (cmd == ISPASYNC_DEV_CHANGED)
2940 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed");
2941 		else
2942 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed");
2943 
2944 		if (lp->is_target !=
2945 		    ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2946 		     (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) {
2947 			lp->is_target = !lp->is_target;
2948 			if (lp->is_target) {
2949 				if (cmd == ISPASYNC_DEV_CHANGED) {
2950 					isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2951 					crn_reset_done = 1;
2952 				}
2953 				isp_make_here(isp, lp, bus, tgt);
2954 			} else {
2955 				isp_make_gone(isp, lp, bus, tgt);
2956 				if (cmd == ISPASYNC_DEV_CHANGED) {
2957 					isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2958 					crn_reset_done = 1;
2959 				}
2960 			}
2961 		}
2962 		if (lp->is_initiator !=
2963 		    ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2964 		     (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) {
2965 			lp->is_initiator = !lp->is_initiator;
2966 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2967 			adc = (struct ac_device_changed *) ac.contract_data;
2968 			adc->wwpn = lp->port_wwn;
2969 			adc->port = lp->portid;
2970 			adc->target = tgt;
2971 			adc->arrived = lp->is_initiator;
2972 			xpt_async(AC_CONTRACT, fc->path, &ac);
2973 		}
2974 
2975 		if ((cmd == ISPASYNC_DEV_CHANGED) &&
2976 		    (crn_reset_done == 0))
2977 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2978 
2979 		break;
2980 	}
2981 	case ISPASYNC_DEV_GONE:
2982 		va_start(ap, cmd);
2983 		bus = va_arg(ap, int);
2984 		lp = va_arg(ap, fcportdb_t *);
2985 		va_end(ap);
2986 		fc = ISP_FC_PC(isp, bus);
2987 		tgt = FC_PORTDB_TGT(isp, bus, lp);
2988 		/*
2989 		 * If this has a virtual target or initiator, set the isp_gdt
2990 		 * timer running on it to delay its departure.
2991 		 */
2992 		isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2993 		if (lp->is_target || lp->is_initiator) {
2994 			lp->state = FC_PORTDB_STATE_ZOMBIE;
2995 			lp->gone_timer = fc->gone_device_time;
2996 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie");
2997 			if (fc->ready && !callout_active(&fc->gdt)) {
2998 				isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime);
2999 				callout_reset(&fc->gdt, hz, isp_gdt, fc);
3000 			}
3001 			break;
3002 		}
3003 		isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone");
3004 		break;
3005 	case ISPASYNC_CHANGE_NOTIFY:
3006 	{
3007 		char *msg;
3008 		int evt, nphdl, nlstate, portid, reason;
3009 
3010 		va_start(ap, cmd);
3011 		bus = va_arg(ap, int);
3012 		evt = va_arg(ap, int);
3013 		if (evt == ISPASYNC_CHANGE_PDB) {
3014 			nphdl = va_arg(ap, int);
3015 			nlstate = va_arg(ap, int);
3016 			reason = va_arg(ap, int);
3017 		} else if (evt == ISPASYNC_CHANGE_SNS) {
3018 			portid = va_arg(ap, int);
3019 		} else {
3020 			nphdl = NIL_HANDLE;
3021 			nlstate = reason = 0;
3022 		}
3023 		va_end(ap);
3024 
3025 		if (evt == ISPASYNC_CHANGE_PDB) {
3026 			int tgt_set = 0;
3027 			msg = "Port Database Changed";
3028 			isp_prt(isp, ISP_LOGINFO,
3029 			    "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)",
3030 			    bus, msg, nphdl, nlstate, reason);
3031 			/*
3032 			 * Port database syncs are not sufficient for
3033 			 * determining that logins or logouts are done on the
3034 			 * loop, but this information is directly available from
3035 			 * the reason code from the incoming mbox. We must reset
3036 			 * the fcp crn on these events according to FCP-4
3037 			 * the FCP CRN on these events according to FCP-4.
3038 			switch (reason) {
3039 			case PDB24XX_AE_IMPL_LOGO_1:
3040 			case PDB24XX_AE_IMPL_LOGO_2:
3041 			case PDB24XX_AE_IMPL_LOGO_3:
3042 			case PDB24XX_AE_PLOGI_RCVD:
3043 			case PDB24XX_AE_PRLI_RCVD:
3044 			case PDB24XX_AE_PRLO_RCVD:
3045 			case PDB24XX_AE_LOGO_RCVD:
3046 			case PDB24XX_AE_PLOGI_DONE:
3047 			case PDB24XX_AE_PRLI_DONE:
3048 				/*
3049 				 * If the event is not global, twiddle tgt and
3050 				 * tgt_set to nominate only the target
3051 				 * associated with the nphdl.
3052 				 */
3053 				if (nphdl != PDB24XX_AE_GLOBAL) {
3054 					/* Break if we don't yet have the pdb */
3055 					if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp))
3056 						break;
3057 					tgt = FC_PORTDB_TGT(isp, bus, lp);
3058 					tgt_set = 1;
3059 				}
3060 				isp_fcp_reset_crn(isp, bus, tgt, tgt_set);
3061 				break;
3062 			default:
3063 				break; /* NOP */
3064 			}
3065 		} else if (evt == ISPASYNC_CHANGE_SNS) {
3066 			msg = "Name Server Database Changed";
3067 			isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)",
3068 			    bus, msg, portid);
3069 		} else {
3070 			msg = "Other Change Notify";
3071 			isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
3072 		}
3073 		isp_loop_changed(isp, bus);
3074 		break;
3075 	}
3076 #ifdef	ISP_TARGET_MODE
3077 	case ISPASYNC_TARGET_NOTIFY:
3078 	{
3079 		isp_notify_t *notify;
3080 		va_start(ap, cmd);
3081 		notify = va_arg(ap, isp_notify_t *);
3082 		va_end(ap);
3083 		switch (notify->nt_ncode) {
3084 		case NT_ABORT_TASK:
3085 		case NT_ABORT_TASK_SET:
3086 		case NT_CLEAR_ACA:
3087 		case NT_CLEAR_TASK_SET:
3088 		case NT_LUN_RESET:
3089 		case NT_TARGET_RESET:
3090 		case NT_QUERY_TASK_SET:
3091 		case NT_QUERY_ASYNC_EVENT:
3092 			/*
3093 			 * These are task management functions.
3094 			 */
3095 			isp_handle_platform_target_tmf(isp, notify);
3096 			break;
3097 		case NT_LIP_RESET:
3098 		case NT_LINK_UP:
3099 		case NT_LINK_DOWN:
3100 		case NT_HBA_RESET:
3101 			/*
3102 			 * No action need be taken here.
3103 			 */
3104 			break;
3105 		case NT_SRR:
3106 			isp_handle_platform_srr(isp, notify);
3107 			break;
3108 		default:
3109 			isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode);
3110 			isp_handle_platform_target_notify_ack(isp, notify, 0);
3111 			break;
3112 		}
3113 		break;
3114 	}
3115 	case ISPASYNC_TARGET_NOTIFY_ACK:
3116 	{
3117 		void *inot;
3118 		va_start(ap, cmd);
3119 		inot = va_arg(ap, void *);
3120 		va_end(ap);
3121 		if (isp_notify_ack(isp, inot)) {
3122 			isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT);
3123 			if (tp) {
3124 				tp->isp = isp;
3125 				memcpy(tp->data, inot, sizeof (tp->data));
3126 				tp->not = tp->data;
3127 				callout_init_mtx(&tp->timer, &isp->isp_lock, 0);
3128 				callout_reset(&tp->timer, 5,
3129 				    isp_refire_notify_ack, tp);
3130 			} else {
3131 				isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire");
3132 			}
3133 		}
3134 		break;
3135 	}
3136 	case ISPASYNC_TARGET_ACTION:
3137 	{
3138 		isphdr_t *hp;
3139 
3140 		va_start(ap, cmd);
3141 		hp = va_arg(ap, isphdr_t *);
3142 		va_end(ap);
3143 		switch (hp->rqs_entry_type) {
3144 		case RQSTYPE_ATIO:
3145 			isp_handle_platform_atio7(isp, (at7_entry_t *)hp);
3146 			break;
3147 		case RQSTYPE_CTIO7:
3148 			isp_handle_platform_ctio(isp, (ct7_entry_t *)hp);
3149 			break;
3150 		default:
3151 			isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x",
3152 			    __func__, hp->rqs_entry_type);
3153 			break;
3154 		}
3155 		break;
3156 	}
3157 #endif
3158 	case ISPASYNC_FW_CRASH:
3159 	{
3160 		uint16_t mbox1;
3161 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3162 		isp_prt(isp, ISP_LOGERR, "Internal Firmware Error @ RISC Address 0x%x", mbox1);
3163 #if 0
3164 		isp_reinit(isp, 1);
3165 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3166 #endif
3167 		break;
3168 	}
3169 	default:
3170 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3171 		break;
3172 	}
3173 }
3174 
3175 uint64_t
3176 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
3177 {
3178 	uint64_t seed;
3179 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3180 
3181 	/* First try to use explicitly configured WWNs. */
3182 	seed = iswwnn ? fc->def_wwnn : fc->def_wwpn;
3183 	if (seed)
3184 		return (seed);
3185 
3186 	/* Otherwise try to use WWNs from NVRAM. */
3187 	if (isactive) {
3188 		seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram :
3189 		    FCPARAM(isp, chan)->isp_wwpn_nvram;
3190 		if (seed)
3191 			return (seed);
3192 	}
3193 
3194 	/* If still no WWNs, try to steal them from the first channel. */
3195 	if (chan > 0) {
3196 		seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn :
3197 		    ISP_FC_PC(isp, 0)->def_wwpn;
3198 		if (seed == 0) {
3199 			seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram :
3200 			    FCPARAM(isp, 0)->isp_wwpn_nvram;
3201 		}
3202 	}
3203 
3204 	/* If still nothing -- improvise. */
3205 	if (seed == 0) {
3206 		seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev);
3207 		if (!iswwnn)
3208 			seed ^= 0x0100000000000000ULL;
3209 	}
3210 
3211 	/* For additional channels we have to improvise even more. */
3212 	if (!iswwnn && chan > 0) {
3213 		/*
3214 		 * We'll stick the low four bits of our channel number plus one into
3215 		 * bits 56..59 and the high four bits into bits 52..55, which allows
3216 		 * for 8 bits of channel, enough for our maximum of 255 channels.
3217 		 */
3218 		seed ^= 0x0100000000000000ULL;
3219 		seed ^= ((uint64_t) (chan + 1) & 0xf) << 56;
3220 		seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52;
3221 	}
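	/*
	 * Worked example (assuming the improvised seed above and unit 0):
	 * channel 0 reports WWNN 0x400000007F000000 and WWPN
	 * 0x410000007F000000, while channel 1 ends up with WWPN
	 * 0x420000007F000000.
	 */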
3222 	return (seed);
3223 }
3224 
3225 void
3226 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
3227 {
3228 	int loc;
3229 	char lbuf[200];
3230 	va_list ap;
3231 
3232 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3233 		return;
3234 	}
3235 	snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev));
3236 	loc = strlen(lbuf);
3237 	va_start(ap, fmt);
3238 	vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap);
3239 	va_end(ap);
3240 	printf("%s\n", lbuf);
3241 }
3242 
3243 void
3244 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
3245 {
3246 	va_list ap;
3247 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3248 		return;
3249 	}
3250 	xpt_print_path(xs->ccb_h.path);
3251 	va_start(ap, fmt);
3252 	vprintf(fmt, ap);
3253 	va_end(ap);
3254 	printf("\n");
3255 }
3256 
3257 uint64_t
3258 isp_nanotime_sub(struct timespec *b, struct timespec *a)
3259 {
3260 	uint64_t elapsed;
3261 	struct timespec x;
3262 
3263 	timespecsub(b, a, &x);
3264 	elapsed = GET_NANOSEC(&x);
3265 	if (elapsed == 0)
3266 		elapsed++;
3267 	return (elapsed);
3268 }
3269 
3270 int
3271 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan)
3272 {
3273 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3274 
3275 	if (fc->fcbsy)
3276 		return (-1);
3277 	fc->fcbsy = 1;
3278 	return (0);
3279 }
3280 
3281 void
3282 isp_platform_intr(void *arg)
3283 {
3284 	ispsoftc_t *isp = arg;
3285 
3286 	ISP_LOCK(isp);
3287 	ISP_RUN_ISR(isp);
3288 	ISP_UNLOCK(isp);
3289 }
3290 
3291 void
3292 isp_platform_intr_resp(void *arg)
3293 {
3294 	ispsoftc_t *isp = arg;
3295 
3296 	ISP_LOCK(isp);
3297 	isp_intr_respq(isp);
3298 	ISP_UNLOCK(isp);
3299 
3300 	/* We have handshake enabled, so explicitly complete the interrupt. */
3301 	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3302 }
3303 
3304 void
3305 isp_platform_intr_atio(void *arg)
3306 {
3307 	ispsoftc_t *isp = arg;
3308 
3309 	ISP_LOCK(isp);
3310 #ifdef	ISP_TARGET_MODE
3311 	isp_intr_atioq(isp);
3312 #endif
3313 	ISP_UNLOCK(isp);
3314 
3315 	/* We have handshake enabled, so explicitly complete the interrupt. */
3316 	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3317 }
3318 
3319 typedef struct {
3320 	ispsoftc_t		*isp;
3321 	struct ccb_scsiio	*csio;
3322 	void			*qe;
3323 	int			error;
3324 } mush_t;
3325 
3326 static void
3327 isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
3328 {
3329 	mush_t *mp = (mush_t *) arg;
3330 	ispsoftc_t *isp= mp->isp;
3331 	struct ccb_scsiio *csio = mp->csio;
3332 	bus_dmasync_op_t op;
3333 
3334 	if (error) {
3335 		mp->error = error;
3336 		return;
3337 	}
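	/*
	 * Choose the pre-DMA sync direction.  For initiator I/O, CAM_DIR_IN
	 * means the device fills host memory, so we sync for PREREAD; for
	 * target-mode CTIOs the direction is expressed from the initiator's
	 * point of view, so the XOR below inverts the test.
	 */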
3338 	if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3339 	    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3340 		op = BUS_DMASYNC_PREREAD;
3341 	else
3342 		op = BUS_DMASYNC_PREWRITE;
3343 	bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3344 
3345 	mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg);
3346 	if (mp->error)
3347 		isp_dmafree(isp, csio);
3348 }
3349 
3350 int
3351 isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe)
3352 {
3353 	mush_t mp;
3354 	int error;
3355 
3356 	if (XS_XFRLEN(csio)) {
3357 		mp.isp = isp;
3358 		mp.csio = csio;
3359 		mp.qe = qe;
3360 		mp.error = 0;
3361 		error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
3362 		    (union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT);
3363 		if (error == 0)
3364 			error = mp.error;
3365 	} else {
3366 		error = ISP_SEND_CMD(isp, qe, NULL, 0);
3367 	}
3368 	switch (error) {
3369 	case 0:
3370 	case CMD_COMPLETE:
3371 	case CMD_EAGAIN:
3372 	case CMD_RQLATER:
3373 		break;
3374 	case ENOMEM:
3375 		error = CMD_EAGAIN;
3376 		break;
3377 	case EINVAL:
3378 	case EFBIG:
3379 		csio->ccb_h.status = CAM_REQ_INVALID;
3380 		error = CMD_COMPLETE;
3381 		break;
3382 	default:
3383 		csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
3384 		error = CMD_COMPLETE;
3385 		break;
3386 	}
3387 	return (error);
3388 }
3389 
3390 void
3391 isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio)
3392 {
3393 	bus_dmasync_op_t op;
3394 
3395 	if (XS_XFRLEN(csio) == 0)
3396 		return;
3397 
3398 	if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3399 	    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3400 		op = BUS_DMASYNC_POSTREAD;
3401 	else
3402 		op = BUS_DMASYNC_POSTWRITE;
3403 	bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3404 	bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
3405 }
3406 
3407 /*
3408  * Reset the command reference number for all LUNs on a specific target
3409  * (needed when a target arrives again) or for all targets on a port
3410  * (needed for events like a LIP).
3411  */
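/*
 * The per-nexus CRN seeds live in a small hash keyed by (target, lun); the
 * reset simply walks every bucket and zeroes the seed for matching targets,
 * so the next isp_fcp_next_crn() call on that nexus restarts the CRN at 1.
 */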
3412 void
3413 isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set)
3414 {
3415 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3416 	struct isp_nexus *nxp;
3417 	int i;
3418 
3419 	if (tgt_set == 0)
3420 		isp_prt(isp, ISP_LOGDEBUG0,
3421 		    "Chan %d resetting CRN on all targets", chan);
3422 	else
3423 		isp_prt(isp, ISP_LOGDEBUG0,
3424 		    "Chan %d resetting CRN on target %u", chan, tgt);
3425 
3426 	for (i = 0; i < NEXUS_HASH_WIDTH; i++) {
3427 		for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) {
3428 			if (tgt_set == 0 || tgt == nxp->tgt)
3429 				nxp->crnseed = 0;
3430 		}
3431 	}
3432 }
3433 
3434 int
3435 isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd)
3436 {
3437 	lun_id_t lun;
3438 	uint32_t chan, tgt;
3439 	struct isp_fc *fc;
3440 	struct isp_nexus *nxp;
3441 	int idx;
3442 
3443 	chan = XS_CHANNEL(cmd);
3444 	tgt = XS_TGT(cmd);
3445 	lun = XS_LUN(cmd);
3446 	fc = ISP_FC_PC(isp, chan);
3447 	idx = NEXUS_HASH(tgt, lun);
3448 	nxp = fc->nexus_hash[idx];
3449 
3450 	while (nxp) {
3451 		if (nxp->tgt == tgt && nxp->lun == lun)
3452 			break;
3453 		nxp = nxp->next;
3454 	}
3455 	if (nxp == NULL) {
3456 		nxp = fc->nexus_free_list;
3457 		if (nxp == NULL) {
3458 			nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT);
3459 			if (nxp == NULL) {
3460 				return (-1);
3461 			}
3462 		} else {
3463 			fc->nexus_free_list = nxp->next;
3464 		}
3465 		nxp->tgt = tgt;
3466 		nxp->lun = lun;
3467 		nxp->next = fc->nexus_hash[idx];
3468 		fc->nexus_hash[idx] = nxp;
3469 	}
3470 	if (nxp->crnseed == 0)
3471 		nxp->crnseed = 1;
3472 	*crnp = nxp->crnseed++;
3473 	return (0);
3474 }
3475 
3476 /*
3477  * We enter with the lock held
3478  */
3479 void
3480 isp_timer(void *arg)
3481 {
3482 	ispsoftc_t *isp = arg;
3483 #ifdef	ISP_TARGET_MODE
3484 	isp_tmcmd_restart(isp);
3485 #endif
3486 	callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
3487 }
3488 
3489 #ifdef	ISP_TARGET_MODE
3490 isp_ecmd_t *
3491 isp_get_ecmd(ispsoftc_t *isp)
3492 {
3493 	isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free;
3494 	if (ecmd) {
3495 		isp->isp_osinfo.ecmd_free = ecmd->next;
3496 	}
3497 	return (ecmd);
3498 }
3499 
3500 void
3501 isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd)
3502 {
3503 	ecmd->next = isp->isp_osinfo.ecmd_free;
3504 	isp->isp_osinfo.ecmd_free = ecmd;
3505 }
3506 #endif
3507