xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 9849949cae0603df0485a0be8a3f80fb8f68f304)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
31 #include <machine/stdarg.h>	/* for use by isp_prt below */
32 #include <sys/conf.h>
33 #include <sys/ioccom.h>
34 #include <dev/isp/isp_ioctl.h>
35 
36 
/*
 * Local prototypes: control device ioctl entry point, deferred
 * interrupt enable hook, CAM glue (async callback, poll and action
 * routines), command watchdog, and the fibre channel support kthread.
 */
37 static d_ioctl_t ispioctl;
38 static void isp_intr_enable(void *);
39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
40 static void isp_poll(struct cam_sim *);
41 #if	0
42 static void isp_relsim(void *);
43 #endif
44 static timeout_t isp_watchdog;
45 static void isp_kthread(void *);
46 static void isp_action(struct cam_sim *, union ccb *);
47 
48 
/*
 * Character device switch for the isp control device. Only ioctl is
 * implemented; every other entry point is the standard no-op handler.
 * NOTE(review): the D_TAPE flag is carried from prior revisions —
 * confirm it is still the intended device classification.
 */
49 #define ISP_CDEV_MAJOR	248
50 static struct cdevsw isp_cdevsw = {
51 	/* open */	nullopen,
52 	/* close */	nullclose,
53 	/* read */	noread,
54 	/* write */	nowrite,
55 	/* ioctl */	ispioctl,
56 	/* poll */	nopoll,
57 	/* mmap */	nommap,
58 	/* strategy */	nostrategy,
59 	/* name */	"isp",
60 	/* maj */	ISP_CDEV_MAJOR,
61 	/* dump */	nodump,
62 	/* psize */	nopsize,
63 	/* flags */	D_TAPE,
64 };
65 
66 static struct ispsoftc *isplist = NULL;
67 
/*
 * isp_attach:
 *	Platform attach glue. Allocates a shared CAM device queue,
 *	constructs and registers one SIM (two for dual bus cards),
 *	installs an AC_LOST_DEVICE async callback, defers interrupt
 *	enabling via a config intrhook, spins up a support kthread for
 *	fibre channel instances, creates the /dev control node, and
 *	finally links this instance onto the global isplist.
 *	Failures are logged (where a message exists) and cause an
 *	early return; no status is propagated to the caller.
 */
68 void
69 isp_attach(struct ispsoftc *isp)
70 {
71 	int primary, secondary;
72 	struct ccb_setasync csa;
73 	struct cam_devq *devq;
74 	struct cam_sim *sim;
75 	struct cam_path *path;
76 
77 	/*
78 	 * Establish (in case of 12X0) which bus is the primary.
79 	 */
80 
81 	primary = 0;
82 	secondary = 1;
83 
84 	/*
85 	 * Create the device queue for our SIM(s).
86 	 */
87 	devq = cam_simq_alloc(isp->isp_maxcmds);
88 	if (devq == NULL) {
89 		return;
90 	}
91 
92 	/*
93 	 * Construct our SIM entry.
94 	 */
	/* CAM entry points must be called with the CAM lock held. */
95 	ISPLOCK_2_CAMLOCK(isp);
96 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
97 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
98 	if (sim == NULL) {
99 		cam_simq_free(devq);
100 		CAMLOCK_2_ISPLOCK(isp);
101 		return;
102 	}
103 	CAMLOCK_2_ISPLOCK(isp);
104 
	/*
	 * Defer turning on hardware interrupts until the system can
	 * actually deliver them (isp_intr_enable runs from the hook).
	 */
105 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
106 	isp->isp_osinfo.ehook.ich_arg = isp;
107 	ISPLOCK_2_CAMLOCK(isp);
108 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
109 		cam_sim_free(sim, TRUE);
110 		CAMLOCK_2_ISPLOCK(isp);
111 		isp_prt(isp, ISP_LOGERR,
112 		    "could not establish interrupt enable hook");
113 		return;
114 	}
115 
116 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
117 		cam_sim_free(sim, TRUE);
118 		CAMLOCK_2_ISPLOCK(isp);
119 		return;
120 	}
121 
	/* Wildcard path used for async callbacks on the first bus. */
122 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
123 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
124 		xpt_bus_deregister(cam_sim_path(sim));
125 		cam_sim_free(sim, TRUE);
126 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
127 		CAMLOCK_2_ISPLOCK(isp);
128 		return;
129 	}
130 
131 	xpt_setup_ccb(&csa.ccb_h, path, 5);
132 	csa.ccb_h.func_code = XPT_SASYNC_CB;
133 	csa.event_enable = AC_LOST_DEVICE;
134 	csa.callback = isp_cam_async;
135 	csa.callback_arg = sim;
136 	xpt_action((union ccb *)&csa);
137 	CAMLOCK_2_ISPLOCK(isp);
138 	isp->isp_sim = sim;
139 	isp->isp_path = path;
140 	/*
141 	 * Create a kernel thread for fibre channel instances. We
142 	 * don't have dual channel FC cards.
143 	 */
144 	if (IS_FC(isp)) {
145 		ISPLOCK_2_CAMLOCK(isp);
146 		/* XXX: LOCK VIOLATION */
147 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
148 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
149 		    RFHIGHPID, "%s: fc_thrd",
150 		    device_get_nameunit(isp->isp_dev))) {
151 			xpt_bus_deregister(cam_sim_path(sim));
152 			cam_sim_free(sim, TRUE);
153 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
154 			CAMLOCK_2_ISPLOCK(isp);
155 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
156 			return;
157 		}
158 		CAMLOCK_2_ISPLOCK(isp);
159 	}
160 
161 
162 	/*
163 	 * If we have a second channel, construct SIM entry for that.
164 	 */
165 	if (IS_DUALBUS(isp)) {
166 		ISPLOCK_2_CAMLOCK(isp);
		/* Second SIM shares the same devq as the first bus. */
167 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
168 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
169 		if (sim == NULL) {
170 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
171 			xpt_free_path(isp->isp_path);
172 			cam_simq_free(devq);
173 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
174 			return;
175 		}
176 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
177 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
178 			xpt_free_path(isp->isp_path);
179 			cam_sim_free(sim, TRUE);
180 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
181 			CAMLOCK_2_ISPLOCK(isp);
182 			return;
183 		}
184 
185 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
186 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
187 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
188 			xpt_free_path(isp->isp_path);
189 			xpt_bus_deregister(cam_sim_path(sim));
190 			cam_sim_free(sim, TRUE);
191 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
192 			CAMLOCK_2_ISPLOCK(isp);
193 			return;
194 		}
195 
196 		xpt_setup_ccb(&csa.ccb_h, path, 5);
197 		csa.ccb_h.func_code = XPT_SASYNC_CB;
198 		csa.event_enable = AC_LOST_DEVICE;
199 		csa.callback = isp_cam_async;
200 		csa.callback_arg = sim;
201 		xpt_action((union ccb *)&csa);
202 		CAMLOCK_2_ISPLOCK(isp);
203 		isp->isp_sim2 = sim;
204 		isp->isp_path2 = path;
205 	}
206 
207 #ifdef	ISP_TARGET_MODE
	/* Per-bus condition variables for the target mode rqe semaphore. */
208 	cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
209 	cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
210 	cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
211 	cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
212 #endif
213 	/*
214 	 * Create device nodes
215 	 */
216 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
217 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
218 
219 	if (isp->isp_role != ISP_ROLE_NONE) {
220 		isp->isp_state = ISP_RUNSTATE;
221 		ENABLE_INTS(isp);
222 	}
	/* Append this instance to the tail of the global list. */
223 	if (isplist == NULL) {
224 		isplist = isp;
225 	} else {
226 		struct ispsoftc *tmp = isplist;
227 		while (tmp->isp_osinfo.next) {
228 			tmp = tmp->isp_osinfo.next;
229 		}
230 		tmp->isp_osinfo.next = isp;
231 	}
232 
233 }
234 
/*
 * ispioctl:
 *	Control device ioctl entry point. The device minor number is
 *	matched against the device unit to select the instance from
 *	isplist; ENXIO if no instance matches. Commands:
 *	  ISP_SDBLEV     - set debug level, old level copied back out
 *	  ISP_RESETHBA   - reinitialize the adapter
 *	  ISP_FC_RESCAN  - (FC only) re-evaluate fc loop/fabric state
 *	  ISP_FC_LIP     - (FC only) send a LIP
 *	  ISP_FC_GETDINFO- (FC only) copy out one port database entry
 *	Unknown commands return ENOTTY.
 */
235 static int
236 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
237 {
238 	struct ispsoftc *isp;
239 	int retval = ENOTTY;
240 
	/* Map the minor number to an attached instance. */
241 	isp = isplist;
242 	while (isp) {
243 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
244 			break;
245 		}
246 		isp = isp->isp_osinfo.next;
247 	}
248 	if (isp == NULL)
249 		return (ENXIO);
250 
251 	switch (cmd) {
252 	case ISP_SDBLEV:
253 	{
		/* Swap: install new debug level, return the old one. */
254 		int olddblev = isp->isp_dblev;
255 		isp->isp_dblev = *(int *)addr;
256 		*(int *)addr = olddblev;
257 		retval = 0;
258 		break;
259 	}
260 	case ISP_RESETHBA:
261 		ISP_LOCK(isp);
262 		isp_reinit(isp);
263 		ISP_UNLOCK(isp);
264 		retval = 0;
265 		break;
266 	case ISP_FC_RESCAN:
267 		if (IS_FC(isp)) {
268 			ISP_LOCK(isp);
			/* Allow up to 5 seconds for loop state to settle. */
269 			if (isp_fc_runstate(isp, 5 * 1000000)) {
270 				retval = EIO;
271 			} else {
272 				retval = 0;
273 			}
274 			ISP_UNLOCK(isp);
275 		}
276 		break;
277 	case ISP_FC_LIP:
278 		if (IS_FC(isp)) {
279 			ISP_LOCK(isp);
280 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
281 				retval = EIO;
282 			} else {
283 				retval = 0;
284 			}
285 			ISP_UNLOCK(isp);
286 		}
287 		break;
288 	case ISP_FC_GETDINFO:
289 	{
290 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
291 		struct lportdb *lp;
292 
		/* Validate the caller-supplied loop id before indexing. */
293 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
294 			retval = EINVAL;
295 			break;
296 		}
297 		ISP_LOCK(isp);
298 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
299 		if (lp->valid) {
300 			ifc->loopid = lp->loopid;
301 			ifc->portid = lp->portid;
302 			ifc->node_wwn = lp->node_wwn;
303 			ifc->port_wwn = lp->port_wwn;
304 			retval = 0;
305 		} else {
306 			retval = ENODEV;
307 		}
308 		ISP_UNLOCK(isp);
309 		break;
310 	}
311 	default:
312 		break;
313 	}
314 	return (retval);
315 }
316 
317 static void
318 isp_intr_enable(void *arg)
319 {
320 	struct ispsoftc *isp = arg;
321 	if (isp->isp_role != ISP_ROLE_NONE) {
322 		ENABLE_INTS(isp);
323 		isp->isp_osinfo.intsok = 1;
324 	}
325 	/* Release our hook so that the boot can continue. */
326 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
327 }
328 
329 /*
330  * Put the target mode functions here, because some are inlines
331  */
332 
333 #ifdef	ISP_TARGET_MODE
334 
/*
 * Forward declarations for target mode support: lun state hash
 * lookup helpers, the per-bus enable/disable request semaphore and
 * its condition variable wrappers, lun state lifecycle management,
 * and the platform halves of ATIO/CTIO processing.
 */
335 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
336 static __inline int are_any_luns_enabled(struct ispsoftc *, int);
337 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
338 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
339 static __inline int isp_psema_sig_rqe(struct ispsoftc *, int);
340 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
341 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int, int);
342 static __inline void isp_vsema_rqe(struct ispsoftc *, int);
343 static cam_status
344 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
345 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
346 static void isp_en_lun(struct ispsoftc *, union ccb *);
347 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
348 static timeout_t isp_refire_putback_atio;
349 static void isp_complete_ctio(union ccb *);
350 static void isp_target_putback_atio(union ccb *);
351 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
352 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
353 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
354 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
355 
356 static __inline int
357 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
358 {
359 	tstate_t *tptr;
360 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
361 	if (tptr == NULL) {
362 		return (0);
363 	}
364 	do {
365 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
366 			return (1);
367 		}
368 	} while ((tptr = tptr->next) != NULL);
369 	return (0);
370 }
371 
372 static __inline int
373 are_any_luns_enabled(struct ispsoftc *isp, int port)
374 {
375 	int lo, hi;
376 	if (IS_DUALBUS(isp)) {
377 		lo = (port * (LUN_HASH_SIZE >> 1));
378 		hi = lo + (LUN_HASH_SIZE >> 1);
379 	} else {
380 		lo = 0;
381 		hi = LUN_HASH_SIZE;
382 	}
383 	for (lo = 0; lo < hi; lo++) {
384 		if (isp->isp_osinfo.lun_hash[lo]) {
385 			return (1);
386 		}
387 	}
388 	return (0);
389 }
390 
391 static __inline tstate_t *
392 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
393 {
394 	tstate_t *tptr = NULL;
395 
396 	if (lun == CAM_LUN_WILDCARD) {
397 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
398 			tptr = &isp->isp_osinfo.tsdflt[bus];
399 			tptr->hold++;
400 			return (tptr);
401 		}
402 	} else {
403 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
404 		if (tptr == NULL) {
405 			return (NULL);
406 		}
407 	}
408 
409 	do {
410 		if (tptr->lun == lun && tptr->bus == bus) {
411 			tptr->hold++;
412 			return (tptr);
413 		}
414 	} while ((tptr = tptr->next) != NULL);
415 	return (tptr);
416 }
417 
418 static __inline void
419 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
420 {
421 	if (tptr->hold)
422 		tptr->hold--;
423 }
424 
425 static __inline int
426 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
427 {
428 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
429 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
430 		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
431 			return (-1);
432 		}
433 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
434 	}
435 	return (0);
436 }
437 
438 static __inline int
439 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
440 {
441 	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
442 		return (-1);
443 	}
444 	return (0);
445 }
446 
447 static __inline void
448 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
449 {
450 	isp->isp_osinfo.rstatus[bus] = status;
451 	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
452 }
453 
454 static __inline void
455 isp_vsema_rqe(struct ispsoftc *isp, int bus)
456 {
457 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
458 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
459 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
460 	}
461 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
462 }
463 
/*
 * create_lun_state:
 *	Allocate and initialize a tstate_t for the (bus, lun) named by
 *	'path', clone the CAM path into it, and append it to its lun
 *	hash chain. On success *rslt points at the new state, which is
 *	returned with one hold reference (hold == 1) so it cannot be
 *	destroyed under the caller. Returns a CAM status code.
 */
464 static cam_status
465 create_lun_state(struct ispsoftc *isp, int bus,
466     struct cam_path *path, tstate_t **rslt)
467 {
468 	cam_status status;
469 	lun_id_t lun;
470 	int hfx;
471 	tstate_t *tptr, *new;
472 
473 	lun = xpt_path_lun_id(path);
474 	if (lun < 0) {
475 		return (CAM_LUN_INVALID);
476 	}
477 	if (is_lun_enabled(isp, bus, lun)) {
478 		return (CAM_LUN_ALRDY_ENA);
479 	}
480 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
481 	if (new == NULL) {
482 		return (CAM_RESRC_UNAVAIL);
483 	}
484 
	/* Clone the caller's path so the state owns its own reference. */
485 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
486 	    xpt_path_target_id(path), xpt_path_lun_id(path));
487 	if (status != CAM_REQ_CMP) {
488 		free(new, M_DEVBUF);
489 		return (status);
490 	}
491 	new->bus = bus;
492 	new->lun = lun;
493 	SLIST_INIT(&new->atios);
494 	SLIST_INIT(&new->inots);
495 	new->hold = 1;
496 
	/* Append to the tail of the hash chain. */
497 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
498 	tptr = isp->isp_osinfo.lun_hash[hfx];
499 	if (tptr == NULL) {
500 		isp->isp_osinfo.lun_hash[hfx] = new;
501 	} else {
502 		while (tptr->next)
503 			tptr = tptr->next;
504 		tptr->next = new;
505 	}
506 	*rslt = new;
507 	return (CAM_REQ_CMP);
508 }
509 
/*
 * destroy_lun_state:
 *	Unlink a lun state from its hash chain and free it. Silently
 *	does nothing while the state still has hold references, or if
 *	no matching (bus, lun) entry can be found on the chain.
 */
510 static __inline void
511 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
512 {
513 	int hfx;
514 	tstate_t *lw, *pw;
515 
516 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
517 	if (tptr->hold) {
518 		return;
519 	}
520 	pw = isp->isp_osinfo.lun_hash[hfx];
521 	if (pw == NULL) {
522 		return;
523 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
		/* Match at the head of the chain. */
524 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
525 	} else {
		/* Walk with a trailing pointer so we can unlink. */
526 		lw = pw;
527 		pw = lw->next;
528 		while (pw) {
529 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
530 				lw->next = pw->next;
531 				break;
532 			}
533 			lw = pw;
534 			pw = pw->next;
535 		}
		/* Not found on the chain: nothing to free. */
536 		if (pw == NULL) {
537 			return;
538 		}
539 	}
540 	free(tptr, M_DEVBUF);
541 }
542 
/*
 * isp_en_lun:
 *	Implement XPT_EN_LUN: enable or disable a lun (or the
 *	target/lun wildcard) for target mode on one channel. Performs
 *	sanity checks, handles wildcard state, toggles target mode on
 *	the bus as needed, then issues the firmware ENABLE/MODIFY lun
 *	commands and waits for their completion notifications. The
 *	result is left in ccb->ccb_h.status.
 *
 *	We enter with our locks held.
 */
546 static void
547 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
548 {
549 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
550 	struct ccb_en_lun *cel = &ccb->cel;
551 	tstate_t *tptr;
552 	u_int16_t rstat;
553 	int bus, cmd, av, wildcard;
554 	lun_id_t lun;
555 	target_id_t tgt;
556 
557 
558 	bus = XS_CHANNEL(ccb) & 0x1;
559 	tgt = ccb->ccb_h.target_id;
560 	lun = ccb->ccb_h.target_lun;
561 
562 	/*
563 	 * Do some sanity checking first.
564 	 */
565 
566 	if ((lun != CAM_LUN_WILDCARD) &&
567 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
568 		ccb->ccb_h.status = CAM_LUN_INVALID;
569 		return;
570 	}
571 
	/* The target id must be ours (or the wildcard). */
572 	if (IS_SCSI(isp)) {
573 		sdparam *sdp = isp->isp_param;
574 		sdp += bus;
575 		if (tgt != CAM_TARGET_WILDCARD &&
576 		    tgt != sdp->isp_initiator_id) {
577 			ccb->ccb_h.status = CAM_TID_INVALID;
578 			return;
579 		}
580 	} else {
581 		if (tgt != CAM_TARGET_WILDCARD &&
582 		    tgt != FCPARAM(isp)->isp_iid) {
583 			ccb->ccb_h.status = CAM_TID_INVALID;
584 			return;
585 		}
586 		/*
587 		 * This is as a good a place as any to check f/w capabilities.
588 		 */
589 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
590 			isp_prt(isp, ISP_LOGERR,
591 			    "firmware does not support target mode");
592 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
593 			return;
594 		}
595 		/*
596 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
597 		 * XXX: dorks with our already fragile enable/disable code.
598 		 */
599 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
600 			isp_prt(isp, ISP_LOGERR,
601 			    "firmware not SCCLUN capable");
602 		}
603 	}
604 
	/* A wildcard target requires a wildcard lun as well. */
605 	if (tgt == CAM_TARGET_WILDCARD) {
606 		if (lun == CAM_LUN_WILDCARD) {
607 			wildcard = 1;
608 		} else {
609 			ccb->ccb_h.status = CAM_LUN_INVALID;
610 			return;
611 		}
612 	} else {
613 		wildcard = 0;
614 	}
615 
616 	/*
617 	 * Next check to see whether this is a target/lun wildcard action.
618 	 *
619 	 * If so, we know that we can accept commands for luns that haven't
620 	 * been enabled yet and send them upstream. Otherwise, we have to
621 	 * handle them locally (if we see them at all).
622 	 */
623 
624 	if (wildcard) {
625 		tptr = &isp->isp_osinfo.tsdflt[bus];
626 		if (cel->enable) {
627 			if (isp->isp_osinfo.tmflags[bus] &
628 			    TM_WILDCARD_ENABLED) {
629 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
630 				return;
631 			}
632 			ccb->ccb_h.status =
633 			    xpt_create_path(&tptr->owner, NULL,
634 			    xpt_path_path_id(ccb->ccb_h.path),
635 			    xpt_path_target_id(ccb->ccb_h.path),
636 			    xpt_path_lun_id(ccb->ccb_h.path));
637 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
638 				return;
639 			}
640 			SLIST_INIT(&tptr->atios);
641 			SLIST_INIT(&tptr->inots);
642 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
643 		} else {
644 			if ((isp->isp_osinfo.tmflags[bus] &
645 			    TM_WILDCARD_ENABLED) == 0) {
646 				ccb->ccb_h.status = CAM_REQ_CMP;
647 				return;
648 			}
649 			if (tptr->hold) {
650 				ccb->ccb_h.status = CAM_SCSI_BUSY;
651 				return;
652 			}
653 			xpt_free_path(tptr->owner);
654 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
655 		}
656 	}
657 
658 	/*
659 	 * Now check to see whether this bus needs to be
660 	 * enabled/disabled with respect to target mode.
661 	 */
662 	av = bus << 31;
663 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
664 		av |= ENABLE_TARGET_FLAG;
665 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
666 		if (av) {
667 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			/* Roll back the wildcard enable done above. */
668 			if (wildcard) {
669 				isp->isp_osinfo.tmflags[bus] &=
670 				    ~TM_WILDCARD_ENABLED;
671 				xpt_free_path(tptr->owner);
672 			}
673 			return;
674 		}
675 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
676 		isp_prt(isp, ISP_LOGINFO,
677 		    "Target Mode enabled on channel %d", bus);
678 	} else if (cel->enable == 0 &&
679 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
680 		if (are_any_luns_enabled(isp, bus)) {
681 			ccb->ccb_h.status = CAM_SCSI_BUSY;
682 			return;
683 		}
684 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
685 		if (av) {
686 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
687 			return;
688 		}
689 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
690 		isp_prt(isp, ISP_LOGINFO,
691 		    "Target Mode disabled on channel %d", bus);
692 	}
693 
	/* Wildcard actions involve no per-lun firmware commands. */
694 	if (wildcard) {
695 		ccb->ccb_h.status = CAM_REQ_CMP;
696 		return;
697 	}
698 
	/* Get (disable) or create (enable) the per-lun state, held. */
699 	if (cel->enable) {
700 		ccb->ccb_h.status =
701 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
702 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
703 			return;
704 		}
705 	} else {
706 		tptr = get_lun_statep(isp, bus, lun);
707 		if (tptr == NULL) {
708 			ccb->ccb_h.status = CAM_LUN_INVALID;
709 			return;
710 		}
711 	}
712 
	/* Serialize firmware lun commands against other requesters. */
713 	if (isp_psema_sig_rqe(isp, bus)) {
714 		rls_lun_statep(isp, tptr);
715 		if (cel->enable)
716 			destroy_lun_state(isp, tptr);
717 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
718 		return;
719 	}
720 
	/* Enable path: issue ENABLE (or MODIFY for FC lun != 0) and wait. */
721 	if (cel->enable) {
722 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
723 		int c, n, ulun = lun;
724 
725 		cmd = RQSTYPE_ENABLE_LUN;
726 		c = DFLT_CMND_CNT;
727 		n = DFLT_INOT_CNT;
728 		if (IS_FC(isp) && lun != 0) {
729 			cmd = RQSTYPE_MODIFY_LUN;
730 			n = 0;
731 			/*
732 		 	 * For SCC firmware, we only deal with setting
733 			 * (enabling or modifying) lun 0.
734 			 */
735 			ulun = 0;
736 		}
737 		rstat = LUN_ERR;
738 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
739 			xpt_print_path(ccb->ccb_h.path);
740 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
741 			goto out;
742 		}
743 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
744 			xpt_print_path(ccb->ccb_h.path);
745 			isp_prt(isp, ISP_LOGERR,
746 			    "wait for ENABLE/MODIFY LUN timed out");
747 			goto out;
748 		}
749 		rstat = isp->isp_osinfo.rstatus[bus];
750 		if (rstat != LUN_OK) {
751 			xpt_print_path(ccb->ccb_h.path);
752 			isp_prt(isp, ISP_LOGERR,
753 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
754 			goto out;
755 		}
	/* Disable path: MODIFY first, then (non-FC or lun 0) DISABLE. */
756 	} else {
757 		int c, n, ulun = lun;
758 		u_int32_t seq;
759 
760 		rstat = LUN_ERR;
761 		seq = isp->isp_osinfo.rollinfo++;
762 		cmd = -RQSTYPE_MODIFY_LUN;
763 
764 		c = DFLT_CMND_CNT;
765 		n = DFLT_INOT_CNT;
766 		if (IS_FC(isp) && lun != 0) {
767 			n = 0;
768 			/*
769 		 	 * For SCC firmware, we only deal with setting
770 			 * (enabling or modifying) lun 0.
771 			 */
772 			ulun = 0;
773 		}
774 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
775 			xpt_print_path(ccb->ccb_h.path);
776 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
777 			goto out;
778 		}
779 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
780 			xpt_print_path(ccb->ccb_h.path);
781 			isp_prt(isp, ISP_LOGERR,
782 			    "wait for MODIFY LUN timed out");
783 			goto out;
784 		}
785 		rstat = isp->isp_osinfo.rstatus[bus];
786 		if (rstat != LUN_OK) {
787 			xpt_print_path(ccb->ccb_h.path);
788 			isp_prt(isp, ISP_LOGERR,
789 			    "MODIFY LUN returned 0x%x", rstat);
790 			goto out;
791 		}
792 		if (IS_FC(isp) && lun) {
793 			goto out;
794 		}
795 
796 		seq = isp->isp_osinfo.rollinfo++;
797 
798 		rstat = LUN_ERR;
799 		cmd = -RQSTYPE_ENABLE_LUN;
800 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
801 			xpt_print_path(ccb->ccb_h.path);
802 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
803 			goto out;
804 		}
805 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
806 			xpt_print_path(ccb->ccb_h.path);
807 			isp_prt(isp, ISP_LOGERR,
808 			     "wait for DISABLE LUN timed out");
809 			goto out;
810 		}
811 		rstat = isp->isp_osinfo.rstatus[bus];
812 		if (rstat != LUN_OK) {
813 			xpt_print_path(ccb->ccb_h.path);
814 			isp_prt(isp, ISP_LOGWARN,
815 			    "DISABLE LUN returned 0x%x", rstat);
816 			goto out;
817 		}
		/* Last lun gone: turn target mode off for the bus. */
818 		if (are_any_luns_enabled(isp, bus) == 0) {
819 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
820 			if (av) {
821 				isp_prt(isp, ISP_LOGWARN,
822 				    "disable target mode on channel %d failed",
823 				    bus);
824 				goto out;
825 			}
826 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
827 			xpt_print_path(ccb->ccb_h.path);
828 			isp_prt(isp, ISP_LOGINFO,
829 			    "Target Mode disabled on channel %d", bus);
830 		}
831 	}
832 
	/* Common exit: release the semaphore and report the result. */
833 out:
834 	isp_vsema_rqe(isp, bus);
835 
836 	if (rstat != LUN_OK) {
837 		xpt_print_path(ccb->ccb_h.path);
838 		isp_prt(isp, ISP_LOGWARN,
839 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
840 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
841 		rls_lun_statep(isp, tptr);
842 		if (cel->enable)
843 			destroy_lun_state(isp, tptr);
844 	} else {
845 		xpt_print_path(ccb->ccb_h.path);
846 		isp_prt(isp, ISP_LOGINFO, lfmt,
847 		    (cel->enable) ? "en" : "dis", bus);
848 		rls_lun_statep(isp, tptr);
849 		if (cel->enable == 0) {
850 			destroy_lun_state(isp, tptr);
851 		}
852 		ccb->ccb_h.status = CAM_REQ_CMP;
853 	}
854 }
855 
/*
 * isp_abort_tgt_ccb:
 *	Abort a queued XPT_ACCEPT_TARGET_IO or XPT_IMMED_NOTIFY ccb by
 *	unlinking it from the lun state's atio or inot list. Returns
 *	CAM_REQ_CMP (after marking the victim CAM_REQ_ABORTED) when
 *	the ccb was found and removed, CAM_UA_ABORT for other function
 *	codes, and CAM_PATH_INVALID otherwise.
 */
856 static cam_status
857 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
858 {
859 	tstate_t *tptr;
860 	struct ccb_hdr_slist *lp;
861 	struct ccb_hdr *curelm;
862 	int found;
863 	union ccb *accb = ccb->cab.abort_ccb;
864 
	/* A non-wildcard target must be our own id. */
865 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
866 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
867 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
868 			return (CAM_PATH_INVALID);
869 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
870 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
871 			return (CAM_PATH_INVALID);
872 		}
873 	}
874 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
875 	if (tptr == NULL) {
876 		return (CAM_PATH_INVALID);
877 	}
	/* Select the list holding this kind of ccb. */
878 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
879 		lp = &tptr->atios;
880 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
881 		lp = &tptr->inots;
882 	} else {
883 		rls_lun_statep(isp, tptr);
884 		return (CAM_UA_ABORT);
885 	}
	/* Hand-rolled singly-linked removal of the victim ccb. */
886 	curelm = SLIST_FIRST(lp);
887 	found = 0;
888 	if (curelm == &accb->ccb_h) {
889 		found = 1;
890 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
891 	} else {
892 		while(curelm != NULL) {
893 			struct ccb_hdr *nextelm;
894 
895 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
896 			if (nextelm == &accb->ccb_h) {
897 				found = 1;
898 				SLIST_NEXT(curelm, sim_links.sle) =
899 				    SLIST_NEXT(nextelm, sim_links.sle);
900 				break;
901 			}
902 			curelm = nextelm;
903 		}
904 	}
905 	rls_lun_statep(isp, tptr);
906 	if (found) {
907 		accb->ccb_h.status = CAM_REQ_ABORTED;
908 		return (CAM_REQ_CMP);
909 	}
910 	return(CAM_PATH_INVALID);
911 }
912 
/*
 * isp_target_start_ctio:
 *	Build and queue a CTIO (parallel SCSI) or CTIO2 (fibre
 *	channel) request entry for a target mode continue-target-I/O
 *	ccb. The ccb is registered as an outstanding command via
 *	isp_save_xs, then handed to the DMA setup layer which also
 *	performs any byte swizzling. Returns CAM_REQ_INPROG when the
 *	entry was queued, CAM_RESRC_UNAVAIL on queue/handle shortage,
 *	or the ccb's own error status from DMA setup.
 */
913 static cam_status
914 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
915 {
916 	void *qe;
917 	struct ccb_scsiio *cso = &ccb->csio;
918 	u_int16_t *hp, save_handle;
919 	u_int16_t iptr, optr;
920 
921 
922 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
923 		xpt_print_path(ccb->ccb_h.path);
924 		printf("Request Queue Overflow in isp_target_start_ctio\n");
925 		return (CAM_RESRC_UNAVAIL);
926 	}
927 	bzero(qe, QENTRY_LEN);
928 
929 	/*
930 	 * We're either moving data or completing a command here.
931 	 */
932 
933 	if (IS_FC(isp)) {
934 		int resid;
935 		ct2_entry_t *cto = qe;
936 
937 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
938 		cto->ct_header.rqs_entry_count = 1;
939 		cto->ct_iid = cso->init_id;
		/* Non-SCC firmware wants an explicit lun in the CTIO2. */
940 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
941 			cto->ct_lun = ccb->ccb_h.target_lun;
942 		}
943 		/*
944 		 * Start with a residual based on what the original datalength
945 		 * was supposed to be. Basically, we ignore what CAM has set
946 		 * for residuals. The data transfer routines will knock off
947 		 * the residual for each byte actually moved- and also will
948 		 * be responsible for setting the underrun flag.
949 		 */
950 		/* HACK! HACK! */
951 		resid = ccb->ccb_h.spriv_field0;
952 		cto->ct_resid = 0;
953 
954 		cto->ct_rxid = cso->tag_id;
		/* Mode 1 (status, possibly with sense) if no data moves. */
955 		if (cso->dxfer_len == 0) {
956 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
957 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
958 				cto->ct_flags |= CT2_SENDSTATUS;
959 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
960 				cto->ct_resid = resid;
961 			}
962 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
963 				int m = min(cso->sense_len, MAXRESPLEN);
964 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
965 				cto->rsp.m1.ct_senselen = m;
966 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
967 			}
968 		} else {
			/* Mode 0 moves data, optionally with final status. */
969 			cto->ct_flags |= CT2_FLAG_MODE0;
970 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
971 				cto->ct_flags |= CT2_DATA_IN;
972 			} else {
973 				cto->ct_flags |= CT2_DATA_OUT;
974 			}
975 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
976 				cto->ct_flags |= CT2_SENDSTATUS;
977 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
978 				cto->ct_resid = resid;
979 			}
980 			/*
981 			 * If we're sending data and status back together,
982 			 * we can't also send back sense data as well.
983 			 */
984 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
985 		}
986 		if (cto->ct_flags & CT2_SENDSTATUS) {
987 			isp_prt(isp, ISP_LOGTDEBUG0,
988 			    "CTIO2[%x] SCSI STATUS 0x%x curxf %u finalresid %u",
989 			    cto->ct_rxid, cso->scsi_status, cso->dxfer_len,
990 			    resid);
991 			cto->ct_flags |= CT2_CCINCR;
992 		}
993 		cto->ct_timeout = 10;
994 		hp = &cto->ct_syshandle;
995 	} else {
996 		ct_entry_t *cto = qe;
997 
998 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
999 		cto->ct_header.rqs_entry_count = 1;
1000 		cto->ct_iid = cso->init_id;
		/* Channel is encoded into the high bit(s) of the iid. */
1001 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1002 		cto->ct_tgt = ccb->ccb_h.target_id;
1003 		cto->ct_lun = ccb->ccb_h.target_lun;
1004 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1005 		if (AT_HAS_TAG(cso->tag_id)) {
1006 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1007 			cto->ct_flags |= CT_TQAE;
1008 		}
1009 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1010 			cto->ct_flags |= CT_NODISC;
1011 		}
1012 		if (cso->dxfer_len == 0) {
1013 			cto->ct_flags |= CT_NO_DATA;
1014 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1015 			cto->ct_flags |= CT_DATA_IN;
1016 		} else {
1017 			cto->ct_flags |= CT_DATA_OUT;
1018 		}
1019 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1020 			cto->ct_flags |= CT_SENDSTATUS;
1021 			cto->ct_scsi_status = cso->scsi_status;
1022 			cto->ct_resid = cso->resid;
1023 		}
1024 		if (cto->ct_flags & CT_SENDSTATUS) {
1025 			isp_prt(isp, ISP_LOGTDEBUG0,
1026 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1027 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1028 			    cso->tag_id);
1029 			cto->ct_flags |= CT_CCINCR;
1030 		}
1031 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1032 		cto->ct_timeout = 10;
1033 		hp = &cto->ct_syshandle;
1034 	}
1035 
	/* Register the ccb so the completion path can find it by handle. */
1036 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1037 		xpt_print_path(ccb->ccb_h.path);
1038 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1039 		return (CAM_RESRC_UNAVAIL);
1040 	}
1041 
1042 
1043 	/*
1044 	 * Call the dma setup routines for this entry (and any subsequent
1045 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1046 	 * new things to play with. As with isp_start's usage of DMA setup,
1047 	 * any swizzling is done in the machine dependent layer. Because
1048 	 * of this, we put the request onto the queue area first in native
1049 	 * format.
1050 	 */
1051 
1052 	save_handle = *hp;
1053 
1054 	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
1055 	case CMD_QUEUED:
1056 		ISP_ADD_REQUEST(isp, iptr);
1057 		return (CAM_REQ_INPROG);
1058 
1059 	case CMD_EAGAIN:
1060 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1061 		isp_destroy_handle(isp, save_handle);
1062 		return (CAM_RESRC_UNAVAIL);
1063 
1064 	default:
		/* DMA setup failed: drop the handle, report the ccb error. */
1065 		isp_destroy_handle(isp, save_handle);
1066 		return (XS_ERR(ccb));
1067 	}
1068 }
1069 
/*
 * Timeout handler used to retry returning an ATIO to the firmware
 * after an earlier request queue overflow; runs the retry at CAM spl.
 */
static void
isp_refire_putback_atio(void *arg)
{
	int ospl;

	ospl = splcam();
	isp_target_putback_atio((union ccb *) arg);
	splx(ospl);
}
1077 
/*
 * isp_target_putback_atio:
 *	Return an ATIO resource to the firmware when the platform is
 *	done with it, by rebuilding an ATIO (SCSI) or ATIO2 (FC) queue
 *	entry from the original ccb. If the request queue is full, the
 *	operation is rescheduled via isp_refire_putback_atio. Once the
 *	entry is queued, the ccb itself is completed.
 */
1078 static void
1079 isp_target_putback_atio(union ccb *ccb)
1080 {
1081 	struct ispsoftc *isp;
1082 	struct ccb_scsiio *cso;
1083 	u_int16_t iptr, optr;
1084 	void *qe;
1085 
1086 	isp = XS_ISP(ccb);
1087 
	/* No queue space: retry from a timeout a few ticks from now. */
1088 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
1089 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1090 		isp_prt(isp, ISP_LOGWARN,
1091 		    "isp_target_putback_atio: Request Queue Overflow");
1092 		return;
1093 	}
1094 	bzero(qe, QENTRY_LEN);
1095 	cso = &ccb->csio;
1096 	if (IS_FC(isp)) {
1097 		at2_entry_t *at = qe;
1098 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1099 		at->at_header.rqs_entry_count = 1;
		/* SCC firmware uses the 16-bit scclun field instead. */
1100 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1101 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1102 		} else {
1103 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1104 		}
1105 		at->at_status = CT_OK;
1106 		at->at_rxid = cso->tag_id;
1107 		ISP_SWIZ_ATIO2(isp, qe, qe);
1108 	} else {
1109 		at_entry_t *at = qe;
1110 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1111 		at->at_header.rqs_entry_count = 1;
1112 		at->at_iid = cso->init_id;
		/* Channel is encoded into the high bit(s) of the iid. */
1113 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1114 		at->at_tgt = cso->ccb_h.target_id;
1115 		at->at_lun = cso->ccb_h.target_lun;
1116 		at->at_status = CT_OK;
1117 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1118 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1119 		ISP_SWIZ_ATIO(isp, qe, qe);
1120 	}
1121 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1122 	ISP_ADD_REQUEST(isp, iptr);
1123 	isp_complete_ctio(ccb);
1124 }
1125 
/*
 * Final completion of a target-mode CTIO: normalize the CAM status,
 * release any resource-shortage SIMQ freeze we own, and hand the CCB
 * back to CAM with xpt_done().
 */
static void
isp_complete_ctio(union ccb *ccb)
{
	struct ispsoftc *isp = XS_ISP(ccb);
	/* If nothing marked an error along the way, it completed OK. */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		/*
		 * A resource came free; drop that freeze reason and, if no
		 * other reason remains, ask CAM to release the SIM queue
		 * as a side effect of completing this CCB.
		 */
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen");
			}
		} else {
			isp_prt(isp, ISP_LOGWARN,
			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
		}
	}
	xpt_done(ccb);
}
1150 
1151 /*
1152  * Handle ATIO stuff that the generic code can't.
1153  * This means handling CDBs.
1154  */
1155 
/*
 * Process an inbound (parallel SCSI) ATIO from the firmware: validate
 * its status, find the lun state (falling back to the wildcard lun),
 * dequeue a waiting ACCEPT TARGET IO CCB, fill it in from the ATIO,
 * and complete it back to CAM.  Returns 0 in all cases.
 */
static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus, iswildcard;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		/* Anything other than "CDB received" shouldn't reach us. */
		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* Look up per-lun state; fall back to the wildcard lun. */
	bus = GET_BUS_VAL(aep->at_iid);
	tptr = get_lun_statep(isp, bus, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
		iswildcard = 1;
	} else {
		iswildcard = 0;
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d on channel %d",
		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		rls_lun_statep(isp, tptr);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	/* Wildcard listeners need to know which target/lun this was for. */
	if (iswildcard) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	/* Copy any firmware-suggested sense data, bounded to our buffer. */
	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = GET_IID_VAL(aep->at_iid);
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/*
	 * Construct a tag 'id' based upon tag value (which may be 0..255)
	 * and the handle (which we have to preserve).
	 */
	AT_MAKE_TAGID(atiop->tag_id, aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}
1276 
/*
 * Process an inbound Fibre Channel ATIO2 from the firmware: validate
 * status, resolve the lun (SCC firmware uses a wider lun field), dequeue
 * a waiting ACCEPT TARGET IO CCB, fill it in and complete it to CAM.
 * Returns 0 in all cases.
 */
static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* SCC firmware reports the lun in the wider at_scclun field. */
	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, 0, lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
		 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 *
		 */
		u_int32_t ccode = SCSI_STATUS_BUSY;

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, ccode, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d", lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);

	/* Wildcard (default) listeners get the real target id and lun. */
	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
		atiop->ccb_h.target_id =
			((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	/* Map FCP task attribute bits onto CAM tag message codes. */
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
        case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
        case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
        case ATIO2_TC_ATTR_ACAQ:		/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}

	/*
	 * Preserve overall command datalength in private field.
	 */
	atiop->ccb_h.spriv_field0 = aep->at_datalen;

	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}
1417 
/*
 * Handle completion of a CTIO or CTIO2 from the firmware: recover the
 * originating CCB via its handle, collect status/residual/sense, and
 * either finish the CCB (isp_complete_ctio) or, on error, push the
 * original ATIO back to the firmware.  Returns 0 in all cases.
 */
static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	u_int16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	/* ct_syshandle sits at the same offset in both entry layouts. */
	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		/* Odd sequence number marks the last CTIO of the chain. */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if (ct->ct_status & QLTM_SVALID) {
			/* Firmware appended sense bytes after the entry. */
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
		    ct->ct_status, ct->ct_flags, resid,
		    sentstatus? "FIN" : "MID");
		tval = ct->ct_fwhandle;
	}
	/* Residuals accumulate across the CTIOs of one command. */
	ccb->csio.resid += resid;

	/*
	 * We're here either because intermediate data transfers are done
	 * and/or the final status CTIO (which may have joined with a
	 * Data Transfer) is done.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. Any DMA handles have already been
	 * freed.
	 */
	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
		return (0);
	}

	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (resid %d)",
	    (sentstatus)? "  FINAL " : "MIDTERM ", tval, ccb->csio.resid);

	if (!ok) {
		isp_target_putback_atio(ccb);
	} else {
		isp_complete_ctio(ccb);

	}
	return (0);
}
1510 #endif
1511 
/*
 * CAM asynchronous event callback.  On AC_LOST_DEVICE for parallel
 * SCSI adapters, temporarily drop the target's negotiation goals back
 * to (NVRAM-derived or default) safe values and kick off a parameter
 * update, then restore the previous goals so a rediscovered device
 * renegotiates from where it left off.
 */
static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			ISP_LOCK(isp);
			/* Per-bus parameter block: index by SIM bus. */
			sdp += cam_sim_bus(sim);
			nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef	ISP_TARGET_MODE
			nflags &= DPARM_SAFE_DFLT;
			if (isp->isp_loaded_fw) {
				nflags |= DPARM_NARROW | DPARM_ASYNC;
			}
#else
			nflags = DPARM_DEFAULT;
#endif
			oflags = sdp->isp_devparam[tgt].goal_flags;
			sdp->isp_devparam[tgt].goal_flags = nflags;
			sdp->isp_devparam[tgt].dev_update = 1;
			isp->isp_update |= (1 << cam_sim_bus(sim));
			/* Push the safe settings to the chip now... */
			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
			/* ...then restore the old goals for renegotiation. */
			sdp->isp_devparam[tgt].goal_flags = oflags;
			ISP_UNLOCK(isp);
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}
1553 
1554 static void
1555 isp_poll(struct cam_sim *sim)
1556 {
1557 	struct ispsoftc *isp = cam_sim_softc(sim);
1558 	u_int16_t isr, sema, mbox;
1559 
1560 	ISP_LOCK(isp);
1561 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1562 		isp_intr(isp, isr, sema, mbox);
1563 	}
1564 	ISP_UNLOCK(isp);
1565 }
1566 
#if	0
/*
 * Timed SIMQ release (currently compiled out): clear the
 * SIMQFRZ_TIMED freeze reason and, if no other freeze reason
 * remains, release the SIM queue.
 */
static void
isp_relsim(void *arg)
{
	struct ispsoftc *isp = arg;
	ISP_LOCK(isp);
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			xpt_release_simq(isp->isp_sim, 1);
			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
		}
	}
	ISP_UNLOCK(isp);
}
#endif
1584 
/*
 * Per-command watchdog fired from timeout(9).  First tries to harvest
 * a pending interrupt in case the command actually completed; if not,
 * puts the command on "grace" by issuing a SYNC_ALL marker and
 * rescheduling, and only on the second expiry aborts the command,
 * frees its resources and fails it with CAM_CMD_TIMEOUT.
 */
static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle
	 * and seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			/* Completed between timeout firing and us running. */
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			/* Another watchdog invocation is already active. */
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		/* Give a pending interrupt a chance to complete the cmd. */
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
                	}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "watchdog timeout for handle %x", handle);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;

			/* First expiry: mark grace and push a sync marker. */
			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	ISP_UNLOCK(isp);
}
1668 
/*
 * Set once an FC kernel thread has finished its first loop-state scan;
 * gates whether commands are failed for a loop that never came up.
 * NOTE(review): this is a single global shared by every adapter
 * instance rather than per-softc state -- confirm that is intended
 * on systems with more than one HBA.
 */
static int isp_ktmature = 0;
1670 
/*
 * Fibre Channel support thread.  Loops forever: polls/waits for the
 * FC loop to become usable, then clears any loop-down SIMQ freeze so
 * queued commands can flow (or be failed if loop never came up), and
 * sleeps on kthread_cv until isp_action() signals loop-related work.
 */
static void
isp_kthread(void *arg)
{
	int wasfrozen;
	struct ispsoftc *isp = arg;

	mtx_lock(&isp->isp_lock);
	for (;;) {
		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
			/*
			 * Loop not up yet.  If we've never seen the loop
			 * and have already matured once, stop waiting so
			 * pending commands can be failed below.
			 */
			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
				if (FCPARAM(isp)->loop_seen_once == 0 ||
				    isp_ktmature == 0) {
					break;
				}
			}
			msleep(isp_kthread, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", hz);
		}
		/*
		 * Even if we didn't get good loop state we may be
		 * unfreezing the SIMQ so that we can kill off
		 * commands (if we've never seen loop before, e.g.)
		 */
		isp_ktmature = 1;
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
	}
}
1708 
1709 static void
1710 isp_action(struct cam_sim *sim, union ccb *ccb)
1711 {
1712 	int bus, tgt, error;
1713 	struct ispsoftc *isp;
1714 	struct ccb_trans_settings *cts;
1715 
1716 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1717 
1718 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1719 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1720 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1721 	if (isp->isp_state != ISP_RUNSTATE &&
1722 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1723 		CAMLOCK_2_ISPLOCK(isp);
1724 		isp_init(isp);
1725 		if (isp->isp_state != ISP_INITSTATE) {
1726 			ISP_UNLOCK(isp);
1727 			/*
1728 			 * Lie. Say it was a selection timeout.
1729 			 */
1730 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1731 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1732 			xpt_done(ccb);
1733 			return;
1734 		}
1735 		isp->isp_state = ISP_RUNSTATE;
1736 		ISPLOCK_2_CAMLOCK(isp);
1737 	}
1738 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1739 
1740 
1741 	switch (ccb->ccb_h.func_code) {
1742 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1743 		/*
1744 		 * Do a couple of preliminary checks...
1745 		 */
1746 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1747 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1748 				ccb->ccb_h.status = CAM_REQ_INVALID;
1749 				xpt_done(ccb);
1750 				break;
1751 			}
1752 		}
1753 #ifdef	DIAGNOSTIC
1754 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
1755 			ccb->ccb_h.status = CAM_PATH_INVALID;
1756 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
1757 			ccb->ccb_h.status = CAM_PATH_INVALID;
1758 		}
1759 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
1760 			isp_prt(isp, ISP_LOGERR,
1761 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
1762 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1763 			xpt_done(ccb);
1764 			break;
1765 		}
1766 #endif
1767 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
1768 		CAMLOCK_2_ISPLOCK(isp);
1769 		error = isp_start((XS_T *) ccb);
1770 		switch (error) {
1771 		case CMD_QUEUED:
1772 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1773 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1774 				u_int64_t ticks = (u_int64_t) hz;
1775 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1776 					ticks = 60 * 1000 * ticks;
1777 				else
1778 					ticks = ccb->ccb_h.timeout * hz;
1779 				ticks = ((ticks + 999) / 1000) + hz + hz;
1780 				if (ticks >= 0x80000000) {
1781 					isp_prt(isp, ISP_LOGERR,
1782 					    "timeout overflow");
1783 					ticks = 0x80000000;
1784 				}
1785 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
1786 				    (caddr_t)ccb, (int)ticks);
1787 			} else {
1788 				callout_handle_init(&ccb->ccb_h.timeout_ch);
1789 			}
1790 			ISPLOCK_2_CAMLOCK(isp);
1791 			break;
1792 		case CMD_RQLATER:
1793 			/*
1794 			 * This can only happen for Fibre Channel
1795 			 */
1796 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
1797 			if (FCPARAM(isp)->loop_seen_once == 0 && isp_ktmature) {
1798 				ISPLOCK_2_CAMLOCK(isp);
1799 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
1800 				xpt_done(ccb);
1801 				break;
1802 			}
1803 			cv_signal(&isp->isp_osinfo.kthread_cv);
1804 			if (isp->isp_osinfo.simqfrozen == 0) {
1805 				isp_prt(isp, ISP_LOGDEBUG2,
1806 				    "RQLATER freeze simq");
1807 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
1808 				ISPLOCK_2_CAMLOCK(isp);
1809 				xpt_freeze_simq(sim, 1);
1810 			} else {
1811 				ISPLOCK_2_CAMLOCK(isp);
1812 			}
1813 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1814 			xpt_done(ccb);
1815 			break;
1816 		case CMD_EAGAIN:
1817 			if (isp->isp_osinfo.simqfrozen == 0) {
1818 				xpt_freeze_simq(sim, 1);
1819 				isp_prt(isp, ISP_LOGDEBUG2,
1820 				    "EAGAIN freeze simq");
1821 			}
1822 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1823 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1824 			ISPLOCK_2_CAMLOCK(isp);
1825 			xpt_done(ccb);
1826 			break;
1827 		case CMD_COMPLETE:
1828 			isp_done((struct ccb_scsiio *) ccb);
1829 			ISPLOCK_2_CAMLOCK(isp);
1830 			break;
1831 		default:
1832 			isp_prt(isp, ISP_LOGERR,
1833 			    "What's this? 0x%x at %d in file %s",
1834 			    error, __LINE__, __FILE__);
1835 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
1836 			xpt_done(ccb);
1837 			ISPLOCK_2_CAMLOCK(isp);
1838 		}
1839 		break;
1840 
1841 #ifdef	ISP_TARGET_MODE
1842 	case XPT_EN_LUN:		/* Enable LUN as a target */
1843 	{
1844 		int iok;
1845 		CAMLOCK_2_ISPLOCK(isp);
1846 		iok = isp->isp_osinfo.intsok;
1847 		isp->isp_osinfo.intsok = 0;
1848 		isp_en_lun(isp, ccb);
1849 		isp->isp_osinfo.intsok = iok;
1850 		ISPLOCK_2_CAMLOCK(isp);
1851 		xpt_done(ccb);
1852 		break;
1853 	}
1854 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
1855 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
1856 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
1857 	{
1858 		tstate_t *tptr =
1859 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
1860 		if (tptr == NULL) {
1861 			ccb->ccb_h.status = CAM_LUN_INVALID;
1862 			xpt_done(ccb);
1863 			break;
1864 		}
1865 		ccb->ccb_h.sim_priv.entries[0].field = 0;
1866 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1867 		CAMLOCK_2_ISPLOCK(isp);
1868 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1869 			SLIST_INSERT_HEAD(&tptr->atios,
1870 			    &ccb->ccb_h, sim_links.sle);
1871 		} else {
1872 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
1873 			    sim_links.sle);
1874 		}
1875 		rls_lun_statep(isp, tptr);
1876 		ccb->ccb_h.status = CAM_REQ_INPROG;
1877 		ISPLOCK_2_CAMLOCK(isp);
1878 		break;
1879 	}
1880 	case XPT_CONT_TARGET_IO:
1881 	{
1882 		CAMLOCK_2_ISPLOCK(isp);
1883 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
1884 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1885 			if (isp->isp_osinfo.simqfrozen == 0) {
1886 				xpt_freeze_simq(sim, 1);
1887 				xpt_print_path(ccb->ccb_h.path);
1888 				isp_prt(isp, ISP_LOGINFO,
1889 				    "XPT_CONT_TARGET_IO freeze simq");
1890 			}
1891 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1892 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1893 			ISPLOCK_2_CAMLOCK(isp);
1894 			xpt_done(ccb);
1895 		} else {
1896 			ISPLOCK_2_CAMLOCK(isp);
1897 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1898 		}
1899 		break;
1900 	}
1901 #endif
1902 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
1903 
1904 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1905 		tgt = ccb->ccb_h.target_id;
1906 		tgt |= (bus << 16);
1907 
1908 		CAMLOCK_2_ISPLOCK(isp);
1909 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
1910 		ISPLOCK_2_CAMLOCK(isp);
1911 		if (error) {
1912 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1913 		} else {
1914 			ccb->ccb_h.status = CAM_REQ_CMP;
1915 		}
1916 		xpt_done(ccb);
1917 		break;
1918 	case XPT_ABORT:			/* Abort the specified CCB */
1919 	{
1920 		union ccb *accb = ccb->cab.abort_ccb;
1921 		CAMLOCK_2_ISPLOCK(isp);
1922 		switch (accb->ccb_h.func_code) {
1923 #ifdef	ISP_TARGET_MODE
1924 		case XPT_ACCEPT_TARGET_IO:
1925 		case XPT_IMMED_NOTIFY:
1926         		ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
1927 			break;
1928 		case XPT_CONT_TARGET_IO:
1929 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
1930 			ccb->ccb_h.status = CAM_UA_ABORT;
1931 			break;
1932 #endif
1933 		case XPT_SCSI_IO:
1934 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
1935 			if (error) {
1936 				ccb->ccb_h.status = CAM_UA_ABORT;
1937 			} else {
1938 				ccb->ccb_h.status = CAM_REQ_CMP;
1939 			}
1940 			break;
1941 		default:
1942 			ccb->ccb_h.status = CAM_REQ_INVALID;
1943 			break;
1944 		}
1945 		ISPLOCK_2_CAMLOCK(isp);
1946 		xpt_done(ccb);
1947 		break;
1948 	}
1949 #ifdef	CAM_NEW_TRAN_CODE
1950 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
1951 #else
1952 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
1953 #endif
1954 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
1955 		cts = &ccb->cts;
1956 		if (!IS_CURRENT_SETTINGS(cts)) {
1957 			ccb->ccb_h.status = CAM_REQ_INVALID;
1958 			xpt_done(ccb);
1959 			break;
1960 		}
1961 		tgt = cts->ccb_h.target_id;
1962 		CAMLOCK_2_ISPLOCK(isp);
1963 		if (IS_SCSI(isp)) {
1964 #ifndef	CAM_NEW_TRAN_CODE
1965 			sdparam *sdp = isp->isp_param;
1966 			u_int16_t *dptr;
1967 
1968 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1969 
1970 			sdp += bus;
1971 			/*
1972 			 * We always update (internally) from goal_flags
1973 			 * so any request to change settings just gets
1974 			 * vectored to that location.
1975 			 */
1976 			dptr = &sdp->isp_devparam[tgt].goal_flags;
1977 
1978 			/*
1979 			 * Note that these operations affect the
1980 			 * the goal flags (goal_flags)- not
1981 			 * the current state flags. Then we mark
1982 			 * things so that the next operation to
1983 			 * this HBA will cause the update to occur.
1984 			 */
1985 			if (cts->valid & CCB_TRANS_DISC_VALID) {
1986 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
1987 					*dptr |= DPARM_DISC;
1988 				} else {
1989 					*dptr &= ~DPARM_DISC;
1990 				}
1991 			}
1992 			if (cts->valid & CCB_TRANS_TQ_VALID) {
1993 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
1994 					*dptr |= DPARM_TQING;
1995 				} else {
1996 					*dptr &= ~DPARM_TQING;
1997 				}
1998 			}
1999 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2000 				switch (cts->bus_width) {
2001 				case MSG_EXT_WDTR_BUS_16_BIT:
2002 					*dptr |= DPARM_WIDE;
2003 					break;
2004 				default:
2005 					*dptr &= ~DPARM_WIDE;
2006 				}
2007 			}
2008 			/*
2009 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2010 			 * of nonzero will cause us to go to the
2011 			 * selected (from NVRAM) maximum value for
2012 			 * this device. At a later point, we'll
2013 			 * allow finer control.
2014 			 */
2015 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2016 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2017 			    (cts->sync_offset > 0)) {
2018 				*dptr |= DPARM_SYNC;
2019 			} else {
2020 				*dptr &= ~DPARM_SYNC;
2021 			}
2022 			*dptr |= DPARM_SAFE_DFLT;
2023 #else
2024 			struct ccb_trans_settings_scsi *scsi =
2025 			    &cts->proto_specific.scsi;
2026 			struct ccb_trans_settings_spi *spi =
2027 			    &cts->xport_specific.spi;
2028 			sdparam *sdp = isp->isp_param;
2029 			u_int16_t *dptr;
2030 
2031 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2032 			sdp += bus;
2033 			/*
2034 			 * We always update (internally) from goal_flags
2035 			 * so any request to change settings just gets
2036 			 * vectored to that location.
2037 			 */
2038 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2039 
2040 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2041 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2042 					*dptr |= DPARM_DISC;
2043 				else
2044 					*dptr &= ~DPARM_DISC;
2045 			}
2046 
2047 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2048 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2049 					*dptr |= DPARM_TQING;
2050 				else
2051 					*dptr &= ~DPARM_TQING;
2052 			}
2053 
2054 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2055 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2056 					*dptr |= DPARM_WIDE;
2057 				else
2058 					*dptr &= ~DPARM_WIDE;
2059 			}
2060 
2061 			/*
2062 			 * XXX: FIX ME
2063 			 */
2064 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2065 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2066 			    (spi->sync_period && spi->sync_offset)) {
2067 				*dptr |= DPARM_SYNC;
2068 				/*
2069 				 * XXX: CHECK FOR LEGALITY
2070 				 */
2071 				sdp->isp_devparam[tgt].goal_period =
2072 				    spi->sync_period;
2073 				sdp->isp_devparam[tgt].goal_offset =
2074 				    spi->sync_offset;
2075 			} else {
2076 				*dptr &= ~DPARM_SYNC;
2077 			}
2078 #endif
2079 			isp_prt(isp, ISP_LOGDEBUG0,
2080 			    "SET bus %d targ %d to flags %x off %x per %x",
2081 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2082 			    sdp->isp_devparam[tgt].goal_offset,
2083 			    sdp->isp_devparam[tgt].goal_period);
2084 			sdp->isp_devparam[tgt].dev_update = 1;
2085 			isp->isp_update |= (1 << bus);
2086 		}
2087 		ISPLOCK_2_CAMLOCK(isp);
2088 		ccb->ccb_h.status = CAM_REQ_CMP;
2089 		xpt_done(ccb);
2090 		break;
2091 	case XPT_GET_TRAN_SETTINGS:
2092 		cts = &ccb->cts;
2093 		tgt = cts->ccb_h.target_id;
2094 		CAMLOCK_2_ISPLOCK(isp);
2095 		if (IS_FC(isp)) {
2096 #ifndef	CAM_NEW_TRAN_CODE
2097 			/*
2098 			 * a lot of normal SCSI things don't make sense.
2099 			 */
2100 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2101 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2102 			/*
2103 			 * How do you measure the width of a high
2104 			 * speed serial bus? Well, in bytes.
2105 			 *
2106 			 * Offset and period make no sense, though, so we set
2107 			 * (above) a 'base' transfer speed to be gigabit.
2108 			 */
2109 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2110 #else
2111 			fcparam *fcp = isp->isp_param;
2112 			struct ccb_trans_settings_fc *fc =
2113 			    &cts->xport_specific.fc;
2114 
2115 			cts->protocol = PROTO_SCSI;
2116 			cts->protocol_version = SCSI_REV_2;
2117 			cts->transport = XPORT_FC;
2118 			cts->transport_version = 0;
2119 
2120 			fc->valid = CTS_FC_VALID_SPEED;
2121 			fc->bitrate = 100000;
2122 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2123 				struct lportdb *lp = &fcp->portdb[tgt];
2124 				fc->wwnn = lp->node_wwn;
2125 				fc->wwpn = lp->port_wwn;
2126 				fc->port = lp->portid;
2127 				fc->valid |= CTS_FC_VALID_WWNN |
2128 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2129 			}
2130 #endif
2131 		} else {
2132 #ifdef	CAM_NEW_TRAN_CODE
2133 			struct ccb_trans_settings_scsi *scsi =
2134 			    &cts->proto_specific.scsi;
2135 			struct ccb_trans_settings_spi *spi =
2136 			    &cts->xport_specific.spi;
2137 #endif
2138 			sdparam *sdp = isp->isp_param;
2139 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2140 			u_int16_t dval, pval, oval;
2141 
2142 			sdp += bus;
2143 
2144 			if (IS_CURRENT_SETTINGS(cts)) {
2145 				sdp->isp_devparam[tgt].dev_refresh = 1;
2146 				isp->isp_update |= (1 << bus);
2147 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2148 				    NULL);
2149 				dval = sdp->isp_devparam[tgt].actv_flags;
2150 				oval = sdp->isp_devparam[tgt].actv_offset;
2151 				pval = sdp->isp_devparam[tgt].actv_period;
2152 			} else {
2153 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2154 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2155 				pval = sdp->isp_devparam[tgt].nvrm_period;
2156 			}
2157 
2158 #ifndef	CAM_NEW_TRAN_CODE
2159 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2160 
2161 			if (dval & DPARM_DISC) {
2162 				cts->flags |= CCB_TRANS_DISC_ENB;
2163 			}
2164 			if (dval & DPARM_TQING) {
2165 				cts->flags |= CCB_TRANS_TAG_ENB;
2166 			}
2167 			if (dval & DPARM_WIDE) {
2168 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2169 			} else {
2170 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2171 			}
2172 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2173 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2174 
2175 			if ((dval & DPARM_SYNC) && oval != 0) {
2176 				cts->sync_period = pval;
2177 				cts->sync_offset = oval;
2178 				cts->valid |=
2179 				    CCB_TRANS_SYNC_RATE_VALID |
2180 				    CCB_TRANS_SYNC_OFFSET_VALID;
2181 			}
2182 #else
2183 			cts->protocol = PROTO_SCSI;
2184 			cts->protocol_version = SCSI_REV_2;
2185 			cts->transport = XPORT_SPI;
2186 			cts->transport_version = 2;
2187 
2188 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2189 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2190 			if (dval & DPARM_DISC) {
2191 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2192 			}
2193 			if (dval & DPARM_TQING) {
2194 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2195 			}
2196 			if ((dval & DPARM_SYNC) && oval && pval) {
2197 				spi->sync_offset = oval;
2198 				spi->sync_period = pval;
2199 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2200 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2201 			}
2202 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2203 			if (dval & DPARM_WIDE) {
2204 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2205 			} else {
2206 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2207 			}
2208 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2209 				scsi->valid = CTS_SCSI_VALID_TQ;
2210 				spi->valid |= CTS_SPI_VALID_DISC;
2211 			} else {
2212 				scsi->valid = 0;
2213 			}
2214 #endif
2215 			isp_prt(isp, ISP_LOGDEBUG0,
2216 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2217 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2218 			    bus, tgt, dval, oval, pval);
2219 		}
2220 		ISPLOCK_2_CAMLOCK(isp);
2221 		ccb->ccb_h.status = CAM_REQ_CMP;
2222 		xpt_done(ccb);
2223 		break;
2224 
2225 	case XPT_CALC_GEOMETRY:
2226 	{
2227 		struct ccb_calc_geometry *ccg;
2228 		u_int32_t secs_per_cylinder;
2229 		u_int32_t size_mb;
2230 
2231 		ccg = &ccb->ccg;
2232 		if (ccg->block_size == 0) {
2233 			isp_prt(isp, ISP_LOGERR,
2234 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2235 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2236 			ccb->ccb_h.status = CAM_REQ_INVALID;
2237 			xpt_done(ccb);
2238 			break;
2239 		}
2240 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2241 		if (size_mb > 1024) {
2242 			ccg->heads = 255;
2243 			ccg->secs_per_track = 63;
2244 		} else {
2245 			ccg->heads = 64;
2246 			ccg->secs_per_track = 32;
2247 		}
2248 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2249 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2250 		ccb->ccb_h.status = CAM_REQ_CMP;
2251 		xpt_done(ccb);
2252 		break;
2253 	}
2254 	case XPT_RESET_BUS:		/* Reset the specified bus */
2255 		bus = cam_sim_bus(sim);
2256 		CAMLOCK_2_ISPLOCK(isp);
2257 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2258 		ISPLOCK_2_CAMLOCK(isp);
2259 		if (error)
2260 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2261 		else {
2262 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2263 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2264 			else if (isp->isp_path != NULL)
2265 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2266 			ccb->ccb_h.status = CAM_REQ_CMP;
2267 		}
2268 		xpt_done(ccb);
2269 		break;
2270 
2271 	case XPT_TERM_IO:		/* Terminate the I/O process */
2272 		ccb->ccb_h.status = CAM_REQ_INVALID;
2273 		xpt_done(ccb);
2274 		break;
2275 
2276 	case XPT_PATH_INQ:		/* Path routing inquiry */
2277 	{
2278 		struct ccb_pathinq *cpi = &ccb->cpi;
2279 
2280 		cpi->version_num = 1;
2281 #ifdef	ISP_TARGET_MODE
2282 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2283 #else
2284 		cpi->target_sprt = 0;
2285 #endif
2286 		cpi->hba_eng_cnt = 0;
2287 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2288 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2289 		cpi->bus_id = cam_sim_bus(sim);
2290 		if (IS_FC(isp)) {
2291 			cpi->hba_misc = PIM_NOBUSRESET;
2292 			/*
2293 			 * Because our loop ID can shift from time to time,
2294 			 * make our initiator ID out of range of our bus.
2295 			 */
2296 			cpi->initiator_id = cpi->max_target + 1;
2297 
2298 			/*
2299 			 * Set base transfer capabilities for Fibre Channel.
2300 			 * Technically not correct because we don't know
2301 			 * what media we're running on top of- but we'll
2302 			 * look good if we always say 100MB/s.
2303 			 */
2304 			cpi->base_transfer_speed = 100000;
2305 			cpi->hba_inquiry = PI_TAG_ABLE;
2306 #ifdef	CAM_NEW_TRAN_CODE
2307 			cpi->transport = XPORT_FC;
2308 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2309 #endif
2310 		} else {
2311 			sdparam *sdp = isp->isp_param;
2312 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2313 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2314 			cpi->hba_misc = 0;
2315 			cpi->initiator_id = sdp->isp_initiator_id;
2316 			cpi->base_transfer_speed = 3300;
2317 #ifdef	CAM_NEW_TRAN_CODE
2318 			cpi->transport = XPORT_SPI;
2319 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2320 #endif
2321 		}
2322 #ifdef	CAM_NEW_TRAN_CODE
2323 		cpi->protocol = PROTO_SCSI;
2324 		cpi->protocol_version = SCSI_REV_2;
2325 #endif
2326 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2327 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2328 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2329 		cpi->unit_number = cam_sim_unit(sim);
2330 		cpi->ccb_h.status = CAM_REQ_CMP;
2331 		xpt_done(ccb);
2332 		break;
2333 	}
2334 	default:
2335 		ccb->ccb_h.status = CAM_REQ_INVALID;
2336 		xpt_done(ccb);
2337 		break;
2338 	}
2339 }
2340 
2341 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
void
isp_done(struct ccb_scsiio *sccb)
{
	/*
	 * Command completion for an initiator-mode SCSI CCB: map the
	 * result into CAM status, manage device/sim queue freezing, and
	 * hand the CCB back to the XPT layer. Called with the driver
	 * lock held (locks are swapped around xpt_done below).
	 */
	struct ispsoftc *isp = XS_ISP(sccb);

	/* A command that reported no error completes successfully. */
	if (XS_NOERR(sccb))
		XS_SETERR(sccb, CAM_REQ_CMP);

	/*
	 * The HBA saw no transport error, but the target returned a
	 * non-GOOD SCSI status: rewrite the CAM status accordingly. A
	 * CHECK CONDITION without valid autosense data is reported as
	 * an autosense failure so the peripheral driver re-fetches sense.
	 */
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    (sccb->scsi_status != SCSI_STATUS_OK)) {
		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		} else {
			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		}
	}

	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	/*
	 * On any failure, freeze the device queue (once) so the
	 * peripheral driver can decide how to recover before more
	 * commands are issued to this device.
	 */
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
			sccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(sccb->ccb_h.path, 1);
			if (sccb->scsi_status != SCSI_STATUS_OK)
				isp_prt(isp, ISP_LOGDEBUG2,
				    "freeze devq %d.%d %x %x",
				    sccb->ccb_h.target_id,
				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
				    sccb->scsi_status);
		}
	}

	/*
	 * If we were frozen waiting resources, clear that we were frozen
	 * waiting for resources. If we are no longer frozen, and the devq
	 * isn't frozen, mark the completing CCB to have the XPT layer
	 * release the simq.
	 */
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "isp_done->relsimq");
				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "isp_done->devq frozen");
			}
		} else {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "isp_done -> simqfrozen = %x",
			    isp->isp_osinfo.simqfrozen);
		}
	}
	/* Log failed completions when CAM debugging is enabled on this path. */
	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print_path(sccb->ccb_h.path);
		isp_prt(isp, ISP_LOGINFO,
		    "cam completion status 0x%x", sccb->ccb_h.status);
	}

	/*
	 * Mark the command done. If the watchdog currently owns the
	 * command (XS_CMD_WDOG_P), leave completion to it; otherwise
	 * cancel the watchdog timeout and complete the CCB here.
	 */
	XS_CMD_S_DONE(sccb);
	if (XS_CMD_WDOG_P(sccb) == 0) {
		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
		if (XS_CMD_GRACE_P(sccb)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(sccb);
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done((union ccb *) sccb);
		CAMLOCK_2_ISPLOCK(isp);
	}
}
2418 
2419 int
2420 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2421 {
2422 	int bus, rv = 0;
2423 	switch (cmd) {
2424 	case ISPASYNC_NEW_TGT_PARAMS:
2425 	{
2426 #ifdef	CAM_NEW_TRAN_CODE
2427 		struct ccb_trans_settings_scsi *scsi;
2428 		struct ccb_trans_settings_spi *spi;
2429 #endif
2430 		int flags, tgt;
2431 		sdparam *sdp = isp->isp_param;
2432 		struct ccb_trans_settings cts;
2433 		struct cam_path *tmppath;
2434 
2435 		bzero(&cts, sizeof (struct ccb_trans_settings));
2436 
2437 		tgt = *((int *)arg);
2438 		bus = (tgt >> 16) & 0xffff;
2439 		tgt &= 0xffff;
2440 		sdp += bus;
2441 		ISPLOCK_2_CAMLOCK(isp);
2442 		if (xpt_create_path(&tmppath, NULL,
2443 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2444 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2445 			CAMLOCK_2_ISPLOCK(isp);
2446 			isp_prt(isp, ISP_LOGWARN,
2447 			    "isp_async cannot make temp path for %d.%d",
2448 			    tgt, bus);
2449 			rv = -1;
2450 			break;
2451 		}
2452 		CAMLOCK_2_ISPLOCK(isp);
2453 		flags = sdp->isp_devparam[tgt].actv_flags;
2454 #ifdef	CAM_NEW_TRAN_CODE
2455 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2456 		cts.protocol = PROTO_SCSI;
2457 		cts.transport = XPORT_SPI;
2458 
2459 		scsi = &cts.proto_specific.scsi;
2460 		spi = &cts.xport_specific.spi;
2461 
2462 		if (flags & DPARM_TQING) {
2463 			scsi->valid |= CTS_SCSI_VALID_TQ;
2464 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2465 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2466 		}
2467 
2468 		if (flags & DPARM_DISC) {
2469 			spi->valid |= CTS_SPI_VALID_DISC;
2470 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2471 		}
2472 		spi->flags |= CTS_SPI_VALID_BUS_WIDTH;
2473 		if (flags & DPARM_WIDE) {
2474 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2475 		} else {
2476 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2477 		}
2478 		if (flags & DPARM_SYNC) {
2479 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2480 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2481 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2482 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2483 		}
2484 #else
2485 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2486 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2487 		if (flags & DPARM_DISC) {
2488 			cts.flags |= CCB_TRANS_DISC_ENB;
2489 		}
2490 		if (flags & DPARM_TQING) {
2491 			cts.flags |= CCB_TRANS_TAG_ENB;
2492 		}
2493 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2494 		cts.bus_width = (flags & DPARM_WIDE)?
2495 		    MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT;
2496 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2497 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2498 		if (flags & DPARM_SYNC) {
2499 			cts.valid |=
2500 			    CCB_TRANS_SYNC_RATE_VALID |
2501 			    CCB_TRANS_SYNC_OFFSET_VALID;
2502 		}
2503 #endif
2504 		isp_prt(isp, ISP_LOGDEBUG2,
2505 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2506 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2507 		    sdp->isp_devparam[tgt].actv_offset, flags);
2508 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2509 		ISPLOCK_2_CAMLOCK(isp);
2510 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2511 		xpt_free_path(tmppath);
2512 		CAMLOCK_2_ISPLOCK(isp);
2513 		break;
2514 	}
2515 	case ISPASYNC_BUS_RESET:
2516 		bus = *((int *)arg);
2517 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2518 		    bus);
2519 		if (bus > 0 && isp->isp_path2) {
2520 			ISPLOCK_2_CAMLOCK(isp);
2521 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2522 			CAMLOCK_2_ISPLOCK(isp);
2523 		} else if (isp->isp_path) {
2524 			ISPLOCK_2_CAMLOCK(isp);
2525 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2526 			CAMLOCK_2_ISPLOCK(isp);
2527 		}
2528 		break;
2529 	case ISPASYNC_LIP:
2530 		if (isp->isp_path) {
2531 			if (isp->isp_osinfo.simqfrozen == 0) {
2532 				isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq");
2533 				ISPLOCK_2_CAMLOCK(isp);
2534 				xpt_freeze_simq(isp->isp_sim, 1);
2535 				CAMLOCK_2_ISPLOCK(isp);
2536 			}
2537 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2538 		}
2539 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2540 		break;
2541 	case ISPASYNC_LOOP_RESET:
2542 		if (isp->isp_path) {
2543 			if (isp->isp_osinfo.simqfrozen == 0) {
2544 				isp_prt(isp, ISP_LOGDEBUG0,
2545 				    "Loop Reset freeze simq");
2546 				ISPLOCK_2_CAMLOCK(isp);
2547 				xpt_freeze_simq(isp->isp_sim, 1);
2548 				CAMLOCK_2_ISPLOCK(isp);
2549 			}
2550 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2551 		}
2552 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2553 		break;
2554 	case ISPASYNC_LOOP_DOWN:
2555 		if (isp->isp_path) {
2556 			if (isp->isp_osinfo.simqfrozen == 0) {
2557 				isp_prt(isp, ISP_LOGDEBUG0,
2558 				    "loop down freeze simq");
2559 				ISPLOCK_2_CAMLOCK(isp);
2560 				xpt_freeze_simq(isp->isp_sim, 1);
2561 				CAMLOCK_2_ISPLOCK(isp);
2562 			}
2563 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2564 		}
2565 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2566 		break;
2567 	case ISPASYNC_LOOP_UP:
2568 		/*
2569 		 * Now we just note that Loop has come up. We don't
2570 		 * actually do anything because we're waiting for a
2571 		 * Change Notify before activating the FC cleanup
2572 		 * thread to look at the state of the loop again.
2573 		 */
2574 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2575 		break;
2576 	case ISPASYNC_PROMENADE:
2577 	{
2578 		struct cam_path *tmppath;
2579 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2580 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2581 		static const char *roles[4] = {
2582 		    "(none)", "Target", "Initiator", "Target/Initiator"
2583 		};
2584 		fcparam *fcp = isp->isp_param;
2585 		int tgt = *((int *) arg);
2586 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2587 		struct lportdb *lp = &fcp->portdb[tgt];
2588 
2589 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2590 		    roles[lp->roles & 0x3],
2591 		    (lp->valid)? "Arrived" : "Departed",
2592 		    (u_int32_t) (lp->port_wwn >> 32),
2593 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2594 		    (u_int32_t) (lp->node_wwn >> 32),
2595 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2596 
2597 		ISPLOCK_2_CAMLOCK(isp);
2598 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2599 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2600 			CAMLOCK_2_ISPLOCK(isp);
2601                         break;
2602                 }
2603 		/*
2604 		 * Policy: only announce targets.
2605 		 */
2606 		if (lp->roles & is_tgt_mask) {
2607 			if (lp->valid) {
2608 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2609 			} else {
2610 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2611 			}
2612 		}
2613 		xpt_free_path(tmppath);
2614 		CAMLOCK_2_ISPLOCK(isp);
2615 		break;
2616 	}
2617 	case ISPASYNC_CHANGE_NOTIFY:
2618 		if (arg == ISPASYNC_CHANGE_PDB) {
2619 			isp_prt(isp, ISP_LOGINFO,
2620 			    "Port Database Changed");
2621 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2622 			isp_prt(isp, ISP_LOGINFO,
2623 			    "Name Server Database Changed");
2624 		}
2625 		cv_signal(&isp->isp_osinfo.kthread_cv);
2626 		break;
2627 	case ISPASYNC_FABRIC_DEV:
2628 	{
2629 		int target, lrange;
2630 		struct lportdb *lp = NULL;
2631 		char *pt;
2632 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
2633 		u_int32_t portid;
2634 		u_int64_t wwpn, wwnn;
2635 		fcparam *fcp = isp->isp_param;
2636 
2637 		portid =
2638 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
2639 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
2640 		    (((u_int32_t) resp->snscb_port_id[2]));
2641 
2642 		wwpn =
2643 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
2644 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
2645 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
2646 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
2647 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
2648 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
2649 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
2650 		    (((u_int64_t)resp->snscb_portname[7]));
2651 
2652 		wwnn =
2653 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
2654 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
2655 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
2656 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
2657 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
2658 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
2659 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
2660 		    (((u_int64_t)resp->snscb_nodename[7]));
2661 		if (portid == 0 || wwpn == 0) {
2662 			break;
2663 		}
2664 
2665 		switch (resp->snscb_port_type) {
2666 		case 1:
2667 			pt = "   N_Port";
2668 			break;
2669 		case 2:
2670 			pt = "  NL_Port";
2671 			break;
2672 		case 3:
2673 			pt = "F/NL_Port";
2674 			break;
2675 		case 0x7f:
2676 			pt = "  Nx_Port";
2677 			break;
2678 		case 0x81:
2679 			pt = "  F_port";
2680 			break;
2681 		case 0x82:
2682 			pt = "  FL_Port";
2683 			break;
2684 		case 0x84:
2685 			pt = "   E_port";
2686 			break;
2687 		default:
2688 			pt = "?";
2689 			break;
2690 		}
2691 		isp_prt(isp, ISP_LOGINFO,
2692 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
2693 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
2694 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
2695 		/*
2696 		 * We're only interested in SCSI_FCP types (for now)
2697 		 */
2698 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
2699 			break;
2700 		}
2701 		if (fcp->isp_topo != TOPO_F_PORT)
2702 			lrange = FC_SNS_ID+1;
2703 		else
2704 			lrange = 0;
2705 		/*
2706 		 * Is it already in our list?
2707 		 */
2708 		for (target = lrange; target < MAX_FC_TARG; target++) {
2709 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2710 				continue;
2711 			}
2712 			lp = &fcp->portdb[target];
2713 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
2714 				lp->fabric_dev = 1;
2715 				break;
2716 			}
2717 		}
2718 		if (target < MAX_FC_TARG) {
2719 			break;
2720 		}
2721 		for (target = lrange; target < MAX_FC_TARG; target++) {
2722 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2723 				continue;
2724 			}
2725 			lp = &fcp->portdb[target];
2726 			if (lp->port_wwn == 0) {
2727 				break;
2728 			}
2729 		}
2730 		if (target == MAX_FC_TARG) {
2731 			isp_prt(isp, ISP_LOGWARN,
2732 			    "no more space for fabric devices");
2733 			break;
2734 		}
2735 		lp->node_wwn = wwnn;
2736 		lp->port_wwn = wwpn;
2737 		lp->portid = portid;
2738 		lp->fabric_dev = 1;
2739 		break;
2740 	}
2741 #ifdef	ISP_TARGET_MODE
2742 	case ISPASYNC_TARGET_MESSAGE:
2743 	{
2744 		tmd_msg_t *mp = arg;
2745 		isp_prt(isp, ISP_LOGALL,
2746 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2747 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2748 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2749 		    mp->nt_msg[0]);
2750 		break;
2751 	}
2752 	case ISPASYNC_TARGET_EVENT:
2753 	{
2754 		tmd_event_t *ep = arg;
2755 		isp_prt(isp, ISP_LOGALL,
2756 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2757 		break;
2758 	}
2759 	case ISPASYNC_TARGET_ACTION:
2760 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2761 		default:
2762 			isp_prt(isp, ISP_LOGWARN,
2763 			   "event 0x%x for unhandled target action",
2764 			    ((isphdr_t *)arg)->rqs_entry_type);
2765 			break;
2766 		case RQSTYPE_ATIO:
2767 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2768 			break;
2769 		case RQSTYPE_ATIO2:
2770 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2771 			break;
2772 		case RQSTYPE_CTIO2:
2773 		case RQSTYPE_CTIO:
2774 			rv = isp_handle_platform_ctio(isp, arg);
2775 			break;
2776 		case RQSTYPE_ENABLE_LUN:
2777 		case RQSTYPE_MODIFY_LUN:
2778 			if (IS_DUALBUS(isp)) {
2779 				bus =
2780 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
2781 			} else {
2782 				bus = 0;
2783 			}
2784 			isp_cv_signal_rqe(isp, bus,
2785 			    ((lun_entry_t *)arg)->le_status);
2786 			break;
2787 		}
2788 		break;
2789 #endif
2790 	case ISPASYNC_FW_CRASH:
2791 	{
2792 		u_int16_t mbox1, mbox6;
2793 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
2794 		if (IS_DUALBUS(isp)) {
2795 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
2796 		} else {
2797 			mbox6 = 0;
2798 		}
2799                 isp_prt(isp, ISP_LOGERR,
2800                     "Internal Firmware on bus %d Error @ RISC Address 0x%x",
2801                     mbox6, mbox1);
2802 		isp_reinit(isp);
2803 		break;
2804 	}
2805 	case ISPASYNC_UNHANDLED_RESPONSE:
2806 		break;
2807 	default:
2808 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2809 		break;
2810 	}
2811 	return (rv);
2812 }
2813 
2814 
2815 /*
2816  * Locks are held before coming here.
2817  */
2818 void
2819 isp_uninit(struct ispsoftc *isp)
2820 {
2821 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2822 	DISABLE_INTS(isp);
2823 }
2824 
2825 void
2826 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2827 {
2828 	va_list ap;
2829 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2830 		return;
2831 	}
2832 	printf("%s: ", device_get_nameunit(isp->isp_dev));
2833 	va_start(ap, fmt);
2834 	vprintf(fmt, ap);
2835 	va_end(ap);
2836 	printf("\n");
2837 }
2838