xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision eacee0ff7ec955b32e09515246bd97b6edcd2b0f)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
31 #include <machine/stdarg.h>	/* for use by isp_prt below */
32 #include <sys/conf.h>
33 #include <sys/ioccom.h>
34 #include <dev/isp/isp_ioctl.h>
35 
36 
37 static d_ioctl_t ispioctl;
38 static void isp_intr_enable(void *);
39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
40 static void isp_poll(struct cam_sim *);
41 #if	0
42 static void isp_relsim(void *);
43 #endif
44 static timeout_t isp_watchdog;
45 static void isp_kthread(void *);
46 static void isp_action(struct cam_sim *, union ccb *);
47 
48 
49 #define ISP_CDEV_MAJOR	248
50 static struct cdevsw isp_cdevsw = {
51 	/* open */	nullopen,
52 	/* close */	nullclose,
53 	/* read */	noread,
54 	/* write */	nowrite,
55 	/* ioctl */	ispioctl,
56 	/* poll */	nopoll,
57 	/* mmap */	nommap,
58 	/* strategy */	nostrategy,
59 	/* name */	"isp",
60 	/* maj */	ISP_CDEV_MAJOR,
61 	/* dump */	nodump,
62 	/* psize */	nopsize,
63 	/* flags */	D_TAPE,
64 };
65 
66 static struct ispsoftc *isplist = NULL;
67 
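/*
 * Attach-time CAM plumbing: allocate a shared device queue, construct a
 * SIM (two for dual bus adapters) and a wildcard path per bus, register
 * an async callback for lost devices, start the Fibre Channel helper
 * thread where applicable, and create the /dev/isp<unit> control node.
 */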
68 void
69 isp_attach(struct ispsoftc *isp)
70 {
71 	int primary, secondary;
72 	struct ccb_setasync csa;
73 	struct cam_devq *devq;
74 	struct cam_sim *sim;
75 	struct cam_path *path;
76 
77 	/*
78 	 * Establish (in case of 12X0) which bus is the primary.
79 	 */
80 
81 	primary = 0;
82 	secondary = 1;
83 
84 	/*
85 	 * Create the device queue for our SIM(s).
86 	 */
87 	devq = cam_simq_alloc(isp->isp_maxcmds);
88 	if (devq == NULL) {
89 		return;
90 	}
91 
92 	/*
93 	 * Construct our SIM entry.
94 	 */
95 	ISPLOCK_2_CAMLOCK(isp);
96 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
97 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
98 	if (sim == NULL) {
99 		cam_simq_free(devq);
100 		CAMLOCK_2_ISPLOCK(isp);
101 		return;
102 	}
103 	CAMLOCK_2_ISPLOCK(isp);
104 
105 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
106 	isp->isp_osinfo.ehook.ich_arg = isp;
107 	ISPLOCK_2_CAMLOCK(isp);
108 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
109 		cam_sim_free(sim, TRUE);
110 		CAMLOCK_2_ISPLOCK(isp);
111 		isp_prt(isp, ISP_LOGERR,
112 		    "could not establish interrupt enable hook");
113 		return;
114 	}
115 
116 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
117 		cam_sim_free(sim, TRUE);
118 		CAMLOCK_2_ISPLOCK(isp);
119 		return;
120 	}
121 
122 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
123 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
124 		xpt_bus_deregister(cam_sim_path(sim));
125 		cam_sim_free(sim, TRUE);
126 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
127 		CAMLOCK_2_ISPLOCK(isp);
128 		return;
129 	}
130 
131 	xpt_setup_ccb(&csa.ccb_h, path, 5);
132 	csa.ccb_h.func_code = XPT_SASYNC_CB;
133 	csa.event_enable = AC_LOST_DEVICE;
134 	csa.callback = isp_cam_async;
135 	csa.callback_arg = sim;
136 	xpt_action((union ccb *)&csa);
137 	CAMLOCK_2_ISPLOCK(isp);
138 	isp->isp_sim = sim;
139 	isp->isp_path = path;
140 	/*
141 	 * Create a kernel thread for fibre channel instances. We
142 	 * don't have dual channel FC cards.
143 	 */
144 	if (IS_FC(isp)) {
145 		ISPLOCK_2_CAMLOCK(isp);
146 		/* XXX: LOCK VIOLATION */
147 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
148 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
149 		    RFHIGHPID, "%s: fc_thrd",
150 		    device_get_nameunit(isp->isp_dev))) {
151 			xpt_bus_deregister(cam_sim_path(sim));
152 			cam_sim_free(sim, TRUE);
153 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
154 			CAMLOCK_2_ISPLOCK(isp);
155 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
156 			return;
157 		}
158 		CAMLOCK_2_ISPLOCK(isp);
159 	}
160 
161 
162 	/*
163 	 * If we have a second channel, construct SIM entry for that.
164 	 */
165 	if (IS_DUALBUS(isp)) {
166 		ISPLOCK_2_CAMLOCK(isp);
167 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
168 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
169 		if (sim == NULL) {
170 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
171 			xpt_free_path(isp->isp_path);
172 			cam_simq_free(devq);
173 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
174 			return;
175 		}
176 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
177 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
178 			xpt_free_path(isp->isp_path);
179 			cam_sim_free(sim, TRUE);
180 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
181 			CAMLOCK_2_ISPLOCK(isp);
182 			return;
183 		}
184 
185 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
186 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
187 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
188 			xpt_free_path(isp->isp_path);
189 			xpt_bus_deregister(cam_sim_path(sim));
190 			cam_sim_free(sim, TRUE);
191 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
192 			CAMLOCK_2_ISPLOCK(isp);
193 			return;
194 		}
195 
196 		xpt_setup_ccb(&csa.ccb_h, path, 5);
197 		csa.ccb_h.func_code = XPT_SASYNC_CB;
198 		csa.event_enable = AC_LOST_DEVICE;
199 		csa.callback = isp_cam_async;
200 		csa.callback_arg = sim;
201 		xpt_action((union ccb *)&csa);
202 		CAMLOCK_2_ISPLOCK(isp);
203 		isp->isp_sim2 = sim;
204 		isp->isp_path2 = path;
205 	}
206 
207 #ifdef	ISP_TARGET_MODE
208 	cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
209 	cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
210 	cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
211 	cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
212 #endif
213 	/*
214 	 * Create device nodes
215 	 */
216 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
217 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
218 
219 	if (isp->isp_role != ISP_ROLE_NONE) {
220 		isp->isp_state = ISP_RUNSTATE;
221 		ENABLE_INTS(isp);
222 	}
223 	if (isplist == NULL) {
224 		isplist = isp;
225 	} else {
226 		struct ispsoftc *tmp = isplist;
227 		while (tmp->isp_osinfo.next) {
228 			tmp = tmp->isp_osinfo.next;
229 		}
230 		tmp->isp_osinfo.next = isp;
231 	}
232 
233 }
234 
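/*
 * Ioctl handler for the /dev/isp<unit> control nodes created above. The
 * softc is found by matching the minor number against the unit numbers
 * on the isplist chain; individual ISP_* commands take ISP_LOCK around
 * any hardware or state manipulation.
 */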
235 static int
236 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
237 {
238 	struct ispsoftc *isp;
239 	int retval = ENOTTY;
240 
241 	isp = isplist;
242 	while (isp) {
243 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
244 			break;
245 		}
246 		isp = isp->isp_osinfo.next;
247 	}
248 	if (isp == NULL)
249 		return (ENXIO);
250 
251 	switch (cmd) {
252 #ifdef	ISP_FW_CRASH_DUMP
253 	case ISP_GET_FW_CRASH_DUMP:
254 	{
255 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
256 		size_t sz;
257 
258 		retval = 0;
259 		if (IS_2200(isp))
260 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
261 		else
262 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
263 		ISP_LOCK(isp);
264 		if (ptr && *ptr) {
265 			void *uaddr = *((void **) addr);
266 			if (copyout(ptr, uaddr, sz)) {
267 				retval = EFAULT;
268 			} else {
269 				*ptr = 0;
270 			}
271 		} else {
272 			retval = ENXIO;
273 		}
274 		ISP_UNLOCK(isp);
275 		break;
276 	}
277 
278 	case ISP_FORCE_CRASH_DUMP:
279 		ISP_LOCK(isp);
280 		if ((isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN) == 0) {
281 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
282 			ISPLOCK_2_CAMLOCK(isp);
283 			xpt_freeze_simq(isp->isp_sim, 1);
284 			CAMLOCK_2_ISPLOCK(isp);
285 		}
286 		isp_fw_dump(isp);
287 		isp_reinit(isp);
288 		ISP_UNLOCK(isp);
289 		retval = 0;
290 		break;
291 #endif
292 	case ISP_SDBLEV:
293 	{
294 		int olddblev = isp->isp_dblev;
295 		isp->isp_dblev = *(int *)addr;
296 		*(int *)addr = olddblev;
297 		retval = 0;
298 		break;
299 	}
300 	case ISP_RESETHBA:
301 		ISP_LOCK(isp);
302 		isp_reinit(isp);
303 		ISP_UNLOCK(isp);
304 		retval = 0;
305 		break;
306 	case ISP_FC_RESCAN:
307 		if (IS_FC(isp)) {
308 			ISP_LOCK(isp);
309 			if (isp_fc_runstate(isp, 5 * 1000000)) {
310 				retval = EIO;
311 			} else {
312 				retval = 0;
313 			}
314 			ISP_UNLOCK(isp);
315 		}
316 		break;
317 	case ISP_FC_LIP:
318 		if (IS_FC(isp)) {
319 			ISP_LOCK(isp);
320 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
321 				retval = EIO;
322 			} else {
323 				retval = 0;
324 			}
325 			ISP_UNLOCK(isp);
326 		}
327 		break;
328 	case ISP_FC_GETDINFO:
329 	{
330 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
331 		struct lportdb *lp;
332 
333 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
334 			retval = EINVAL;
335 			break;
336 		}
337 		ISP_LOCK(isp);
338 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
339 		if (lp->valid) {
340 			ifc->loopid = lp->loopid;
341 			ifc->portid = lp->portid;
342 			ifc->node_wwn = lp->node_wwn;
343 			ifc->port_wwn = lp->port_wwn;
344 			retval = 0;
345 		} else {
346 			retval = ENODEV;
347 		}
348 		ISP_UNLOCK(isp);
349 		break;
350 	}
351 	case ISP_GET_STATS:
352 	{
353 		isp_stats_t *sp = (isp_stats_t *) addr;
354 
355 		MEMZERO(sp, sizeof (*sp));
356 		sp->isp_stat_version = ISP_STATS_VERSION;
357 		sp->isp_type = isp->isp_type;
358 		sp->isp_revision = isp->isp_revision;
359 		ISP_LOCK(isp);
360 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
361 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
362 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
363 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
364 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
365 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
366 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
367 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
368 		ISP_UNLOCK(isp);
369 		retval = 0;
370 		break;
371 	}
372 	case ISP_CLR_STATS:
373 		ISP_LOCK(isp);
374 		isp->isp_intcnt = 0;
375 		isp->isp_intbogus = 0;
376 		isp->isp_intmboxc = 0;
377 		isp->isp_intoasync = 0;
378 		isp->isp_rsltccmplt = 0;
379 		isp->isp_fphccmplt = 0;
380 		isp->isp_rscchiwater = 0;
381 		isp->isp_fpcchiwater = 0;
382 		ISP_UNLOCK(isp);
383 		retval = 0;
384 		break;
385 
386 	default:
387 		break;
388 	}
389 	return (retval);
390 }
391 
392 static void
393 isp_intr_enable(void *arg)
394 {
395 	struct ispsoftc *isp = arg;
396 	if (isp->isp_role != ISP_ROLE_NONE) {
397 		ENABLE_INTS(isp);
398 		isp->isp_osinfo.intsok = 1;
399 	}
400 	/* Release our hook so that the boot can continue. */
401 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
402 }
403 
404 /*
405  * Put the target mode functions here, because some are inlines
406  */
407 
408 #ifdef	ISP_TARGET_MODE
409 
410 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
411 static __inline int are_any_luns_enabled(struct ispsoftc *, int);
412 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
413 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
414 static __inline int isp_psema_sig_rqe(struct ispsoftc *, int);
415 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
416 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int, int);
417 static __inline void isp_vsema_rqe(struct ispsoftc *, int);
418 static __inline atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
419 static cam_status
420 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
421 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
422 static void isp_en_lun(struct ispsoftc *, union ccb *);
423 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
424 static timeout_t isp_refire_putback_atio;
425 static void isp_complete_ctio(union ccb *);
426 static void isp_target_putback_atio(union ccb *);
427 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
428 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
429 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
430 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
431 
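/*
 * Target mode lun bookkeeping. Each enabled lun has a tstate_t chained
 * off the lun_hash table (the wildcard lun uses the per-bus tsdflt entry
 * instead). get_lun_statep takes a hold reference that rls_lun_statep
 * drops; destroy_lun_state will not free a tstate_t that is still held.
 */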
432 static __inline int
433 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
434 {
435 	tstate_t *tptr;
436 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
437 	if (tptr == NULL) {
438 		return (0);
439 	}
440 	do {
441 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
442 			return (1);
443 		}
444 	} while ((tptr = tptr->next) != NULL);
445 	return (0);
446 }
447 
448 static __inline int
449 are_any_luns_enabled(struct ispsoftc *isp, int port)
450 {
451 	int lo, hi;
452 	if (IS_DUALBUS(isp)) {
453 		lo = (port * (LUN_HASH_SIZE >> 1));
454 		hi = lo + (LUN_HASH_SIZE >> 1);
455 	} else {
456 		lo = 0;
457 		hi = LUN_HASH_SIZE;
458 	}
459 	for (; lo < hi; lo++) {
460 		if (isp->isp_osinfo.lun_hash[lo]) {
461 			return (1);
462 		}
463 	}
464 	return (0);
465 }
466 
467 static __inline tstate_t *
468 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
469 {
470 	tstate_t *tptr = NULL;
471 
472 	if (lun == CAM_LUN_WILDCARD) {
473 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
474 			tptr = &isp->isp_osinfo.tsdflt[bus];
475 			tptr->hold++;
476 			return (tptr);
477 		}
478 	} else {
479 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
480 		if (tptr == NULL) {
481 			return (NULL);
482 		}
483 	}
484 
485 	do {
486 		if (tptr->lun == lun && tptr->bus == bus) {
487 			tptr->hold++;
488 			return (tptr);
489 		}
490 	} while ((tptr = tptr->next) != NULL);
491 	return (tptr);
492 }
493 
494 static __inline void
495 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
496 {
497 	if (tptr->hold)
498 		tptr->hold--;
499 }
500 
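/*
 * The next four routines form a per-bus rendezvous with ENABLE/MODIFY LUN
 * response queue entries: isp_psema_sig_rqe and isp_vsema_rqe act as a
 * binary semaphore (TM_BUSY/TM_WANTED) over tgtcv0, while
 * isp_cv_wait_timed_rqe sleeps on tgtcv1 until isp_cv_signal_rqe posts
 * the completion status into rstatus[bus].
 */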
501 static __inline int
502 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
503 {
504 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
505 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
506 		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
507 			return (-1);
508 		}
509 	}
510 	isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
511 	return (0);
512 }
513 
514 static __inline int
515 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
516 {
517 	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
518 		return (-1);
519 	}
520 	return (0);
521 }
522 
523 static __inline void
524 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
525 {
526 	isp->isp_osinfo.rstatus[bus] = status;
527 	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
528 }
529 
530 static __inline void
531 isp_vsema_rqe(struct ispsoftc *isp, int bus)
532 {
533 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
534 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
535 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
536 	}
537 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
538 }
539 
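/*
 * Find the ATIO private data adjunct whose tag matches the one given by
 * scanning the fixed-size atpdp array; a tag of zero therefore returns
 * the first unused slot.
 */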
540 static __inline atio_private_data_t *
541 isp_get_atpd(struct ispsoftc *isp, int tag)
542 {
543 	atio_private_data_t *atp;
544 	for (atp = isp->isp_osinfo.atpdp;
545 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
546 		if (atp->tag == tag)
547 			return (atp);
548 	}
549 	return (NULL);
550 }
551 
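/*
 * Allocate and initialize a tstate_t for a (bus, lun) pair: create its
 * own path, initialize the pending ATIO/INOT lists, link it into the
 * lun hash chain, and return it with a hold count of one.
 */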
552 static cam_status
553 create_lun_state(struct ispsoftc *isp, int bus,
554     struct cam_path *path, tstate_t **rslt)
555 {
556 	cam_status status;
557 	lun_id_t lun;
558 	int hfx;
559 	tstate_t *tptr, *new;
560 
561 	lun = xpt_path_lun_id(path);
562 	if (lun < 0) {
563 		return (CAM_LUN_INVALID);
564 	}
565 	if (is_lun_enabled(isp, bus, lun)) {
566 		return (CAM_LUN_ALRDY_ENA);
567 	}
568 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
569 	if (new == NULL) {
570 		return (CAM_RESRC_UNAVAIL);
571 	}
572 
573 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
574 	    xpt_path_target_id(path), xpt_path_lun_id(path));
575 	if (status != CAM_REQ_CMP) {
576 		free(new, M_DEVBUF);
577 		return (status);
578 	}
579 	new->bus = bus;
580 	new->lun = lun;
581 	SLIST_INIT(&new->atios);
582 	SLIST_INIT(&new->inots);
583 	new->hold = 1;
584 
585 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
586 	tptr = isp->isp_osinfo.lun_hash[hfx];
587 	if (tptr == NULL) {
588 		isp->isp_osinfo.lun_hash[hfx] = new;
589 	} else {
590 		while (tptr->next)
591 			tptr = tptr->next;
592 		tptr->next = new;
593 	}
594 	*rslt = new;
595 	return (CAM_REQ_CMP);
596 }
597 
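/*
 * Unlink a tstate_t from the lun hash chain and free it, but only if no
 * one still holds a reference on it.
 */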
598 static __inline void
599 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
600 {
601 	int hfx;
602 	tstate_t *lw, *pw;
603 
604 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
605 	if (tptr->hold) {
606 		return;
607 	}
608 	pw = isp->isp_osinfo.lun_hash[hfx];
609 	if (pw == NULL) {
610 		return;
611 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
612 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
613 	} else {
614 		lw = pw;
615 		pw = lw->next;
616 		while (pw) {
617 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
618 				lw->next = pw->next;
619 				break;
620 			}
621 			lw = pw;
622 			pw = pw->next;
623 		}
624 		if (pw == NULL) {
625 			return;
626 		}
627 	}
628 	free(tptr, M_DEVBUF);
629 }
630 
631 /*
632  * we enter with our locks held.
633  */
634 static void
635 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
636 {
637 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
638 	struct ccb_en_lun *cel = &ccb->cel;
639 	tstate_t *tptr;
640 	u_int16_t rstat;
641 	int bus, cmd, av, wildcard;
642 	lun_id_t lun;
643 	target_id_t tgt;
644 
645 
646 	bus = XS_CHANNEL(ccb) & 0x1;
647 	tgt = ccb->ccb_h.target_id;
648 	lun = ccb->ccb_h.target_lun;
649 
650 	/*
651 	 * Do some sanity checking first.
652 	 */
653 
654 	if ((lun != CAM_LUN_WILDCARD) &&
655 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
656 		ccb->ccb_h.status = CAM_LUN_INVALID;
657 		return;
658 	}
659 
660 	if (IS_SCSI(isp)) {
661 		sdparam *sdp = isp->isp_param;
662 		sdp += bus;
663 		if (tgt != CAM_TARGET_WILDCARD &&
664 		    tgt != sdp->isp_initiator_id) {
665 			ccb->ccb_h.status = CAM_TID_INVALID;
666 			return;
667 		}
668 	} else {
669 		if (tgt != CAM_TARGET_WILDCARD &&
670 		    tgt != FCPARAM(isp)->isp_iid) {
671 			ccb->ccb_h.status = CAM_TID_INVALID;
672 			return;
673 		}
674 		/*
675 		 * This is as a good a place as any to check f/w capabilities.
676 		 */
677 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
678 			isp_prt(isp, ISP_LOGERR,
679 			    "firmware does not support target mode");
680 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
681 			return;
682 		}
683 		/*
684 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
685 		 * XXX: dorks with our already fragile enable/disable code.
686 		 */
687 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
688 			isp_prt(isp, ISP_LOGERR,
689 			    "firmware not SCCLUN capable");
690 		}
691 	}
692 
693 	if (tgt == CAM_TARGET_WILDCARD) {
694 		if (lun == CAM_LUN_WILDCARD) {
695 			wildcard = 1;
696 		} else {
697 			ccb->ccb_h.status = CAM_LUN_INVALID;
698 			return;
699 		}
700 	} else {
701 		wildcard = 0;
702 	}
703 
704 	/*
705 	 * Next check to see whether this is a target/lun wildcard action.
706 	 *
707 	 * If so, we know that we can accept commands for luns that haven't
708 	 * been enabled yet and send them upstream. Otherwise, we have to
709 	 * handle them locally (if we see them at all).
710 	 */
711 
712 	if (wildcard) {
713 		tptr = &isp->isp_osinfo.tsdflt[bus];
714 		if (cel->enable) {
715 			if (isp->isp_osinfo.tmflags[bus] &
716 			    TM_WILDCARD_ENABLED) {
717 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
718 				return;
719 			}
720 			ccb->ccb_h.status =
721 			    xpt_create_path(&tptr->owner, NULL,
722 			    xpt_path_path_id(ccb->ccb_h.path),
723 			    xpt_path_target_id(ccb->ccb_h.path),
724 			    xpt_path_lun_id(ccb->ccb_h.path));
725 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
726 				return;
727 			}
728 			SLIST_INIT(&tptr->atios);
729 			SLIST_INIT(&tptr->inots);
730 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
731 		} else {
732 			if ((isp->isp_osinfo.tmflags[bus] &
733 			    TM_WILDCARD_ENABLED) == 0) {
734 				ccb->ccb_h.status = CAM_REQ_CMP;
735 				return;
736 			}
737 			if (tptr->hold) {
738 				ccb->ccb_h.status = CAM_SCSI_BUSY;
739 				return;
740 			}
741 			xpt_free_path(tptr->owner);
742 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
743 		}
744 	}
745 
746 	/*
747 	 * Now check to see whether this bus needs to be
748 	 * enabled/disabled with respect to target mode.
749 	 */
750 	av = bus << 31;
751 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
752 		av |= ENABLE_TARGET_FLAG;
753 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
754 		if (av) {
755 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
756 			if (wildcard) {
757 				isp->isp_osinfo.tmflags[bus] &=
758 				    ~TM_WILDCARD_ENABLED;
759 				xpt_free_path(tptr->owner);
760 			}
761 			return;
762 		}
763 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
764 		isp_prt(isp, ISP_LOGINFO,
765 		    "Target Mode enabled on channel %d", bus);
766 	} else if (cel->enable == 0 &&
767 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
768 		if (are_any_luns_enabled(isp, bus)) {
769 			ccb->ccb_h.status = CAM_SCSI_BUSY;
770 			return;
771 		}
772 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
773 		if (av) {
774 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
775 			return;
776 		}
777 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
778 		isp_prt(isp, ISP_LOGINFO,
779 		    "Target Mode disabled on channel %d", bus);
780 	}
781 
782 	if (wildcard) {
783 		ccb->ccb_h.status = CAM_REQ_CMP;
784 		return;
785 	}
786 
787 	if (cel->enable) {
788 		ccb->ccb_h.status =
789 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
790 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
791 			return;
792 		}
793 	} else {
794 		tptr = get_lun_statep(isp, bus, lun);
795 		if (tptr == NULL) {
796 			ccb->ccb_h.status = CAM_LUN_INVALID;
797 			return;
798 		}
799 	}
800 
801 	if (isp_psema_sig_rqe(isp, bus)) {
802 		rls_lun_statep(isp, tptr);
803 		if (cel->enable)
804 			destroy_lun_state(isp, tptr);
805 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
806 		return;
807 	}
808 
809 	if (cel->enable) {
810 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
811 		int c, n, ulun = lun;
812 
813 		cmd = RQSTYPE_ENABLE_LUN;
814 		c = DFLT_CMND_CNT;
815 		n = DFLT_INOT_CNT;
816 		if (IS_FC(isp) && lun != 0) {
817 			cmd = RQSTYPE_MODIFY_LUN;
818 			n = 0;
819 			/*
820 			 * For SCC firmware, we only deal with setting
821 			 * (enabling or modifying) lun 0.
822 			 */
823 			ulun = 0;
824 		}
825 		rstat = LUN_ERR;
826 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
827 			xpt_print_path(ccb->ccb_h.path);
828 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
829 			goto out;
830 		}
831 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
832 			xpt_print_path(ccb->ccb_h.path);
833 			isp_prt(isp, ISP_LOGERR,
834 			    "wait for ENABLE/MODIFY LUN timed out");
835 			goto out;
836 		}
837 		rstat = isp->isp_osinfo.rstatus[bus];
838 		if (rstat != LUN_OK) {
839 			xpt_print_path(ccb->ccb_h.path);
840 			isp_prt(isp, ISP_LOGERR,
841 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
842 			goto out;
843 		}
844 	} else {
845 		int c, n, ulun = lun;
846 		u_int32_t seq;
847 
848 		rstat = LUN_ERR;
849 		seq = isp->isp_osinfo.rollinfo++;
850 		cmd = -RQSTYPE_MODIFY_LUN;
851 
852 		c = DFLT_CMND_CNT;
853 		n = DFLT_INOT_CNT;
854 		if (IS_FC(isp) && lun != 0) {
855 			n = 0;
856 			/*
857 			 * For SCC firmware, we only deal with setting
858 			 * (enabling or modifying) lun 0.
859 			 */
860 			ulun = 0;
861 		}
862 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
863 			xpt_print_path(ccb->ccb_h.path);
864 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
865 			goto out;
866 		}
867 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
868 			xpt_print_path(ccb->ccb_h.path);
869 			isp_prt(isp, ISP_LOGERR,
870 			    "wait for MODIFY LUN timed out");
871 			goto out;
872 		}
873 		rstat = isp->isp_osinfo.rstatus[bus];
874 		if (rstat != LUN_OK) {
875 			xpt_print_path(ccb->ccb_h.path);
876 			isp_prt(isp, ISP_LOGERR,
877 			    "MODIFY LUN returned 0x%x", rstat);
878 			goto out;
879 		}
880 		if (IS_FC(isp) && lun) {
881 			goto out;
882 		}
883 
884 		seq = isp->isp_osinfo.rollinfo++;
885 
886 		rstat = LUN_ERR;
887 		cmd = -RQSTYPE_ENABLE_LUN;
888 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
889 			xpt_print_path(ccb->ccb_h.path);
890 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
891 			goto out;
892 		}
893 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
894 			xpt_print_path(ccb->ccb_h.path);
895 			isp_prt(isp, ISP_LOGERR,
896 			     "wait for DISABLE LUN timed out");
897 			goto out;
898 		}
899 		rstat = isp->isp_osinfo.rstatus[bus];
900 		if (rstat != LUN_OK) {
901 			xpt_print_path(ccb->ccb_h.path);
902 			isp_prt(isp, ISP_LOGWARN,
903 			    "DISABLE LUN returned 0x%x", rstat);
904 			goto out;
905 		}
906 		if (are_any_luns_enabled(isp, bus) == 0) {
907 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
908 			if (av) {
909 				isp_prt(isp, ISP_LOGWARN,
910 				    "disable target mode on channel %d failed",
911 				    bus);
912 				goto out;
913 			}
914 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
915 			xpt_print_path(ccb->ccb_h.path);
916 			isp_prt(isp, ISP_LOGINFO,
917 			    "Target Mode disabled on channel %d", bus);
918 		}
919 	}
920 
921 out:
922 	isp_vsema_rqe(isp, bus);
923 
924 	if (rstat != LUN_OK) {
925 		xpt_print_path(ccb->ccb_h.path);
926 		isp_prt(isp, ISP_LOGWARN,
927 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
928 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
929 		rls_lun_statep(isp, tptr);
930 		if (cel->enable)
931 			destroy_lun_state(isp, tptr);
932 	} else {
933 		xpt_print_path(ccb->ccb_h.path);
934 		isp_prt(isp, ISP_LOGINFO, lfmt,
935 		    (cel->enable) ? "en" : "dis", bus);
936 		rls_lun_statep(isp, tptr);
937 		if (cel->enable == 0) {
938 			destroy_lun_state(isp, tptr);
939 		}
940 		ccb->ccb_h.status = CAM_REQ_CMP;
941 	}
942 }
943 
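/*
 * Abort a queued target mode CCB (XPT_ACCEPT_TARGET_IO or
 * XPT_IMMED_NOTIFY) by unlinking it from the lun's pending ATIO or INOT
 * list, if it can be found there.
 */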
944 static cam_status
945 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
946 {
947 	tstate_t *tptr;
948 	struct ccb_hdr_slist *lp;
949 	struct ccb_hdr *curelm;
950 	int found;
951 	union ccb *accb = ccb->cab.abort_ccb;
952 
953 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
954 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
955 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
956 			return (CAM_PATH_INVALID);
957 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
958 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
959 			return (CAM_PATH_INVALID);
960 		}
961 	}
962 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
963 	if (tptr == NULL) {
964 		return (CAM_PATH_INVALID);
965 	}
966 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
967 		lp = &tptr->atios;
968 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
969 		lp = &tptr->inots;
970 	} else {
971 		rls_lun_statep(isp, tptr);
972 		return (CAM_UA_ABORT);
973 	}
974 	curelm = SLIST_FIRST(lp);
975 	found = 0;
976 	if (curelm == &accb->ccb_h) {
977 		found = 1;
978 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
979 	} else {
980 		while(curelm != NULL) {
981 			struct ccb_hdr *nextelm;
982 
983 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
984 			if (nextelm == &accb->ccb_h) {
985 				found = 1;
986 				SLIST_NEXT(curelm, sim_links.sle) =
987 				    SLIST_NEXT(nextelm, sim_links.sle);
988 				break;
989 			}
990 			curelm = nextelm;
991 		}
992 	}
993 	rls_lun_statep(isp, tptr);
994 	if (found) {
995 		accb->ccb_h.status = CAM_REQ_ABORTED;
996 		return (CAM_REQ_CMP);
997 	}
998 	return(CAM_PATH_INVALID);
999 }
1000 
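/*
 * Start a CTIO (parallel SCSI) or CTIO2 (Fibre Channel) for a target mode
 * continue target I/O request. The entry is built in a local buffer in
 * native format, a handle is reserved for the CCB, and the platform DMA
 * setup routine swizzles and queues it to the firmware.
 */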
1001 static cam_status
1002 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1003 {
1004 	void *qe;
1005 	struct ccb_scsiio *cso = &ccb->csio;
1006 	u_int16_t *hp, save_handle;
1007 	u_int16_t nxti, optr;
1008 	u_int8_t local[QENTRY_LEN];
1009 
1010 
1011 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1012 		xpt_print_path(ccb->ccb_h.path);
1013 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1014 		return (CAM_RESRC_UNAVAIL);
1015 	}
1016 	bzero(local, QENTRY_LEN);
1017 
1018 	/*
1019 	 * We're either moving data or completing a command here.
1020 	 */
1021 
1022 	if (IS_FC(isp)) {
1023 		atio_private_data_t *atp;
1024 		ct2_entry_t *cto = (ct2_entry_t *) local;
1025 
1026 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1027 		cto->ct_header.rqs_entry_count = 1;
1028 		cto->ct_iid = cso->init_id;
1029 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1030 			cto->ct_lun = ccb->ccb_h.target_lun;
1031 		}
1032 
1033 		atp = isp_get_atpd(isp, cso->tag_id);
1034 		if (atp == NULL) {
1035 			panic("cannot find private data adjunct for tag %x",
1036 			    cso->tag_id);
1037 		}
1038 
1039 		cto->ct_rxid = cso->tag_id;
1040 		if (cso->dxfer_len == 0) {
1041 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1042 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1043 				cto->ct_flags |= CT2_SENDSTATUS;
1044 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1045 				cto->ct_resid =
1046 				    atp->orig_datalen - atp->bytes_xfered;
1047 			}
1048 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1049 				int m = min(cso->sense_len, MAXRESPLEN);
1050 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1051 				cto->rsp.m1.ct_senselen = m;
1052 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1053 			}
1054 		} else {
1055 			cto->ct_flags |= CT2_FLAG_MODE0;
1056 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1057 				cto->ct_flags |= CT2_DATA_IN;
1058 			} else {
1059 				cto->ct_flags |= CT2_DATA_OUT;
1060 			}
1061 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1062 				cto->ct_flags |= CT2_SENDSTATUS;
1063 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1064 				cto->ct_resid =
1065 				    atp->orig_datalen -
1066 				    (atp->bytes_xfered + cso->dxfer_len);
1067 			} else {
1068 				atp->last_xframt = cso->dxfer_len;
1069 			}
1070 			/*
1071 			 * If we're sending data and status back together,
1072 			 * we can't also send back sense data as well.
1073 			 */
1074 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1075 		}
1076 
1077 		if (cto->ct_flags & CT2_SENDSTATUS) {
1078 			isp_prt(isp, ISP_LOGTDEBUG0,
1079 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1080 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1081 			    cso->dxfer_len, cto->ct_resid);
1082 			cto->ct_flags |= CT2_CCINCR;
1083 		}
1084 		cto->ct_timeout = 10;
1085 		hp = &cto->ct_syshandle;
1086 	} else {
1087 		ct_entry_t *cto = (ct_entry_t *) local;
1088 
1089 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1090 		cto->ct_header.rqs_entry_count = 1;
1091 		cto->ct_iid = cso->init_id;
1092 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1093 		cto->ct_tgt = ccb->ccb_h.target_id;
1094 		cto->ct_lun = ccb->ccb_h.target_lun;
1095 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1096 		if (AT_HAS_TAG(cso->tag_id)) {
1097 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1098 			cto->ct_flags |= CT_TQAE;
1099 		}
1100 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1101 			cto->ct_flags |= CT_NODISC;
1102 		}
1103 		if (cso->dxfer_len == 0) {
1104 			cto->ct_flags |= CT_NO_DATA;
1105 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1106 			cto->ct_flags |= CT_DATA_IN;
1107 		} else {
1108 			cto->ct_flags |= CT_DATA_OUT;
1109 		}
1110 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1111 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1112 			cto->ct_scsi_status = cso->scsi_status;
1113 			cto->ct_resid = cso->resid;
1114 			isp_prt(isp, ISP_LOGTDEBUG0,
1115 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1116 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1117 			    cso->tag_id);
1118 		}
1119 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1120 		cto->ct_timeout = 10;
1121 		hp = &cto->ct_syshandle;
1122 	}
1123 
1124 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1125 		xpt_print_path(ccb->ccb_h.path);
1126 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1127 		return (CAM_RESRC_UNAVAIL);
1128 	}
1129 
1130 
1131 	/*
1132 	 * Call the dma setup routines for this entry (and any subsequent
1133 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1134 	 * new things to play with. As with isp_start's usage of DMA setup,
1135 	 * any swizzling is done in the machine dependent layer. Because
1136 	 * of this, we put the request onto the queue area first in native
1137 	 * format.
1138 	 */
1139 
1140 	save_handle = *hp;
1141 
1142 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1143 	case CMD_QUEUED:
1144 		ISP_ADD_REQUEST(isp, nxti);
1145 		return (CAM_REQ_INPROG);
1146 
1147 	case CMD_EAGAIN:
1148 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1149 		isp_destroy_handle(isp, save_handle);
1150 		return (CAM_RESRC_UNAVAIL);
1151 
1152 	default:
1153 		isp_destroy_handle(isp, save_handle);
1154 		return (XS_ERR(ccb));
1155 	}
1156 }
1157 
1158 static void
1159 isp_refire_putback_atio(void *arg)
1160 {
1161 	int s = splcam();
1162 	isp_target_putback_atio(arg);
1163 	splx(s);
1164 }
1165 
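/*
 * Hand an ATIO or ATIO2 back to the firmware so the resources for this
 * exchange can be reused, then complete the associated CCB. If no request
 * queue entry is available, the attempt is retried from a timeout via
 * isp_refire_putback_atio above.
 */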
1166 static void
1167 isp_target_putback_atio(union ccb *ccb)
1168 {
1169 	struct ispsoftc *isp;
1170 	struct ccb_scsiio *cso;
1171 	u_int16_t nxti, optr;
1172 	void *qe;
1173 
1174 	isp = XS_ISP(ccb);
1175 
1176 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1177 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1178 		isp_prt(isp, ISP_LOGWARN,
1179 		    "isp_target_putback_atio: Request Queue Overflow");
1180 		return;
1181 	}
1182 	bzero(qe, QENTRY_LEN);
1183 	cso = &ccb->csio;
1184 	if (IS_FC(isp)) {
1185 		at2_entry_t local, *at = &local;
1186 		MEMZERO(at, sizeof (at2_entry_t));
1187 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1188 		at->at_header.rqs_entry_count = 1;
1189 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1190 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1191 		} else {
1192 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1193 		}
1194 		at->at_status = CT_OK;
1195 		at->at_rxid = cso->tag_id;
1196 		isp_put_atio2(isp, at, qe);
1197 	} else {
1198 		at_entry_t local, *at = &local;
1199 		MEMZERO(at, sizeof (at_entry_t));
1200 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1201 		at->at_header.rqs_entry_count = 1;
1202 		at->at_iid = cso->init_id;
1203 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1204 		at->at_tgt = cso->ccb_h.target_id;
1205 		at->at_lun = cso->ccb_h.target_lun;
1206 		at->at_status = CT_OK;
1207 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1208 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1209 		isp_put_atio(isp, at, qe);
1210 	}
1211 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1212 	ISP_ADD_REQUEST(isp, nxti);
1213 	isp_complete_ctio(ccb);
1214 }
1215 
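/*
 * Final completion for a target mode CCB: mark it done, and if this
 * command had frozen the SIMQ for resource shortage, account for the
 * unfreeze before handing the CCB back to CAM.
 */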
1216 static void
1217 isp_complete_ctio(union ccb *ccb)
1218 {
1219 	struct ispsoftc *isp = XS_ISP(ccb);
1220 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1221 		ccb->ccb_h.status |= CAM_REQ_CMP;
1222 	}
1223 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1224 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1225 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1226 		if (isp->isp_osinfo.simqfrozen == 0) {
1227 			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1228 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
1229 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1230 			} else {
1231 				isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen");
1232 			}
1233 		} else {
1234 			isp_prt(isp, ISP_LOGWARN,
1235 			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
1236 		}
1237 	}
1238 	xpt_done(ccb);
1239 }
1240 
1241 /*
1242  * Handle ATIO stuff that the generic code can't.
1243  * This means handling CDBs.
1244  */
1245 
1246 static int
1247 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1248 {
1249 	tstate_t *tptr;
1250 	int status, bus, iswildcard;
1251 	struct ccb_accept_tio *atiop;
1252 
1253 	/*
1254 	 * The firmware status (except for the QLTM_SVALID bit)
1255 	 * indicates why this ATIO was sent to us.
1256 	 *
1257 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1258 	 *
1259 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1260 	 * we're still connected on the SCSI bus.
1261 	 */
1262 	status = aep->at_status;
1263 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1264 		/*
1265 		 * Bus Phase Sequence error. We should have sense data
1266 		 * suggested by the f/w. I'm not sure quite yet what
1267 		 * suggested by the f/w. I'm not quite sure yet what
1268 		 */
1269 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1270 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1271 		return (0);
1272 	}
1273 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1274 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1275 		    status);
1276 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1277 		return (0);
1278 	}
1279 
1280 	bus = GET_BUS_VAL(aep->at_iid);
1281 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1282 	if (tptr == NULL) {
1283 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1284 		iswildcard = 1;
1285 	} else {
1286 		iswildcard = 0;
1287 	}
1288 
1289 	if (tptr == NULL) {
1290 		/*
1291 		 * Because we can't autofeed sense data back with
1292 		 * a command for parallel SCSI, we can't give back
1293 		 * a CHECK CONDITION. We'll give back a BUSY status
1294 		 * instead. This works out okay because the only
1295 		 * time we should, in fact, get this, is in the
1296 		 * case that somebody configured us without the
1297 		 * blackhole driver, so they get what they deserve.
1298 		 */
1299 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1300 		return (0);
1301 	}
1302 
1303 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1304 	if (atiop == NULL) {
1305 		/*
1306 		 * Because we can't autofeed sense data back with
1307 		 * a command for parallel SCSI, we can't give back
1308 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1309 		 * instead. This works out okay because the only time we
1310 		 * should, in fact, get this, is in the case that we've
1311 		 * run out of ATIOS.
1312 		 */
1313 		xpt_print_path(tptr->owner);
1314 		isp_prt(isp, ISP_LOGWARN,
1315 		    "no ATIOS for lun %d from initiator %d on channel %d",
1316 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1317 		if (aep->at_flags & AT_TQAE)
1318 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1319 		else
1320 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1321 		rls_lun_statep(isp, tptr);
1322 		return (0);
1323 	}
1324 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1325 	if (iswildcard) {
1326 		atiop->ccb_h.target_id = aep->at_tgt;
1327 		atiop->ccb_h.target_lun = aep->at_lun;
1328 	}
1329 	if (aep->at_flags & AT_NODISC) {
1330 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1331 	} else {
1332 		atiop->ccb_h.flags = 0;
1333 	}
1334 
1335 	if (status & QLTM_SVALID) {
1336 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1337 		atiop->sense_len = amt;
1338 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1339 	} else {
1340 		atiop->sense_len = 0;
1341 	}
1342 
1343 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1344 	atiop->cdb_len = aep->at_cdblen;
1345 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1346 	atiop->ccb_h.status = CAM_CDB_RECVD;
1347 	/*
1348 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1349 	 * and the handle (which we have to preserve).
1350 	 */
1351 	AT_MAKE_TAGID(atiop->tag_id, aep);
1352 	if (aep->at_flags & AT_TQAE) {
1353 		atiop->tag_action = aep->at_tag_type;
1354 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1355 	}
1356 	xpt_done((union ccb*)atiop);
1357 	isp_prt(isp, ISP_LOGTDEBUG0,
1358 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1359 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1360 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1361 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1362 	    "nondisc" : "disconnecting");
1363 	rls_lun_statep(isp, tptr);
1364 	return (0);
1365 }
1366 
1367 static int
1368 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1369 {
1370 	lun_id_t lun;
1371 	tstate_t *tptr;
1372 	struct ccb_accept_tio *atiop;
1373 	atio_private_data_t *atp;
1374 
1375 	/*
1376 	 * The firmware status (except for the QLTM_SVALID bit)
1377 	 * indicates why this ATIO was sent to us.
1378 	 *
1379 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1380 	 */
1381 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1382 		isp_prt(isp, ISP_LOGWARN,
1383 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1384 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1385 		return (0);
1386 	}
1387 
1388 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1389 		lun = aep->at_scclun;
1390 	} else {
1391 		lun = aep->at_lun;
1392 	}
1393 	tptr = get_lun_statep(isp, 0, lun);
1394 	if (tptr == NULL) {
1395 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1396 	}
1397 
1398 	if (tptr == NULL) {
1399 		/*
1400 		 * What we'd like to know is whether or not we have a listener
1401 		 * upstream that really hasn't configured yet. If we do, then
1402 		 * we can give a more sensible reply here. If not, then we can
1403 		 * reject this out of hand.
1404 		 *
1405 		 * Choices for what to send were
1406 		 *
1407 		 *	Not Ready, Unit Not Self-Configured Yet
1408 		 *	(0x2,0x3e,0x00)
1409 		 *
1410 		 * for the former and
1411 		 *
1412 		 *	Illegal Request, Logical Unit Not Supported
1413 		 *	(0x5,0x25,0x00)
1414 		 *
1415 		 * for the latter.
1416 		 *
1417 		 * We used to decide whether there was at least one listener
1418 		 * based upon whether the black hole driver was configured.
1419 		 * However, recent config(8) changes have made this hard to do
1420 		 * at this time.
1421 		 *
1422 		 */
1423 		u_int32_t ccode = SCSI_STATUS_BUSY;
1424 
1425 		/*
1426 		 * Because we can't autofeed sense data back with
1427 		 * a command for parallel SCSI, we can't give back
1428 		 * a CHECK CONDITION. We'll give back a BUSY status
1429 		 * instead. This works out okay because the only
1430 		 * time we should, in fact, get this, is in the
1431 		 * case that somebody configured us without the
1432 		 * blackhole driver, so they get what they deserve.
1433 		 */
1434 		isp_endcmd(isp, aep, ccode, 0);
1435 		return (0);
1436 	}
1437 
1438 	atp = isp_get_atpd(isp, 0);
1439 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1440 	if (atiop == NULL || atp == NULL) {
1441 		/*
1442 		 * Because we can't autofeed sense data back with
1443 		 * a command for parallel SCSI, we can't give back
1444 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1445 		 * instead. This works out okay because the only time we
1446 		 * should, in fact, get this, is in the case that we've
1447 		 * run out of ATIOS.
1448 		 */
1449 		xpt_print_path(tptr->owner);
1450 		isp_prt(isp, ISP_LOGWARN,
1451 		    "no ATIOS for lun %d from initiator %d", lun, aep->at_iid);
1452 		rls_lun_statep(isp, tptr);
1453 		if (aep->at_flags & AT_TQAE)
1454 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1455 		else
1456 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1457 		return (0);
1458 	}
1459 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1460 
1461 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1462 		atiop->ccb_h.target_id =
1463 			((fcparam *)isp->isp_param)->isp_loopid;
1464 		atiop->ccb_h.target_lun = lun;
1465 	}
1466 	/*
1467 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1468 	 */
1469 	atiop->sense_len = 0;
1470 
1471 	atiop->init_id = aep->at_iid;
1472 	atiop->cdb_len = ATIO2_CDBLEN;
1473 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1474 	atiop->ccb_h.status = CAM_CDB_RECVD;
1475 	atiop->tag_id = aep->at_rxid;
1476 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1477 	case ATIO2_TC_ATTR_SIMPLEQ:
1478 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1479 		break;
1480 	case ATIO2_TC_ATTR_HEADOFQ:
1481 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1482 		break;
1483 	case ATIO2_TC_ATTR_ORDERED:
1484 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1485 		break;
1486 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1487 	case ATIO2_TC_ATTR_UNTAGGED:
1488 	default:
1489 		atiop->tag_action = 0;
1490 		break;
1491 	}
1492 	if (atiop->tag_action != 0) {
1493 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1494 	}
1495 
1496 	atp->tag = atiop->tag_id;
1497 	atp->orig_datalen = aep->at_datalen;
1498 	atp->last_xframt = 0;
1499 	atp->bytes_xfered = 0;
1500 
1501 	xpt_done((union ccb*)atiop);
1502 	isp_prt(isp, ISP_LOGTDEBUG0,
1503 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1504 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1505 	    lun, aep->at_taskflags, aep->at_datalen);
1506 	rls_lun_statep(isp, tptr);
1507 	return (0);
1508 }
1509 
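/*
 * Common completion handling for CTIO and CTIO2 responses: recover the
 * CCB from the handle, update residuals (and, for Fibre Channel, the
 * ATIO adjunct byte counts), and, for the final CTIO of an exchange,
 * either complete the CCB or, on error, put an ATIO back to the
 * firmware first.
 */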
1510 static int
1511 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1512 {
1513 	union ccb *ccb;
1514 	int sentstatus, ok, notify_cam, resid = 0;
1515 	u_int16_t tval;
1516 
1517 	/*
1518 	 * CTIO and CTIO2 are close enough....
1519 	 */
1520 
1521 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1522 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1523 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1524 
1525 	if (IS_FC(isp)) {
1526 		ct2_entry_t *ct = arg;
1527 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1528 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1529 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1530 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1531 		}
1532 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1533 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1534 			atio_private_data_t *atp =
1535 			    isp_get_atpd(isp, ct->ct_rxid);
1536 			if (atp == NULL) {
1537 				panic("cannot find adjunct after I/O");
1538 			}
1539 			resid = ct->ct_resid;
1540 			atp->bytes_xfered += (atp->last_xframt - resid);
1541 			atp->last_xframt = 0;
1542 			if (sentstatus) {
1543 				atp->tag = 0;
1544 			}
1545 		}
1546 		isp_prt(isp, ISP_LOGTDEBUG0,
1547 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1548 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1549 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1550 		    resid, sentstatus? "FIN" : "MID");
1551 		tval = ct->ct_rxid;
1552 	} else {
1553 		ct_entry_t *ct = arg;
1554 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1555 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1556 		/*
1557 		 * We *ought* to be able to get back to the original ATIO
1558 		 * here, but for some reason this gets lost. It's just as
1559 		 * well because it's squirrelled away as part of periph
1560 		 * private data.
1561 		 *
1562 		 * We can live without it as long as we continue to use
1563 		 * the auto-replenish feature for CTIOs.
1564 		 */
1565 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1566 		if (ct->ct_status & QLTM_SVALID) {
1567 			char *sp = (char *)ct;
1568 			sp += CTIO_SENSE_OFFSET;
1569 			ccb->csio.sense_len =
1570 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1571 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1572 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1573 		}
1574 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1575 			resid = ct->ct_resid;
1576 		}
1577 		isp_prt(isp, ISP_LOGTDEBUG0,
1578 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1579 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1580 		    ct->ct_status, ct->ct_flags, resid,
1581 		    sentstatus? "FIN" : "MID");
1582 		tval = ct->ct_fwhandle;
1583 	}
1584 	ccb->csio.resid += resid;
1585 
1586 	/*
1587 	 * We're here either because intermediate data transfers are done
1588 	 * and/or the final status CTIO (which may have joined with a
1589 	 * Data Transfer) is done.
1590 	 *
1591 	 * In any case, for this platform, the upper layers figure out
1592 	 * what to do next, so all we do here is collect status and
1593 	 * pass information along. Any DMA handles have already been
1594 	 * freed.
1595 	 */
1596 	if (notify_cam == 0) {
1597 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1598 		return (0);
1599 	}
1600 
1601 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1602 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1603 
1604 	if (!ok) {
1605 		isp_target_putback_atio(ccb);
1606 	} else {
1607 		isp_complete_ctio(ccb);
1608 
1609 	}
1610 	return (0);
1611 }
1612 #endif
1613 
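/*
 * CAM asynchronous event callback. On AC_LOST_DEVICE for parallel SCSI
 * adapters the target's goal flags are temporarily reset (to safe or
 * default negotiation parameters) and a parameter update is pushed to
 * the chip; other events are simply logged.
 */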
1614 static void
1615 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1616 {
1617 	struct cam_sim *sim;
1618 	struct ispsoftc *isp;
1619 
1620 	sim = (struct cam_sim *)cbarg;
1621 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1622 	switch (code) {
1623 	case AC_LOST_DEVICE:
1624 		if (IS_SCSI(isp)) {
1625 			u_int16_t oflags, nflags;
1626 			sdparam *sdp = isp->isp_param;
1627 			int tgt;
1628 
1629 			tgt = xpt_path_target_id(path);
1630 			ISP_LOCK(isp);
1631 			sdp += cam_sim_bus(sim);
1632 			nflags = sdp->isp_devparam[tgt].nvrm_flags;
1633 #ifndef	ISP_TARGET_MODE
1634 			nflags &= DPARM_SAFE_DFLT;
1635 			if (isp->isp_loaded_fw) {
1636 				nflags |= DPARM_NARROW | DPARM_ASYNC;
1637 			}
1638 #else
1639 			nflags = DPARM_DEFAULT;
1640 #endif
1641 			oflags = sdp->isp_devparam[tgt].goal_flags;
1642 			sdp->isp_devparam[tgt].goal_flags = nflags;
1643 			sdp->isp_devparam[tgt].dev_update = 1;
1644 			isp->isp_update |= (1 << cam_sim_bus(sim));
1645 			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
1646 			sdp->isp_devparam[tgt].goal_flags = oflags;
1647 			ISP_UNLOCK(isp);
1648 		}
1649 		break;
1650 	default:
1651 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1652 		break;
1653 	}
1654 }
1655 
1656 static void
1657 isp_poll(struct cam_sim *sim)
1658 {
1659 	struct ispsoftc *isp = cam_sim_softc(sim);
1660 	u_int16_t isr, sema, mbox;
1661 
1662 	ISP_LOCK(isp);
1663 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1664 		isp_intr(isp, isr, sema, mbox);
1665 	}
1666 	ISP_UNLOCK(isp);
1667 }
1668 
1669 #if	0
1670 static void
1671 isp_relsim(void *arg)
1672 {
1673 	struct ispsoftc *isp = arg;
1674 	ISP_LOCK(isp);
1675 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
1676 		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
1677 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
1678 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1679 			xpt_release_simq(isp->isp_sim, 1);
1680 			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
1681 		}
1682 	}
1683 	ISP_UNLOCK(isp);
1684 }
1685 #endif
1686 
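/*
 * Per-command watchdog. On the first expiry of a still-active command a
 * SYNC_ALL marker is queued and the timeout is rearmed as a grace period;
 * if the command still is not done when the watchdog fires again, it is
 * aborted, its handle and DMA resources are released, and it is completed
 * with CAM_CMD_TIMEOUT.
 */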
1687 static void
1688 isp_watchdog(void *arg)
1689 {
1690 	XS_T *xs = arg;
1691 	struct ispsoftc *isp = XS_ISP(xs);
1692 	u_int32_t handle;
1693 
1694 	/*
1695 	 * We've decided this command is dead. Make sure we're not trying
1696 	 * to kill a command that's already dead by getting its handle
1697 	 * and seeing whether it's still alive.
1698 	 */
1699 	ISP_LOCK(isp);
1700 	handle = isp_find_handle(isp, xs);
1701 	if (handle) {
1702 		u_int16_t isr, sema, mbox;
1703 
1704 		if (XS_CMD_DONE_P(xs)) {
1705 			isp_prt(isp, ISP_LOGDEBUG1,
1706 			    "watchdog found done cmd (handle 0x%x)", handle);
1707 			ISP_UNLOCK(isp);
1708 			return;
1709 		}
1710 
1711 		if (XS_CMD_WDOG_P(xs)) {
1712 			isp_prt(isp, ISP_LOGDEBUG2,
1713 			    "recursive watchdog (handle 0x%x)", handle);
1714 			ISP_UNLOCK(isp);
1715 			return;
1716 		}
1717 
1718 		XS_CMD_S_WDOG(xs);
1719 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1720 			isp_intr(isp, isr, sema, mbox);
1721 		}
1722 		if (XS_CMD_DONE_P(xs)) {
1723 			isp_prt(isp, ISP_LOGDEBUG2,
1724 			    "watchdog cleanup for handle 0x%x", handle);
1725 			xpt_done((union ccb *) xs);
1726 		} else if (XS_CMD_GRACE_P(xs)) {
1727 			/*
1728 			 * Make sure the command is *really* dead before we
1729 			 * release the handle (and DMA resources) for reuse.
1730 			 */
1731 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1732 
1733 			/*
1734 			 * After this point, the command is really dead.
1735 			 */
1736 			if (XS_XFRLEN(xs)) {
1737 				ISP_DMAFREE(isp, xs, handle);
1738                 	}
1739 			isp_destroy_handle(isp, handle);
1740 			xpt_print_path(xs->ccb_h.path);
1741 			isp_prt(isp, ISP_LOGWARN,
1742 			    "watchdog timeout for handle 0x%x", handle);
1743 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1744 			XS_CMD_C_WDOG(xs);
1745 			isp_done(xs);
1746 		} else {
1747 			u_int16_t nxti, optr;
1748 			ispreq_t local, *mp= &local, *qe;
1749 
1750 			XS_CMD_C_WDOG(xs);
1751 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1752 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1753 				ISP_UNLOCK(isp);
1754 				return;
1755 			}
1756 			XS_CMD_S_GRACE(xs);
1757 			MEMZERO((void *) mp, sizeof (*mp));
1758 			mp->req_header.rqs_entry_count = 1;
1759 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1760 			mp->req_modifier = SYNC_ALL;
1761 			mp->req_target = XS_CHANNEL(xs) << 7;
1762 			isp_put_request(isp, mp, qe);
1763 			ISP_ADD_REQUEST(isp, nxti);
1764 		}
1765 	} else {
1766 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1767 	}
1768 	ISP_UNLOCK(isp);
1769 }
1770 
1771 static int isp_ktmature = 0;
1772 
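/*
 * Per-instance Fibre Channel helper thread: it tries to bring the loop to
 * a good state via isp_fc_runstate, releases a SIMQ frozen for loop-down,
 * and then sleeps on kthread_cv until isp_action kicks it again (e.g. for
 * CMD_RQLATER).
 */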
1773 static void
1774 isp_kthread(void *arg)
1775 {
1776 	int wasfrozen;
1777 	struct ispsoftc *isp = arg;
1778 
1779 	mtx_lock(&isp->isp_lock);
1780 	for (;;) {
1781 		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
1782 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1783 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1784 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1785 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1786 				    isp_ktmature == 0) {
1787 					break;
1788 				}
1789 			}
1790 			msleep(isp_kthread, &isp->isp_lock,
1791 			    PRIBIO, "isp_fcthrd", hz);
1792 		}
1793 		/*
1794 		 * Even if we didn't get good loop state we may be
1795 		 * unfreezing the SIMQ so that we can kill off
1796 		 * commands (if we've never seen loop before, e.g.)
1797 		 */
1798 		isp_ktmature = 1;
1799 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1800 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1801 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1802 			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
1803 			ISPLOCK_2_CAMLOCK(isp);
1804 			xpt_release_simq(isp->isp_sim, 1);
1805 			CAMLOCK_2_ISPLOCK(isp);
1806 		}
1807 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
1808 	}
1809 }
1810 
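/*
 * Main CAM action entry point for both SIMs. Note the hand-off between
 * the CAM and ISP locks (ISPLOCK_2_CAMLOCK/CAMLOCK_2_ISPLOCK) around
 * calls into the core code and back out to xpt_done.
 */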
1811 static void
1812 isp_action(struct cam_sim *sim, union ccb *ccb)
1813 {
1814 	int bus, tgt, error;
1815 	struct ispsoftc *isp;
1816 	struct ccb_trans_settings *cts;
1817 
1818 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1819 
1820 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1821 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1822 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1823 	if (isp->isp_state != ISP_RUNSTATE &&
1824 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1825 		CAMLOCK_2_ISPLOCK(isp);
1826 		isp_init(isp);
1827 		if (isp->isp_state != ISP_INITSTATE) {
1828 			ISP_UNLOCK(isp);
1829 			/*
1830 			 * Lie. Say it was a selection timeout.
1831 			 */
1832 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1833 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1834 			xpt_done(ccb);
1835 			return;
1836 		}
1837 		isp->isp_state = ISP_RUNSTATE;
1838 		ISPLOCK_2_CAMLOCK(isp);
1839 	}
1840 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1841 
1842 
1843 	switch (ccb->ccb_h.func_code) {
1844 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1845 		/*
1846 		 * Do a couple of preliminary checks...
1847 		 */
1848 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1849 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1850 				ccb->ccb_h.status = CAM_REQ_INVALID;
1851 				xpt_done(ccb);
1852 				break;
1853 			}
1854 		}
1855 #ifdef	DIAGNOSTIC
1856 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
1857 			ccb->ccb_h.status = CAM_PATH_INVALID;
1858 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
1859 			ccb->ccb_h.status = CAM_PATH_INVALID;
1860 		}
1861 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
1862 			isp_prt(isp, ISP_LOGERR,
1863 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
1864 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1865 			xpt_done(ccb);
1866 			break;
1867 		}
1868 #endif
1869 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
1870 		CAMLOCK_2_ISPLOCK(isp);
1871 		error = isp_start((XS_T *) ccb);
1872 		switch (error) {
1873 		case CMD_QUEUED:
1874 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1875 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
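				/*
				 * Convert the CCB timeout (milliseconds;
				 * CAM_TIME_DEFAULT means 60 seconds) into
				 * ticks, rounding up and padding with two
				 * seconds of slack before arming the
				 * watchdog.
				 */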
1876 				u_int64_t ticks = (u_int64_t) hz;
1877 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1878 					ticks = 60 * 1000 * ticks;
1879 				else
1880 					ticks = ccb->ccb_h.timeout * hz;
1881 				ticks = ((ticks + 999) / 1000) + hz + hz;
1882 				if (ticks >= 0x80000000) {
1883 					isp_prt(isp, ISP_LOGERR,
1884 					    "timeout overflow");
1885 					ticks = 0x80000000;
1886 				}
1887 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
1888 				    (caddr_t)ccb, (int)ticks);
1889 			} else {
1890 				callout_handle_init(&ccb->ccb_h.timeout_ch);
1891 			}
1892 			ISPLOCK_2_CAMLOCK(isp);
1893 			break;
1894 		case CMD_RQLATER:
1895 			/*
1896 			 * This can only happen for Fibre Channel
1897 			 */
1898 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
1899 			if (FCPARAM(isp)->loop_seen_once == 0 && isp_ktmature) {
1900 				ISPLOCK_2_CAMLOCK(isp);
1901 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
1902 				xpt_done(ccb);
1903 				break;
1904 			}
1905 			cv_signal(&isp->isp_osinfo.kthread_cv);
1906 			if (isp->isp_osinfo.simqfrozen == 0) {
1907 				isp_prt(isp, ISP_LOGDEBUG2,
1908 				    "RQLATER freeze simq");
1909 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
1910 				ISPLOCK_2_CAMLOCK(isp);
1911 				xpt_freeze_simq(sim, 1);
1912 			} else {
1913 				ISPLOCK_2_CAMLOCK(isp);
1914 			}
1915 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1916 			xpt_done(ccb);
1917 			break;
1918 		case CMD_EAGAIN:
1919 			if (isp->isp_osinfo.simqfrozen == 0) {
1920 				xpt_freeze_simq(sim, 1);
1921 				isp_prt(isp, ISP_LOGDEBUG2,
1922 				    "EAGAIN freeze simq");
1923 			}
1924 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1925 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1926 			ISPLOCK_2_CAMLOCK(isp);
1927 			xpt_done(ccb);
1928 			break;
1929 		case CMD_COMPLETE:
1930 			isp_done((struct ccb_scsiio *) ccb);
1931 			ISPLOCK_2_CAMLOCK(isp);
1932 			break;
1933 		default:
1934 			isp_prt(isp, ISP_LOGERR,
1935 			    "What's this? 0x%x at %d in file %s",
1936 			    error, __LINE__, __FILE__);
1937 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
1938 			xpt_done(ccb);
1939 			ISPLOCK_2_CAMLOCK(isp);
1940 		}
1941 		break;
1942 
1943 #ifdef	ISP_TARGET_MODE
1944 	case XPT_EN_LUN:		/* Enable LUN as a target */
1945 	{
1946 		int iok;
1947 		CAMLOCK_2_ISPLOCK(isp);
1948 		iok = isp->isp_osinfo.intsok;
1949 		isp->isp_osinfo.intsok = 0;
1950 		isp_en_lun(isp, ccb);
1951 		isp->isp_osinfo.intsok = iok;
1952 		ISPLOCK_2_CAMLOCK(isp);
1953 		xpt_done(ccb);
1954 		break;
1955 	}
1956 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
1957 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
1958 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
1959 	{
1960 		tstate_t *tptr =
1961 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
1962 		if (tptr == NULL) {
1963 			ccb->ccb_h.status = CAM_LUN_INVALID;
1964 			xpt_done(ccb);
1965 			break;
1966 		}
1967 		ccb->ccb_h.sim_priv.entries[0].field = 0;
1968 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1969 		CAMLOCK_2_ISPLOCK(isp);
1970 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1971 			SLIST_INSERT_HEAD(&tptr->atios,
1972 			    &ccb->ccb_h, sim_links.sle);
1973 		} else {
1974 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
1975 			    sim_links.sle);
1976 		}
1977 		rls_lun_statep(isp, tptr);
1978 		ccb->ccb_h.status = CAM_REQ_INPROG;
1979 		ISPLOCK_2_CAMLOCK(isp);
1980 		break;
1981 	}
1982 	case XPT_CONT_TARGET_IO:
1983 	{
1984 		CAMLOCK_2_ISPLOCK(isp);
1985 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
1986 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1987 			if (isp->isp_osinfo.simqfrozen == 0) {
1988 				xpt_freeze_simq(sim, 1);
1989 				xpt_print_path(ccb->ccb_h.path);
1990 				isp_prt(isp, ISP_LOGINFO,
1991 				    "XPT_CONT_TARGET_IO freeze simq");
1992 			}
1993 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1994 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1995 			ISPLOCK_2_CAMLOCK(isp);
1996 			xpt_done(ccb);
1997 		} else {
1998 			ISPLOCK_2_CAMLOCK(isp);
1999 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2000 		}
2001 		break;
2002 	}
2003 #endif
2004 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2005 
2006 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2007 		tgt = ccb->ccb_h.target_id;
2008 		tgt |= (bus << 16);
2009 
2010 		CAMLOCK_2_ISPLOCK(isp);
2011 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2012 		ISPLOCK_2_CAMLOCK(isp);
2013 		if (error) {
2014 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2015 		} else {
2016 			ccb->ccb_h.status = CAM_REQ_CMP;
2017 		}
2018 		xpt_done(ccb);
2019 		break;
2020 	case XPT_ABORT:			/* Abort the specified CCB */
2021 	{
2022 		union ccb *accb = ccb->cab.abort_ccb;
2023 		CAMLOCK_2_ISPLOCK(isp);
2024 		switch (accb->ccb_h.func_code) {
2025 #ifdef	ISP_TARGET_MODE
2026 		case XPT_ACCEPT_TARGET_IO:
2027 		case XPT_IMMED_NOTIFY:
2028 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2029 			break;
2030 		case XPT_CONT_TARGET_IO:
2031 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2032 			ccb->ccb_h.status = CAM_UA_ABORT;
2033 			break;
2034 #endif
2035 		case XPT_SCSI_IO:
2036 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2037 			if (error) {
2038 				ccb->ccb_h.status = CAM_UA_ABORT;
2039 			} else {
2040 				ccb->ccb_h.status = CAM_REQ_CMP;
2041 			}
2042 			break;
2043 		default:
2044 			ccb->ccb_h.status = CAM_REQ_INVALID;
2045 			break;
2046 		}
2047 		ISPLOCK_2_CAMLOCK(isp);
2048 		xpt_done(ccb);
2049 		break;
2050 	}
2051 #ifdef	CAM_NEW_TRAN_CODE
2052 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2053 #else
2054 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2055 #endif
2056 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2057 		cts = &ccb->cts;
2058 		if (!IS_CURRENT_SETTINGS(cts)) {
2059 			ccb->ccb_h.status = CAM_REQ_INVALID;
2060 			xpt_done(ccb);
2061 			break;
2062 		}
2063 		tgt = cts->ccb_h.target_id;
2064 		CAMLOCK_2_ISPLOCK(isp);
2065 		if (IS_SCSI(isp)) {
2066 #ifndef	CAM_NEW_TRAN_CODE
2067 			sdparam *sdp = isp->isp_param;
2068 			u_int16_t *dptr;
2069 
2070 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2071 
2072 			sdp += bus;
2073 			/*
2074 			 * We always update (internally) from goal_flags
2075 			 * so any request to change settings just gets
2076 			 * vectored to that location.
2077 			 */
2078 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2079 
2080 			/*
2081 			 * Note that these operations affect the
2082 			 * goal flags (goal_flags), not
2083 			 * the current state flags. Then we mark
2084 			 * things so that the next operation to
2085 			 * this HBA will cause the update to occur.
2086 			 */
2087 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2088 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2089 					*dptr |= DPARM_DISC;
2090 				} else {
2091 					*dptr &= ~DPARM_DISC;
2092 				}
2093 			}
2094 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2095 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2096 					*dptr |= DPARM_TQING;
2097 				} else {
2098 					*dptr &= ~DPARM_TQING;
2099 				}
2100 			}
2101 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2102 				switch (cts->bus_width) {
2103 				case MSG_EXT_WDTR_BUS_16_BIT:
2104 					*dptr |= DPARM_WIDE;
2105 					break;
2106 				default:
2107 					*dptr &= ~DPARM_WIDE;
2108 				}
2109 			}
2110 			/*
2111 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2112 			 * of nonzero will cause us to go to the
2113 			 * selected (from NVRAM) maximum value for
2114 			 * this device. At a later point, we'll
2115 			 * allow finer control.
2116 			 */
2117 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2118 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2119 			    (cts->sync_offset > 0)) {
2120 				*dptr |= DPARM_SYNC;
2121 			} else {
2122 				*dptr &= ~DPARM_SYNC;
2123 			}
2124 			*dptr |= DPARM_SAFE_DFLT;
2125 #else
2126 			struct ccb_trans_settings_scsi *scsi =
2127 			    &cts->proto_specific.scsi;
2128 			struct ccb_trans_settings_spi *spi =
2129 			    &cts->xport_specific.spi;
2130 			sdparam *sdp = isp->isp_param;
2131 			u_int16_t *dptr;
2132 
2133 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2134 			sdp += bus;
2135 			/*
2136 			 * We always update (internally) from goal_flags
2137 			 * so any request to change settings just gets
2138 			 * vectored to that location.
2139 			 */
2140 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2141 
2142 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2143 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2144 					*dptr |= DPARM_DISC;
2145 				else
2146 					*dptr &= ~DPARM_DISC;
2147 			}
2148 
2149 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2150 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2151 					*dptr |= DPARM_TQING;
2152 				else
2153 					*dptr &= ~DPARM_TQING;
2154 			}
2155 
2156 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2157 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2158 					*dptr |= DPARM_WIDE;
2159 				else
2160 					*dptr &= ~DPARM_WIDE;
2161 			}
2162 
2163 			/*
2164 			 * XXX: FIX ME
2165 			 */
2166 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2167 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2168 			    (spi->sync_period && spi->sync_offset)) {
2169 				*dptr |= DPARM_SYNC;
2170 				/*
2171 				 * XXX: CHECK FOR LEGALITY
2172 				 */
2173 				sdp->isp_devparam[tgt].goal_period =
2174 				    spi->sync_period;
2175 				sdp->isp_devparam[tgt].goal_offset =
2176 				    spi->sync_offset;
2177 			} else {
2178 				*dptr &= ~DPARM_SYNC;
2179 			}
2180 #endif
2181 			isp_prt(isp, ISP_LOGDEBUG0,
2182 			    "SET bus %d targ %d to flags %x off %x per %x",
2183 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2184 			    sdp->isp_devparam[tgt].goal_offset,
2185 			    sdp->isp_devparam[tgt].goal_period);
2186 			sdp->isp_devparam[tgt].dev_update = 1;
2187 			isp->isp_update |= (1 << bus);
2188 		}
2189 		ISPLOCK_2_CAMLOCK(isp);
2190 		ccb->ccb_h.status = CAM_REQ_CMP;
2191 		xpt_done(ccb);
2192 		break;
2193 	case XPT_GET_TRAN_SETTINGS:
2194 		cts = &ccb->cts;
2195 		tgt = cts->ccb_h.target_id;
2196 		CAMLOCK_2_ISPLOCK(isp);
2197 		if (IS_FC(isp)) {
2198 #ifndef	CAM_NEW_TRAN_CODE
2199 			/*
2200 			 * a lot of normal SCSI things don't make sense.
2201 			 */
2202 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2203 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2204 			/*
2205 			 * How do you measure the width of a high
2206 			 * speed serial bus? Well, in bytes.
2207 			 *
2208 			 * Offset and period make no sense, though, so we set
2209 			 * (above) a 'base' transfer speed to be gigabit.
2210 			 */
2211 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2212 #else
2213 			fcparam *fcp = isp->isp_param;
2214 			struct ccb_trans_settings_fc *fc =
2215 			    &cts->xport_specific.fc;
2216 
2217 			cts->protocol = PROTO_SCSI;
2218 			cts->protocol_version = SCSI_REV_2;
2219 			cts->transport = XPORT_FC;
2220 			cts->transport_version = 0;
2221 
2222 			fc->valid = CTS_FC_VALID_SPEED;
2223 			if (fcp->isp_gbspeed == 2)
2224 				fc->bitrate = 200000;
2225 			else
2226 				fc->bitrate = 100000;
2227 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2228 				struct lportdb *lp = &fcp->portdb[tgt];
2229 				fc->wwnn = lp->node_wwn;
2230 				fc->wwpn = lp->port_wwn;
2231 				fc->port = lp->portid;
2232 				fc->valid |= CTS_FC_VALID_WWNN |
2233 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2234 			}
2235 #endif
2236 		} else {
2237 #ifdef	CAM_NEW_TRAN_CODE
2238 			struct ccb_trans_settings_scsi *scsi =
2239 			    &cts->proto_specific.scsi;
2240 			struct ccb_trans_settings_spi *spi =
2241 			    &cts->xport_specific.spi;
2242 #endif
2243 			sdparam *sdp = isp->isp_param;
2244 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2245 			u_int16_t dval, pval, oval;
2246 
2247 			sdp += bus;
2248 
2249 			if (IS_CURRENT_SETTINGS(cts)) {
2250 				sdp->isp_devparam[tgt].dev_refresh = 1;
2251 				isp->isp_update |= (1 << bus);
2252 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2253 				    NULL);
2254 				dval = sdp->isp_devparam[tgt].actv_flags;
2255 				oval = sdp->isp_devparam[tgt].actv_offset;
2256 				pval = sdp->isp_devparam[tgt].actv_period;
2257 			} else {
2258 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2259 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2260 				pval = sdp->isp_devparam[tgt].nvrm_period;
2261 			}
2262 
2263 #ifndef	CAM_NEW_TRAN_CODE
2264 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2265 
2266 			if (dval & DPARM_DISC) {
2267 				cts->flags |= CCB_TRANS_DISC_ENB;
2268 			}
2269 			if (dval & DPARM_TQING) {
2270 				cts->flags |= CCB_TRANS_TAG_ENB;
2271 			}
2272 			if (dval & DPARM_WIDE) {
2273 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2274 			} else {
2275 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2276 			}
2277 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2278 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2279 
2280 			if ((dval & DPARM_SYNC) && oval != 0) {
2281 				cts->sync_period = pval;
2282 				cts->sync_offset = oval;
2283 				cts->valid |=
2284 				    CCB_TRANS_SYNC_RATE_VALID |
2285 				    CCB_TRANS_SYNC_OFFSET_VALID;
2286 			}
2287 #else
2288 			cts->protocol = PROTO_SCSI;
2289 			cts->protocol_version = SCSI_REV_2;
2290 			cts->transport = XPORT_SPI;
2291 			cts->transport_version = 2;
2292 
2293 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2294 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2295 			if (dval & DPARM_DISC) {
2296 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2297 			}
2298 			if (dval & DPARM_TQING) {
2299 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2300 			}
2301 			if ((dval & DPARM_SYNC) && oval && pval) {
2302 				spi->sync_offset = oval;
2303 				spi->sync_period = pval;
2304 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2305 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2306 			}
2307 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2308 			if (dval & DPARM_WIDE) {
2309 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2310 			} else {
2311 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2312 			}
2313 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2314 				scsi->valid = CTS_SCSI_VALID_TQ;
2315 				spi->valid |= CTS_SPI_VALID_DISC;
2316 			} else {
2317 				scsi->valid = 0;
2318 			}
2319 #endif
2320 			isp_prt(isp, ISP_LOGDEBUG0,
2321 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2322 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2323 			    bus, tgt, dval, oval, pval);
2324 		}
2325 		ISPLOCK_2_CAMLOCK(isp);
2326 		ccb->ccb_h.status = CAM_REQ_CMP;
2327 		xpt_done(ccb);
2328 		break;
2329 
2330 	case XPT_CALC_GEOMETRY:
2331 	{
2332 		struct ccb_calc_geometry *ccg;
2333 		u_int32_t secs_per_cylinder;
2334 		u_int32_t size_mb;
2335 
2336 		ccg = &ccb->ccg;
2337 		if (ccg->block_size == 0) {
2338 			isp_prt(isp, ISP_LOGERR,
2339 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2340 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2341 			ccb->ccb_h.status = CAM_REQ_INVALID;
2342 			xpt_done(ccb);
2343 			break;
2344 		}
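		/*
		 * Use the usual translation heuristic: 255 heads and 63
		 * sectors per track for volumes larger than 1GB, 64 heads
		 * and 32 sectors per track otherwise.
		 */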
2345 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2346 		if (size_mb > 1024) {
2347 			ccg->heads = 255;
2348 			ccg->secs_per_track = 63;
2349 		} else {
2350 			ccg->heads = 64;
2351 			ccg->secs_per_track = 32;
2352 		}
2353 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2354 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2355 		ccb->ccb_h.status = CAM_REQ_CMP;
2356 		xpt_done(ccb);
2357 		break;
2358 	}
2359 	case XPT_RESET_BUS:		/* Reset the specified bus */
2360 		bus = cam_sim_bus(sim);
2361 		CAMLOCK_2_ISPLOCK(isp);
2362 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2363 		ISPLOCK_2_CAMLOCK(isp);
2364 		if (error)
2365 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2366 		else {
2367 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2368 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2369 			else if (isp->isp_path != NULL)
2370 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2371 			ccb->ccb_h.status = CAM_REQ_CMP;
2372 		}
2373 		xpt_done(ccb);
2374 		break;
2375 
2376 	case XPT_TERM_IO:		/* Terminate the I/O process */
2377 		ccb->ccb_h.status = CAM_REQ_INVALID;
2378 		xpt_done(ccb);
2379 		break;
2380 
2381 	case XPT_PATH_INQ:		/* Path routing inquiry */
2382 	{
2383 		struct ccb_pathinq *cpi = &ccb->cpi;
2384 
2385 		cpi->version_num = 1;
2386 #ifdef	ISP_TARGET_MODE
2387 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2388 #else
2389 		cpi->target_sprt = 0;
2390 #endif
2391 		cpi->hba_eng_cnt = 0;
2392 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2393 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2394 		cpi->bus_id = cam_sim_bus(sim);
2395 		if (IS_FC(isp)) {
2396 			cpi->hba_misc = PIM_NOBUSRESET;
2397 			/*
2398 			 * Because our loop ID can shift from time to time,
2399 			 * make our initiator ID out of range of our bus.
2400 			 */
2401 			cpi->initiator_id = cpi->max_target + 1;
2402 
2403 			/*
2404 			 * Set base transfer capabilities for Fibre Channel.
2405 			 * Technically not correct because we don't know
2406 			 * what media we're running on top of- but we'll
2407 			 * look good if we always say 100MB/s.
2408 			 */
2409 			if (FCPARAM(isp)->isp_gbspeed == 2)
2410 				cpi->base_transfer_speed = 200000;
2411 			else
2412 				cpi->base_transfer_speed = 100000;
2413 			cpi->hba_inquiry = PI_TAG_ABLE;
2414 #ifdef	CAM_NEW_TRAN_CODE
2415 			cpi->transport = XPORT_FC;
2416 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2417 #endif
2418 		} else {
2419 			sdparam *sdp = isp->isp_param;
2420 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2421 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2422 			cpi->hba_misc = 0;
2423 			cpi->initiator_id = sdp->isp_initiator_id;
2424 			cpi->base_transfer_speed = 3300;
2425 #ifdef	CAM_NEW_TRAN_CODE
2426 			cpi->transport = XPORT_SPI;
2427 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2428 #endif
2429 		}
2430 #ifdef	CAM_NEW_TRAN_CODE
2431 		cpi->protocol = PROTO_SCSI;
2432 		cpi->protocol_version = SCSI_REV_2;
2433 #endif
2434 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2435 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2436 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2437 		cpi->unit_number = cam_sim_unit(sim);
2438 		cpi->ccb_h.status = CAM_REQ_CMP;
2439 		xpt_done(ccb);
2440 		break;
2441 	}
2442 	default:
2443 		ccb->ccb_h.status = CAM_REQ_INVALID;
2444 		xpt_done(ccb);
2445 		break;
2446 	}
2447 }
2448 
2449 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
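/*
 * Command completion from the core driver: map the result to CAM status,
 * drop any resource freeze we own, cancel the watchdog and hand the CCB
 * back to CAM.
 */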
2450 void
2451 isp_done(struct ccb_scsiio *sccb)
2452 {
2453 	struct ispsoftc *isp = XS_ISP(sccb);
2454 
2455 	if (XS_NOERR(sccb))
2456 		XS_SETERR(sccb, CAM_REQ_CMP);
2457 
2458 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2459 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2460 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2461 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2462 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2463 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2464 		} else {
2465 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2466 		}
2467 	}
2468 
2469 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2470 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2471 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2472 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2473 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2474 			if (sccb->scsi_status != SCSI_STATUS_OK)
2475 				isp_prt(isp, ISP_LOGDEBUG2,
2476 				    "freeze devq %d.%d %x %x",
2477 				    sccb->ccb_h.target_id,
2478 				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
2479 				    sccb->scsi_status);
2480 		}
2481 	}
2482 
2483 	/*
2484 	 * If the simq was frozen waiting for resources, clear that
2485 	 * freeze. If no freeze reasons remain and the devq isn't frozen
2486 	 * by this CCB, mark the completing CCB so that the XPT layer
2487 	 * releases the simq.
2488 	 */
2489 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
2490 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
2491 		if (isp->isp_osinfo.simqfrozen == 0) {
2492 			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2493 				isp_prt(isp, ISP_LOGDEBUG2,
2494 				    "isp_done->relsimq");
2495 				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2496 			} else {
2497 				isp_prt(isp, ISP_LOGDEBUG2,
2498 				    "isp_done->devq frozen");
2499 			}
2500 		} else {
2501 			isp_prt(isp, ISP_LOGDEBUG2,
2502 			    "isp_done -> simqfrozen = %x",
2503 			    isp->isp_osinfo.simqfrozen);
2504 		}
2505 	}
2506 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2507 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2508 		xpt_print_path(sccb->ccb_h.path);
2509 		isp_prt(isp, ISP_LOGINFO,
2510 		    "cam completion status 0x%x", sccb->ccb_h.status);
2511 	}
2512 
2513 	XS_CMD_S_DONE(sccb);
2514 	if (XS_CMD_WDOG_P(sccb) == 0) {
2515 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2516 		if (XS_CMD_GRACE_P(sccb)) {
2517 			isp_prt(isp, ISP_LOGDEBUG2,
2518 			    "finished command on borrowed time");
2519 		}
2520 		XS_CMD_S_CLEAR(sccb);
2521 		ISPLOCK_2_CAMLOCK(isp);
2522 		xpt_done((union ccb *) sccb);
2523 		CAMLOCK_2_ISPLOCK(isp);
2524 	}
2525 }
2526 
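/*
 * Asynchronous event handler: called by the core driver to report newly
 * negotiated target parameters, bus resets, FC loop and fabric changes,
 * target mode events and firmware crashes.
 */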
2527 int
2528 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2529 {
2530 	int bus, rv = 0;
2531 	switch (cmd) {
2532 	case ISPASYNC_NEW_TGT_PARAMS:
2533 	{
2534 #ifdef	CAM_NEW_TRAN_CODE
2535 		struct ccb_trans_settings_scsi *scsi;
2536 		struct ccb_trans_settings_spi *spi;
2537 #endif
2538 		int flags, tgt;
2539 		sdparam *sdp = isp->isp_param;
2540 		struct ccb_trans_settings cts;
2541 		struct cam_path *tmppath;
2542 
2543 		bzero(&cts, sizeof (struct ccb_trans_settings));
2544 
2545 		tgt = *((int *)arg);
2546 		bus = (tgt >> 16) & 0xffff;
2547 		tgt &= 0xffff;
2548 		sdp += bus;
2549 		ISPLOCK_2_CAMLOCK(isp);
2550 		if (xpt_create_path(&tmppath, NULL,
2551 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2552 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2553 			CAMLOCK_2_ISPLOCK(isp);
2554 			isp_prt(isp, ISP_LOGWARN,
2555 			    "isp_async cannot make temp path for %d.%d",
2556 			    tgt, bus);
2557 			rv = -1;
2558 			break;
2559 		}
2560 		CAMLOCK_2_ISPLOCK(isp);
2561 		flags = sdp->isp_devparam[tgt].actv_flags;
2562 #ifdef	CAM_NEW_TRAN_CODE
2563 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2564 		cts.protocol = PROTO_SCSI;
2565 		cts.transport = XPORT_SPI;
2566 
2567 		scsi = &cts.proto_specific.scsi;
2568 		spi = &cts.xport_specific.spi;
2569 
2570 		if (flags & DPARM_TQING) {
2571 			scsi->valid |= CTS_SCSI_VALID_TQ;
2572 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2573 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2574 		}
2575 
2576 		if (flags & DPARM_DISC) {
2577 			spi->valid |= CTS_SPI_VALID_DISC;
2578 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2579 		}
2580 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2581 		if (flags & DPARM_WIDE) {
2582 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2583 		} else {
2584 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2585 		}
2586 		if (flags & DPARM_SYNC) {
2587 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2588 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2589 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2590 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2591 		}
2592 #else
2593 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2594 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2595 		if (flags & DPARM_DISC) {
2596 			cts.flags |= CCB_TRANS_DISC_ENB;
2597 		}
2598 		if (flags & DPARM_TQING) {
2599 			cts.flags |= CCB_TRANS_TAG_ENB;
2600 		}
2601 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2602 		cts.bus_width = (flags & DPARM_WIDE)?
2603 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2604 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2605 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2606 		if (flags & DPARM_SYNC) {
2607 			cts.valid |=
2608 			    CCB_TRANS_SYNC_RATE_VALID |
2609 			    CCB_TRANS_SYNC_OFFSET_VALID;
2610 		}
2611 #endif
2612 		isp_prt(isp, ISP_LOGDEBUG2,
2613 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2614 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2615 		    sdp->isp_devparam[tgt].actv_offset, flags);
2616 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2617 		ISPLOCK_2_CAMLOCK(isp);
2618 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2619 		xpt_free_path(tmppath);
2620 		CAMLOCK_2_ISPLOCK(isp);
2621 		break;
2622 	}
2623 	case ISPASYNC_BUS_RESET:
2624 		bus = *((int *)arg);
2625 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2626 		    bus);
2627 		if (bus > 0 && isp->isp_path2) {
2628 			ISPLOCK_2_CAMLOCK(isp);
2629 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2630 			CAMLOCK_2_ISPLOCK(isp);
2631 		} else if (isp->isp_path) {
2632 			ISPLOCK_2_CAMLOCK(isp);
2633 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2634 			CAMLOCK_2_ISPLOCK(isp);
2635 		}
2636 		break;
2637 	case ISPASYNC_LIP:
2638 		if (isp->isp_path) {
2639 			if (isp->isp_osinfo.simqfrozen == 0) {
2640 				isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq");
2641 				ISPLOCK_2_CAMLOCK(isp);
2642 				xpt_freeze_simq(isp->isp_sim, 1);
2643 				CAMLOCK_2_ISPLOCK(isp);
2644 			}
2645 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2646 		}
2647 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2648 		break;
2649 	case ISPASYNC_LOOP_RESET:
2650 		if (isp->isp_path) {
2651 			if (isp->isp_osinfo.simqfrozen == 0) {
2652 				isp_prt(isp, ISP_LOGDEBUG0,
2653 				    "Loop Reset freeze simq");
2654 				ISPLOCK_2_CAMLOCK(isp);
2655 				xpt_freeze_simq(isp->isp_sim, 1);
2656 				CAMLOCK_2_ISPLOCK(isp);
2657 			}
2658 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2659 		}
2660 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2661 		break;
2662 	case ISPASYNC_LOOP_DOWN:
2663 		if (isp->isp_path) {
2664 			if (isp->isp_osinfo.simqfrozen == 0) {
2665 				isp_prt(isp, ISP_LOGDEBUG0,
2666 				    "loop down freeze simq");
2667 				ISPLOCK_2_CAMLOCK(isp);
2668 				xpt_freeze_simq(isp->isp_sim, 1);
2669 				CAMLOCK_2_ISPLOCK(isp);
2670 			}
2671 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2672 		}
2673 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2674 		break;
2675 	case ISPASYNC_LOOP_UP:
2676 		/*
2677 		 * Now we just note that Loop has come up. We don't
2678 		 * actually do anything because we're waiting for a
2679 		 * Change Notify before activating the FC cleanup
2680 		 * thread to look at the state of the loop again.
2681 		 */
2682 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2683 		break;
2684 	case ISPASYNC_PROMENADE:
2685 	{
2686 		struct cam_path *tmppath;
2687 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2688 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2689 		static const char *roles[4] = {
2690 		    "(none)", "Target", "Initiator", "Target/Initiator"
2691 		};
2692 		fcparam *fcp = isp->isp_param;
2693 		int tgt = *((int *) arg);
2694 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2695 		struct lportdb *lp = &fcp->portdb[tgt];
2696 
2697 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2698 		    roles[lp->roles & 0x3],
2699 		    (lp->valid)? "Arrived" : "Departed",
2700 		    (u_int32_t) (lp->port_wwn >> 32),
2701 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2702 		    (u_int32_t) (lp->node_wwn >> 32),
2703 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2704 
2705 		ISPLOCK_2_CAMLOCK(isp);
2706 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2707 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2708 			CAMLOCK_2_ISPLOCK(isp);
2709 			break;
2710 		}
2711 		/*
2712 		 * Policy: only announce targets.
2713 		 */
2714 		if (lp->roles & is_tgt_mask) {
2715 			if (lp->valid) {
2716 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2717 			} else {
2718 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2719 			}
2720 		}
2721 		xpt_free_path(tmppath);
2722 		CAMLOCK_2_ISPLOCK(isp);
2723 		break;
2724 	}
2725 	case ISPASYNC_CHANGE_NOTIFY:
2726 		if (arg == ISPASYNC_CHANGE_PDB) {
2727 			isp_prt(isp, ISP_LOGINFO,
2728 			    "Port Database Changed");
2729 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2730 			isp_prt(isp, ISP_LOGINFO,
2731 			    "Name Server Database Changed");
2732 		}
2733 		cv_signal(&isp->isp_osinfo.kthread_cv);
2734 		break;
2735 	case ISPASYNC_FABRIC_DEV:
2736 	{
2737 		int target, lrange;
2738 		struct lportdb *lp = NULL;
2739 		char *pt;
2740 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
2741 		u_int32_t portid;
2742 		u_int64_t wwpn, wwnn;
2743 		fcparam *fcp = isp->isp_param;
2744 
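		/*
		 * Assemble the 24-bit port ID and the 64-bit port and node
		 * WWNs from the byte arrays in the SNS GA_NXT response.
		 */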
2745 		portid =
2746 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
2747 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
2748 		    (((u_int32_t) resp->snscb_port_id[2]));
2749 
2750 		wwpn =
2751 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
2752 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
2753 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
2754 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
2755 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
2756 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
2757 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
2758 		    (((u_int64_t)resp->snscb_portname[7]));
2759 
2760 		wwnn =
2761 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
2762 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
2763 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
2764 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
2765 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
2766 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
2767 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
2768 		    (((u_int64_t)resp->snscb_nodename[7]));
2769 		if (portid == 0 || wwpn == 0) {
2770 			break;
2771 		}
2772 
2773 		switch (resp->snscb_port_type) {
2774 		case 1:
2775 			pt = "   N_Port";
2776 			break;
2777 		case 2:
2778 			pt = "  NL_Port";
2779 			break;
2780 		case 3:
2781 			pt = "F/NL_Port";
2782 			break;
2783 		case 0x7f:
2784 			pt = "  Nx_Port";
2785 			break;
2786 		case 0x81:
2787 			pt = "  F_port";
2788 			break;
2789 		case 0x82:
2790 			pt = "  FL_Port";
2791 			break;
2792 		case 0x84:
2793 			pt = "   E_port";
2794 			break;
2795 		default:
2796 			pt = "?";
2797 			break;
2798 		}
2799 		isp_prt(isp, ISP_LOGINFO,
2800 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
2801 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
2802 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
2803 		/*
2804 		 * We're only interested in SCSI_FCP types (for now)
2805 		 */
2806 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
2807 			break;
2808 		}
2809 		if (fcp->isp_topo != TOPO_F_PORT)
2810 			lrange = FC_SNS_ID+1;
2811 		else
2812 			lrange = 0;
2813 		/*
2814 		 * Is it already in our list?
2815 		 */
2816 		for (target = lrange; target < MAX_FC_TARG; target++) {
2817 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2818 				continue;
2819 			}
2820 			lp = &fcp->portdb[target];
2821 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
2822 				lp->fabric_dev = 1;
2823 				break;
2824 			}
2825 		}
2826 		if (target < MAX_FC_TARG) {
2827 			break;
2828 		}
2829 		for (target = lrange; target < MAX_FC_TARG; target++) {
2830 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2831 				continue;
2832 			}
2833 			lp = &fcp->portdb[target];
2834 			if (lp->port_wwn == 0) {
2835 				break;
2836 			}
2837 		}
2838 		if (target == MAX_FC_TARG) {
2839 			isp_prt(isp, ISP_LOGWARN,
2840 			    "no more space for fabric devices");
2841 			break;
2842 		}
2843 		lp->node_wwn = wwnn;
2844 		lp->port_wwn = wwpn;
2845 		lp->portid = portid;
2846 		lp->fabric_dev = 1;
2847 		break;
2848 	}
2849 #ifdef	ISP_TARGET_MODE
2850 	case ISPASYNC_TARGET_MESSAGE:
2851 	{
2852 		tmd_msg_t *mp = arg;
2853 		isp_prt(isp, ISP_LOGALL,
2854 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2855 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2856 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2857 		    mp->nt_msg[0]);
2858 		break;
2859 	}
2860 	case ISPASYNC_TARGET_EVENT:
2861 	{
2862 		tmd_event_t *ep = arg;
2863 		isp_prt(isp, ISP_LOGALL,
2864 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2865 		break;
2866 	}
2867 	case ISPASYNC_TARGET_ACTION:
2868 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2869 		default:
2870 			isp_prt(isp, ISP_LOGWARN,
2871 			   "event 0x%x for unhandled target action",
2872 			    ((isphdr_t *)arg)->rqs_entry_type);
2873 			break;
2874 		case RQSTYPE_ATIO:
2875 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2876 			break;
2877 		case RQSTYPE_ATIO2:
2878 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2879 			break;
2880 		case RQSTYPE_CTIO2:
2881 		case RQSTYPE_CTIO:
2882 			rv = isp_handle_platform_ctio(isp, arg);
2883 			break;
2884 		case RQSTYPE_ENABLE_LUN:
2885 		case RQSTYPE_MODIFY_LUN:
2886 			if (IS_DUALBUS(isp)) {
2887 				bus =
2888 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
2889 			} else {
2890 				bus = 0;
2891 			}
2892 			isp_cv_signal_rqe(isp, bus,
2893 			    ((lun_entry_t *)arg)->le_status);
2894 			break;
2895 		}
2896 		break;
2897 #endif
2898 	case ISPASYNC_FW_CRASH:
2899 	{
2900 		u_int16_t mbox1, mbox6;
2901 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
2902 		if (IS_DUALBUS(isp)) {
2903 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
2904 		} else {
2905 			mbox6 = 0;
2906 		}
2907 		isp_prt(isp, ISP_LOGERR,
2908 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
2909 		    mbox6, mbox1);
2910 		isp_reinit(isp);
2911 		break;
2912 	}
2913 	case ISPASYNC_UNHANDLED_RESPONSE:
2914 		break;
2915 	default:
2916 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2917 		break;
2918 	}
2919 	return (rv);
2920 }
2921 
2922 
2923 /*
2924  * Locks are held before coming here.
2925  */
2926 void
2927 isp_uninit(struct ispsoftc *isp)
2928 {
2929 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2930 	DISABLE_INTS(isp);
2931 }
2932 
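/*
 * Platform logging: honor the softc's debug level (ISP_LOGALL always
 * prints) and prefix each message with the device name and unit.
 */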
2933 void
2934 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2935 {
2936 	va_list ap;
2937 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2938 		return;
2939 	}
2940 	printf("%s: ", device_get_nameunit(isp->isp_dev));
2941 	va_start(ap, fmt);
2942 	vprintf(fmt, ap);
2943 	va_end(ap);
2944 	printf("\n");
2945 }
2946