xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 09e8dea79366f1e5b3a73e8a271b26e4b6bf2e6a)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
31 #include <machine/stdarg.h>	/* for use by isp_prt below */
32 #include <sys/conf.h>
33 #include <sys/ioccom.h>
34 #include <dev/isp/isp_ioctl.h>
35 
36 
37 static d_ioctl_t ispioctl;
38 static void isp_intr_enable(void *);
39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
40 static void isp_poll(struct cam_sim *);
41 #if	0
42 static void isp_relsim(void *);
43 #endif
44 static timeout_t isp_watchdog;
45 static void isp_kthread(void *);
46 static void isp_action(struct cam_sim *, union ccb *);
47 
48 
49 #define ISP_CDEV_MAJOR	248
50 static struct cdevsw isp_cdevsw = {
51 	/* open */	nullopen,
52 	/* close */	nullclose,
53 	/* read */	noread,
54 	/* write */	nowrite,
55 	/* ioctl */	ispioctl,
56 	/* poll */	nopoll,
57 	/* mmap */	nommap,
58 	/* strategy */	nostrategy,
59 	/* name */	"isp",
60 	/* maj */	ISP_CDEV_MAJOR,
61 	/* dump */	nodump,
62 	/* psize */	nopsize,
63 	/* flags */	D_TAPE,
64 };
65 
66 static struct ispsoftc *isplist = NULL;
67 
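/*
 * Attach CAM glue for an initialized HBA: allocate a SIM queue and SIM,
 * register the bus(es) with XPT, hook up async callbacks, start the FC
 * helper kthread where applicable, create the control device node and
 * link this softc onto the global isplist.
 */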
68 void
69 isp_attach(struct ispsoftc *isp)
70 {
71 	int primary, secondary;
72 	struct ccb_setasync csa;
73 	struct cam_devq *devq;
74 	struct cam_sim *sim;
75 	struct cam_path *path;
76 
77 	/*
78 	 * Establish (in the case of the 12X0) which bus is the primary.
79 	 */
80 
81 	primary = 0;
82 	secondary = 1;
83 
84 	/*
85 	 * Create the device queue for our SIM(s).
86 	 */
87 	devq = cam_simq_alloc(isp->isp_maxcmds);
88 	if (devq == NULL) {
89 		return;
90 	}
91 
92 	/*
93 	 * Construct our SIM entry.
94 	 */
95 	ISPLOCK_2_CAMLOCK(isp);
96 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
97 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
98 	if (sim == NULL) {
99 		cam_simq_free(devq);
100 		CAMLOCK_2_ISPLOCK(isp);
101 		return;
102 	}
103 	CAMLOCK_2_ISPLOCK(isp);
104 
105 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
106 	isp->isp_osinfo.ehook.ich_arg = isp;
107 	ISPLOCK_2_CAMLOCK(isp);
108 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
109 		cam_sim_free(sim, TRUE);
110 		CAMLOCK_2_ISPLOCK(isp);
111 		isp_prt(isp, ISP_LOGERR,
112 		    "could not establish interrupt enable hook");
113 		return;
114 	}
115 
116 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
117 		cam_sim_free(sim, TRUE);
118 		CAMLOCK_2_ISPLOCK(isp);
119 		return;
120 	}
121 
122 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
123 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
124 		xpt_bus_deregister(cam_sim_path(sim));
125 		cam_sim_free(sim, TRUE);
126 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
127 		CAMLOCK_2_ISPLOCK(isp);
128 		return;
129 	}
130 
131 	xpt_setup_ccb(&csa.ccb_h, path, 5);
132 	csa.ccb_h.func_code = XPT_SASYNC_CB;
133 	csa.event_enable = AC_LOST_DEVICE;
134 	csa.callback = isp_cam_async;
135 	csa.callback_arg = sim;
136 	xpt_action((union ccb *)&csa);
137 	CAMLOCK_2_ISPLOCK(isp);
138 	isp->isp_sim = sim;
139 	isp->isp_path = path;
140 	/*
141 	 * Create a kernel thread for fibre channel instances. We
142 	 * don't have dual channel FC cards.
143 	 */
144 	if (IS_FC(isp)) {
145 		ISPLOCK_2_CAMLOCK(isp);
146 		/* XXX: LOCK VIOLATION */
147 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
148 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
149 		    RFHIGHPID, "%s: fc_thrd",
150 		    device_get_nameunit(isp->isp_dev))) {
151 			xpt_bus_deregister(cam_sim_path(sim));
152 			cam_sim_free(sim, TRUE);
153 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
154 			CAMLOCK_2_ISPLOCK(isp);
155 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
156 			return;
157 		}
158 		CAMLOCK_2_ISPLOCK(isp);
159 	}
160 
161 
162 	/*
163 	 * If we have a second channel, construct SIM entry for that.
164 	 */
165 	if (IS_DUALBUS(isp)) {
166 		ISPLOCK_2_CAMLOCK(isp);
167 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
168 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
169 		if (sim == NULL) {
170 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
171 			xpt_free_path(isp->isp_path);
172 			cam_simq_free(devq);
173 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
174 			return;
175 		}
176 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
177 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
178 			xpt_free_path(isp->isp_path);
179 			cam_sim_free(sim, TRUE);
180 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
181 			CAMLOCK_2_ISPLOCK(isp);
182 			return;
183 		}
184 
185 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
186 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
187 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
188 			xpt_free_path(isp->isp_path);
189 			xpt_bus_deregister(cam_sim_path(sim));
190 			cam_sim_free(sim, TRUE);
191 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
192 			CAMLOCK_2_ISPLOCK(isp);
193 			return;
194 		}
195 
196 		xpt_setup_ccb(&csa.ccb_h, path, 5);
197 		csa.ccb_h.func_code = XPT_SASYNC_CB;
198 		csa.event_enable = AC_LOST_DEVICE;
199 		csa.callback = isp_cam_async;
200 		csa.callback_arg = sim;
201 		xpt_action((union ccb *)&csa);
202 		CAMLOCK_2_ISPLOCK(isp);
203 		isp->isp_sim2 = sim;
204 		isp->isp_path2 = path;
205 	}
206 
207 #ifdef	ISP_TARGET_MODE
208 	cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
209 	cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
210 	cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
211 	cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
212 #endif
213 	/*
214 	 * Create device nodes
215 	 */
216 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
217 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
218 
219 	if (isp->isp_role != ISP_ROLE_NONE) {
220 		isp->isp_state = ISP_RUNSTATE;
221 		ENABLE_INTS(isp);
222 	}
223 	if (isplist == NULL) {
224 		isplist = isp;
225 	} else {
226 		struct ispsoftc *tmp = isplist;
227 		while (tmp->isp_osinfo.next) {
228 			tmp = tmp->isp_osinfo.next;
229 		}
230 		tmp->isp_osinfo.next = isp;
231 	}
232 
233 }
234 
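/*
 * Control device ioctl handler: locate the softc whose unit number
 * matches the minor of the device node, then service the ISP_* ioctls
 * (debug level, statistics, HBA reset, FC rescan/LIP, port database and
 * HBA info queries, and optionally firmware crash dumps).
 *
 * Illustrative userland usage of ISP_SDBLEV (a sketch, not part of this
 * driver); the ioctl swaps in the new debug mask and returns the
 * previous one in the same integer:
 *
 *	int fd = open("/dev/isp0", O_RDWR);
 *	int level = 0x3;
 *	if (fd >= 0 && ioctl(fd, ISP_SDBLEV, &level) == 0)
 *		printf("previous debug level was 0x%x\n", level);
 */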
235 static int
236 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
237 {
238 	struct ispsoftc *isp;
239 	int retval = ENOTTY;
240 
241 	isp = isplist;
242 	while (isp) {
243 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
244 			break;
245 		}
246 		isp = isp->isp_osinfo.next;
247 	}
248 	if (isp == NULL)
249 		return (ENXIO);
250 
251 	switch (cmd) {
252 #ifdef	ISP_FW_CRASH_DUMP
253 	case ISP_GET_FW_CRASH_DUMP:
254 	{
255 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
256 		size_t sz;
257 
258 		retval = 0;
259 		if (IS_2200(isp))
260 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
261 		else
262 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
263 		ISP_LOCK(isp);
264 		if (ptr && *ptr) {
265 			void *uaddr = *((void **) addr);
266 			if (copyout(ptr, uaddr, sz)) {
267 				retval = EFAULT;
268 			} else {
269 				*ptr = 0;
270 			}
271 		} else {
272 			retval = ENXIO;
273 		}
274 		ISP_UNLOCK(isp);
275 		break;
276 	}
277 
278 	case ISP_FORCE_CRASH_DUMP:
279 		ISP_LOCK(isp);
280 		if ((isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN) == 0) {
281 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
282 			ISPLOCK_2_CAMLOCK(isp);
283 			xpt_freeze_simq(isp->isp_sim, 1);
284 			CAMLOCK_2_ISPLOCK(isp);
285 		}
286 		isp_fw_dump(isp);
287 		isp_reinit(isp);
288 		ISP_UNLOCK(isp);
289 		retval = 0;
290 		break;
291 #endif
292 	case ISP_SDBLEV:
293 	{
294 		int olddblev = isp->isp_dblev;
295 		isp->isp_dblev = *(int *)addr;
296 		*(int *)addr = olddblev;
297 		retval = 0;
298 		break;
299 	}
300 	case ISP_RESETHBA:
301 		ISP_LOCK(isp);
302 		isp_reinit(isp);
303 		ISP_UNLOCK(isp);
304 		retval = 0;
305 		break;
306 	case ISP_RESCAN:
307 		if (IS_FC(isp)) {
308 			ISP_LOCK(isp);
309 			if (isp_fc_runstate(isp, 5 * 1000000)) {
310 				retval = EIO;
311 			} else {
312 				retval = 0;
313 			}
314 			ISP_UNLOCK(isp);
315 		}
316 		break;
317 	case ISP_FC_LIP:
318 		if (IS_FC(isp)) {
319 			ISP_LOCK(isp);
320 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
321 				retval = EIO;
322 			} else {
323 				retval = 0;
324 			}
325 			ISP_UNLOCK(isp);
326 		}
327 		break;
328 	case ISP_FC_GETDINFO:
329 	{
330 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
331 		struct lportdb *lp;
332 
333 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
334 			retval = EINVAL;
335 			break;
336 		}
337 		ISP_LOCK(isp);
338 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
339 		if (lp->valid) {
340 			ifc->loopid = lp->loopid;
341 			ifc->portid = lp->portid;
342 			ifc->node_wwn = lp->node_wwn;
343 			ifc->port_wwn = lp->port_wwn;
344 			retval = 0;
345 		} else {
346 			retval = ENODEV;
347 		}
348 		ISP_UNLOCK(isp);
349 		break;
350 	}
351 	case ISP_GET_STATS:
352 	{
353 		isp_stats_t *sp = (isp_stats_t *) addr;
354 
355 		MEMZERO(sp, sizeof (*sp));
356 		sp->isp_stat_version = ISP_STATS_VERSION;
357 		sp->isp_type = isp->isp_type;
358 		sp->isp_revision = isp->isp_revision;
359 		ISP_LOCK(isp);
360 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
361 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
362 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
363 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
364 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
365 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
366 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
367 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
368 		ISP_UNLOCK(isp);
369 		retval = 0;
370 		break;
371 	}
372 	case ISP_CLR_STATS:
373 		ISP_LOCK(isp);
374 		isp->isp_intcnt = 0;
375 		isp->isp_intbogus = 0;
376 		isp->isp_intmboxc = 0;
377 		isp->isp_intoasync = 0;
378 		isp->isp_rsltccmplt = 0;
379 		isp->isp_fphccmplt = 0;
380 		isp->isp_rscchiwater = 0;
381 		isp->isp_fpcchiwater = 0;
382 		ISP_UNLOCK(isp);
383 		retval = 0;
384 		break;
385 	case ISP_FC_GETHINFO:
386 	{
387 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
388 		MEMZERO(hba, sizeof (*hba));
389 		ISP_LOCK(isp);
390 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
391 		hba->fc_scsi_supported = 1;
392 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
393 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
394 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
395 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
396 		ISP_UNLOCK(isp);
397 		retval = 0;
398 		break;
399 	}
400 	default:
401 		break;
402 	}
403 	return (retval);
404 }
405 
406 static void
407 isp_intr_enable(void *arg)
408 {
409 	struct ispsoftc *isp = arg;
410 	if (isp->isp_role != ISP_ROLE_NONE) {
411 		ENABLE_INTS(isp);
412 		isp->isp_osinfo.intsok = 1;
413 	}
414 	/* Release our hook so that the boot can continue. */
415 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
416 }
417 
418 /*
419  * Put the target mode functions here, because some are inlines
420  */
421 
422 #ifdef	ISP_TARGET_MODE
423 
424 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
425 static __inline int are_any_luns_enabled(struct ispsoftc *, int);
426 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
427 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
428 static __inline int isp_psema_sig_rqe(struct ispsoftc *, int);
429 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
430 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int, int);
431 static __inline void isp_vsema_rqe(struct ispsoftc *, int);
432 static __inline atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
433 static cam_status
434 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
435 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
436 static void isp_en_lun(struct ispsoftc *, union ccb *);
437 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
438 static timeout_t isp_refire_putback_atio;
439 static void isp_complete_ctio(union ccb *);
440 static void isp_target_putback_atio(union ccb *);
441 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
442 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
443 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
444 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
445 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
446 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
447 
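/*
 * Return nonzero if a lun state structure for this bus/lun pair is
 * already present in the lun hash.
 */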
448 static __inline int
449 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
450 {
451 	tstate_t *tptr;
452 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
453 	if (tptr == NULL) {
454 		return (0);
455 	}
456 	do {
457 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
458 			return (1);
459 		}
460 	} while ((tptr = tptr->next) != NULL);
461 	return (0);
462 }
463 
464 static __inline int
465 are_any_luns_enabled(struct ispsoftc *isp, int port)
466 {
467 	int lo, hi;
468 	if (IS_DUALBUS(isp)) {
469 		lo = (port * (LUN_HASH_SIZE >> 1));
470 		hi = lo + (LUN_HASH_SIZE >> 1);
471 	} else {
472 		lo = 0;
473 		hi = LUN_HASH_SIZE;
474 	}
475 	for (; lo < hi; lo++) {
476 		if (isp->isp_osinfo.lun_hash[lo]) {
477 			return (1);
478 		}
479 	}
480 	return (0);
481 }
482 
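/*
 * Find the lun state for a bus/lun and take a hold reference on it.
 * A wildcard lun maps to the per-bus default state if wildcard target
 * mode is enabled.  The hold is dropped via rls_lun_statep.
 */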
483 static __inline tstate_t *
484 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
485 {
486 	tstate_t *tptr = NULL;
487 
488 	if (lun == CAM_LUN_WILDCARD) {
489 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
490 			tptr = &isp->isp_osinfo.tsdflt[bus];
491 			tptr->hold++;
492 			return (tptr);
493 		}
494 	} else {
495 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
496 		if (tptr == NULL) {
497 			return (NULL);
498 		}
499 	}
500 
501 	do {
502 		if (tptr->lun == lun && tptr->bus == bus) {
503 			tptr->hold++;
504 			return (tptr);
505 		}
506 	} while ((tptr = tptr->next) != NULL);
507 	return (tptr);
508 }
509 
510 static __inline void
511 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
512 {
513 	if (tptr->hold)
514 		tptr->hold--;
515 }
516 
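/*
 * Per-bus binary semaphore used to serialize ENABLE/MODIFY/DISABLE LUN
 * requests: isp_psema_sig_rqe acquires it (waiting interruptibly while
 * busy), isp_cv_wait_timed_rqe waits for the completion status that the
 * interrupt path posts via isp_cv_signal_rqe, and isp_vsema_rqe releases
 * the semaphore and wakes any waiter.
 */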
517 static __inline int
518 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
519 {
520 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
521 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
522 		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
523 			return (-1);
524 		}
525 	}
526 	isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
527 	return (0);
528 }
529 
530 static __inline int
531 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
532 {
533 	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
534 		return (-1);
535 	}
536 	return (0);
537 }
538 
539 static __inline void
540 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
541 {
542 	isp->isp_osinfo.rstatus[bus] = status;
543 	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
544 }
545 
546 static __inline void
547 isp_vsema_rqe(struct ispsoftc *isp, int bus)
548 {
549 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
550 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
551 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
552 	}
553 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
554 }
555 
556 static __inline atio_private_data_t *
557 isp_get_atpd(struct ispsoftc *isp, int tag)
558 {
559 	atio_private_data_t *atp;
560 	for (atp = isp->isp_osinfo.atpdp;
561 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
562 		if (atp->tag == tag)
563 			return (atp);
564 	}
565 	return (NULL);
566 }
567 
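/*
 * Allocate a new lun state for a bus/lun, give it its own path, and
 * insert it into the lun hash with an initial hold.
 */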
568 static cam_status
569 create_lun_state(struct ispsoftc *isp, int bus,
570     struct cam_path *path, tstate_t **rslt)
571 {
572 	cam_status status;
573 	lun_id_t lun;
574 	int hfx;
575 	tstate_t *tptr, *new;
576 
577 	lun = xpt_path_lun_id(path);
578 	if (lun < 0) {
579 		return (CAM_LUN_INVALID);
580 	}
581 	if (is_lun_enabled(isp, bus, lun)) {
582 		return (CAM_LUN_ALRDY_ENA);
583 	}
584 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
585 	if (new == NULL) {
586 		return (CAM_RESRC_UNAVAIL);
587 	}
588 
589 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
590 	    xpt_path_target_id(path), xpt_path_lun_id(path));
591 	if (status != CAM_REQ_CMP) {
592 		free(new, M_DEVBUF);
593 		return (status);
594 	}
595 	new->bus = bus;
596 	new->lun = lun;
597 	SLIST_INIT(&new->atios);
598 	SLIST_INIT(&new->inots);
599 	new->hold = 1;
600 
601 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
602 	tptr = isp->isp_osinfo.lun_hash[hfx];
603 	if (tptr == NULL) {
604 		isp->isp_osinfo.lun_hash[hfx] = new;
605 	} else {
606 		while (tptr->next)
607 			tptr = tptr->next;
608 		tptr->next = new;
609 	}
610 	*rslt = new;
611 	return (CAM_REQ_CMP);
612 }
613 
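/*
 * Unlink an unheld lun state from the lun hash and free it.
 */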
614 static __inline void
615 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
616 {
617 	int hfx;
618 	tstate_t *lw, *pw;
619 
620 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
621 	if (tptr->hold) {
622 		return;
623 	}
624 	pw = isp->isp_osinfo.lun_hash[hfx];
625 	if (pw == NULL) {
626 		return;
627 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
628 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
629 	} else {
630 		lw = pw;
631 		pw = lw->next;
632 		while (pw) {
633 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
634 				lw->next = pw->next;
635 				break;
636 			}
637 			lw = pw;
638 			pw = pw->next;
639 		}
640 		if (pw == NULL) {
641 			return;
642 		}
643 	}
644 	free(tptr, M_DEVBUF);
645 }
646 
647 /*
648  * We enter with our locks held.
649  */
650 static void
651 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
652 {
653 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
654 	struct ccb_en_lun *cel = &ccb->cel;
655 	tstate_t *tptr;
656 	u_int16_t rstat;
657 	int bus, cmd, av, wildcard;
658 	lun_id_t lun;
659 	target_id_t tgt;
660 
661 
662 	bus = XS_CHANNEL(ccb) & 0x1;
663 	tgt = ccb->ccb_h.target_id;
664 	lun = ccb->ccb_h.target_lun;
665 
666 	/*
667 	 * Do some sanity checking first.
668 	 */
669 
670 	if ((lun != CAM_LUN_WILDCARD) &&
671 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
672 		ccb->ccb_h.status = CAM_LUN_INVALID;
673 		return;
674 	}
675 
676 	if (IS_SCSI(isp)) {
677 		sdparam *sdp = isp->isp_param;
678 		sdp += bus;
679 		if (tgt != CAM_TARGET_WILDCARD &&
680 		    tgt != sdp->isp_initiator_id) {
681 			ccb->ccb_h.status = CAM_TID_INVALID;
682 			return;
683 		}
684 	} else {
685 		if (tgt != CAM_TARGET_WILDCARD &&
686 		    tgt != FCPARAM(isp)->isp_iid) {
687 			ccb->ccb_h.status = CAM_TID_INVALID;
688 			return;
689 		}
690 		/*
691 		 * This is as good a place as any to check f/w capabilities.
692 		 */
693 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
694 			isp_prt(isp, ISP_LOGERR,
695 			    "firmware does not support target mode");
696 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
697 			return;
698 		}
699 		/*
700 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
701 		 * XXX: dork with our already fragile enable/disable code.
702 		 */
703 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
704 			isp_prt(isp, ISP_LOGERR,
705 			    "firmware not SCCLUN capable");
706 		}
707 	}
708 
709 	if (tgt == CAM_TARGET_WILDCARD) {
710 		if (lun == CAM_LUN_WILDCARD) {
711 			wildcard = 1;
712 		} else {
713 			ccb->ccb_h.status = CAM_LUN_INVALID;
714 			return;
715 		}
716 	} else {
717 		wildcard = 0;
718 	}
719 
720 	/*
721 	 * Next check to see whether this is a target/lun wildcard action.
722 	 *
723 	 * If so, we know that we can accept commands for luns that haven't
724 	 * been enabled yet and send them upstream. Otherwise, we have to
725 	 * handle them locally (if we see them at all).
726 	 */
727 
728 	if (wildcard) {
729 		tptr = &isp->isp_osinfo.tsdflt[bus];
730 		if (cel->enable) {
731 			if (isp->isp_osinfo.tmflags[bus] &
732 			    TM_WILDCARD_ENABLED) {
733 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
734 				return;
735 			}
736 			ccb->ccb_h.status =
737 			    xpt_create_path(&tptr->owner, NULL,
738 			    xpt_path_path_id(ccb->ccb_h.path),
739 			    xpt_path_target_id(ccb->ccb_h.path),
740 			    xpt_path_lun_id(ccb->ccb_h.path));
741 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
742 				return;
743 			}
744 			SLIST_INIT(&tptr->atios);
745 			SLIST_INIT(&tptr->inots);
746 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
747 		} else {
748 			if ((isp->isp_osinfo.tmflags[bus] &
749 			    TM_WILDCARD_ENABLED) == 0) {
750 				ccb->ccb_h.status = CAM_REQ_CMP;
751 				return;
752 			}
753 			if (tptr->hold) {
754 				ccb->ccb_h.status = CAM_SCSI_BUSY;
755 				return;
756 			}
757 			xpt_free_path(tptr->owner);
758 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
759 		}
760 	}
761 
762 	/*
763 	 * Now check to see whether this bus needs to be
764 	 * enabled/disabled with respect to target mode.
765 	 */
766 	av = bus << 31;
767 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
768 		av |= ENABLE_TARGET_FLAG;
769 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
770 		if (av) {
771 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
772 			if (wildcard) {
773 				isp->isp_osinfo.tmflags[bus] &=
774 				    ~TM_WILDCARD_ENABLED;
775 				xpt_free_path(tptr->owner);
776 			}
777 			return;
778 		}
779 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
780 		isp_prt(isp, ISP_LOGINFO,
781 		    "Target Mode enabled on channel %d", bus);
782 	} else if (cel->enable == 0 &&
783 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
784 		if (are_any_luns_enabled(isp, bus)) {
785 			ccb->ccb_h.status = CAM_SCSI_BUSY;
786 			return;
787 		}
788 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
789 		if (av) {
790 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
791 			return;
792 		}
793 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
794 		isp_prt(isp, ISP_LOGINFO,
795 		    "Target Mode disabled on channel %d", bus);
796 	}
797 
798 	if (wildcard) {
799 		ccb->ccb_h.status = CAM_REQ_CMP;
800 		return;
801 	}
802 
803 	if (cel->enable) {
804 		ccb->ccb_h.status =
805 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
806 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
807 			return;
808 		}
809 	} else {
810 		tptr = get_lun_statep(isp, bus, lun);
811 		if (tptr == NULL) {
812 			ccb->ccb_h.status = CAM_LUN_INVALID;
813 			return;
814 		}
815 	}
816 
817 	if (isp_psema_sig_rqe(isp, bus)) {
818 		rls_lun_statep(isp, tptr);
819 		if (cel->enable)
820 			destroy_lun_state(isp, tptr);
821 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
822 		return;
823 	}
824 
825 	if (cel->enable) {
826 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
827 		int c, n, ulun = lun;
828 
829 		cmd = RQSTYPE_ENABLE_LUN;
830 		c = DFLT_CMND_CNT;
831 		n = DFLT_INOT_CNT;
832 		if (IS_FC(isp) && lun != 0) {
833 			cmd = RQSTYPE_MODIFY_LUN;
834 			n = 0;
835 			/*
836 			 * For SCC firmware, we only deal with setting
837 			 * (enabling or modifying) lun 0.
838 			 */
839 			ulun = 0;
840 		}
841 		rstat = LUN_ERR;
842 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
843 			xpt_print_path(ccb->ccb_h.path);
844 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
845 			goto out;
846 		}
847 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
848 			xpt_print_path(ccb->ccb_h.path);
849 			isp_prt(isp, ISP_LOGERR,
850 			    "wait for ENABLE/MODIFY LUN timed out");
851 			goto out;
852 		}
853 		rstat = isp->isp_osinfo.rstatus[bus];
854 		if (rstat != LUN_OK) {
855 			xpt_print_path(ccb->ccb_h.path);
856 			isp_prt(isp, ISP_LOGERR,
857 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
858 			goto out;
859 		}
860 	} else {
861 		int c, n, ulun = lun;
862 		u_int32_t seq;
863 
864 		rstat = LUN_ERR;
865 		seq = isp->isp_osinfo.rollinfo++;
866 		cmd = -RQSTYPE_MODIFY_LUN;
867 
868 		c = DFLT_CMND_CNT;
869 		n = DFLT_INOT_CNT;
870 		if (IS_FC(isp) && lun != 0) {
871 			n = 0;
872 			/*
873 			 * For SCC firmware, we only deal with setting
874 			 * (enabling or modifying) lun 0.
875 			 */
876 			ulun = 0;
877 		}
878 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
879 			xpt_print_path(ccb->ccb_h.path);
880 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
881 			goto out;
882 		}
883 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
884 			xpt_print_path(ccb->ccb_h.path);
885 			isp_prt(isp, ISP_LOGERR,
886 			    "wait for MODIFY LUN timed out");
887 			goto out;
888 		}
889 		rstat = isp->isp_osinfo.rstatus[bus];
890 		if (rstat != LUN_OK) {
891 			xpt_print_path(ccb->ccb_h.path);
892 			isp_prt(isp, ISP_LOGERR,
893 			    "MODIFY LUN returned 0x%x", rstat);
894 			goto out;
895 		}
896 		if (IS_FC(isp) && lun) {
897 			goto out;
898 		}
899 
900 		seq = isp->isp_osinfo.rollinfo++;
901 
902 		rstat = LUN_ERR;
903 		cmd = -RQSTYPE_ENABLE_LUN;
904 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
905 			xpt_print_path(ccb->ccb_h.path);
906 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
907 			goto out;
908 		}
909 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
910 			xpt_print_path(ccb->ccb_h.path);
911 			isp_prt(isp, ISP_LOGERR,
912 			     "wait for DISABLE LUN timed out");
913 			goto out;
914 		}
915 		rstat = isp->isp_osinfo.rstatus[bus];
916 		if (rstat != LUN_OK) {
917 			xpt_print_path(ccb->ccb_h.path);
918 			isp_prt(isp, ISP_LOGWARN,
919 			    "DISABLE LUN returned 0x%x", rstat);
920 			goto out;
921 		}
922 		if (are_any_luns_enabled(isp, bus) == 0) {
923 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
924 			if (av) {
925 				isp_prt(isp, ISP_LOGWARN,
926 				    "disable target mode on channel %d failed",
927 				    bus);
928 				goto out;
929 			}
930 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
931 			xpt_print_path(ccb->ccb_h.path);
932 			isp_prt(isp, ISP_LOGINFO,
933 			    "Target Mode disabled on channel %d", bus);
934 		}
935 	}
936 
937 out:
938 	isp_vsema_rqe(isp, bus);
939 
940 	if (rstat != LUN_OK) {
941 		xpt_print_path(ccb->ccb_h.path);
942 		isp_prt(isp, ISP_LOGWARN,
943 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
944 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
945 		rls_lun_statep(isp, tptr);
946 		if (cel->enable)
947 			destroy_lun_state(isp, tptr);
948 	} else {
949 		xpt_print_path(ccb->ccb_h.path);
950 		isp_prt(isp, ISP_LOGINFO, lfmt,
951 		    (cel->enable) ? "en" : "dis", bus);
952 		rls_lun_statep(isp, tptr);
953 		if (cel->enable == 0) {
954 			destroy_lun_state(isp, tptr);
955 		}
956 		ccb->ccb_h.status = CAM_REQ_CMP;
957 	}
958 }
959 
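/*
 * Abort a queued ATIO or immediate notify CCB: find it on the lun's
 * pending list, remove it and mark it CAM_REQ_ABORTED.
 */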
960 static cam_status
961 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
962 {
963 	tstate_t *tptr;
964 	struct ccb_hdr_slist *lp;
965 	struct ccb_hdr *curelm;
966 	int found;
967 	union ccb *accb = ccb->cab.abort_ccb;
968 
969 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
970 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
971 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
972 			return (CAM_PATH_INVALID);
973 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
974 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
975 			return (CAM_PATH_INVALID);
976 		}
977 	}
978 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
979 	if (tptr == NULL) {
980 		return (CAM_PATH_INVALID);
981 	}
982 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
983 		lp = &tptr->atios;
984 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
985 		lp = &tptr->inots;
986 	} else {
987 		rls_lun_statep(isp, tptr);
988 		return (CAM_UA_ABORT);
989 	}
990 	curelm = SLIST_FIRST(lp);
991 	found = 0;
992 	if (curelm == &accb->ccb_h) {
993 		found = 1;
994 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
995 	} else {
996 		while(curelm != NULL) {
997 			struct ccb_hdr *nextelm;
998 
999 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1000 			if (nextelm == &accb->ccb_h) {
1001 				found = 1;
1002 				SLIST_NEXT(curelm, sim_links.sle) =
1003 				    SLIST_NEXT(nextelm, sim_links.sle);
1004 				break;
1005 			}
1006 			curelm = nextelm;
1007 		}
1008 	}
1009 	rls_lun_statep(isp, tptr);
1010 	if (found) {
1011 		accb->ccb_h.status = CAM_REQ_ABORTED;
1012 		return (CAM_REQ_CMP);
1013 	}
1014 	return(CAM_PATH_INVALID);
1015 }
1016 
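/*
 * Translate an XPT_CONT_TARGET_IO CCB into a CTIO (parallel SCSI) or
 * CTIO2 (fibre channel) request queue entry, save a handle for it, run
 * DMA setup for any data movement and add it to the request queue.
 */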
1017 static cam_status
1018 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1019 {
1020 	void *qe;
1021 	struct ccb_scsiio *cso = &ccb->csio;
1022 	u_int16_t *hp, save_handle;
1023 	u_int16_t nxti, optr;
1024 	u_int8_t local[QENTRY_LEN];
1025 
1026 
1027 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1028 		xpt_print_path(ccb->ccb_h.path);
1029 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1030 		return (CAM_RESRC_UNAVAIL);
1031 	}
1032 	bzero(local, QENTRY_LEN);
1033 
1034 	/*
1035 	 * We're either moving data or completing a command here.
1036 	 */
1037 
1038 	if (IS_FC(isp)) {
1039 		atio_private_data_t *atp;
1040 		ct2_entry_t *cto = (ct2_entry_t *) local;
1041 
1042 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1043 		cto->ct_header.rqs_entry_count = 1;
1044 		cto->ct_iid = cso->init_id;
1045 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1046 			cto->ct_lun = ccb->ccb_h.target_lun;
1047 		}
1048 
1049 		atp = isp_get_atpd(isp, cso->tag_id);
1050 		if (atp == NULL) {
1051 			isp_prt(isp, ISP_LOGERR,
1052 			    "cannot find private data adjunct for tag %x",
1053 			    cso->tag_id);
1054 			return (-1);
1055 		}
1056 
1057 		cto->ct_rxid = cso->tag_id;
1058 		if (cso->dxfer_len == 0) {
1059 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1060 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1061 				cto->ct_flags |= CT2_SENDSTATUS;
1062 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1063 				cto->ct_resid =
1064 				    atp->orig_datalen - atp->bytes_xfered;
1065 				if (cto->ct_resid < 0) {
1066 					cto->rsp.m1.ct_scsi_status |=
1067 					    CT2_DATA_OVER;
1068 				} else if (cto->ct_resid > 0) {
1069 					cto->rsp.m1.ct_scsi_status |=
1070 					    CT2_DATA_UNDER;
1071 				}
1072 			}
1073 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1074 				int m = min(cso->sense_len, MAXRESPLEN);
1075 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1076 				cto->rsp.m1.ct_senselen = m;
1077 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1078 			}
1079 		} else {
1080 			cto->ct_flags |= CT2_FLAG_MODE0;
1081 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1082 				cto->ct_flags |= CT2_DATA_IN;
1083 			} else {
1084 				cto->ct_flags |= CT2_DATA_OUT;
1085 			}
1086 			cto->ct_reloff = atp->bytes_xfered;
1087 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1088 				cto->ct_flags |= CT2_SENDSTATUS;
1089 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1090 				cto->ct_resid =
1091 				    atp->orig_datalen -
1092 				    (atp->bytes_xfered + cso->dxfer_len);
1093 				if (cto->ct_resid < 0) {
1094 					cto->rsp.m0.ct_scsi_status |=
1095 					    CT2_DATA_OVER;
1096 				} else if (cto->ct_resid > 0) {
1097 					cto->rsp.m0.ct_scsi_status |=
1098 					    CT2_DATA_UNDER;
1099 				}
1100 			} else {
1101 				atp->last_xframt = cso->dxfer_len;
1102 			}
1103 			/*
1104 			 * If we're sending data and status back together,
1105 			 * we can't send back sense data as well.
1106 			 */
1107 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1108 		}
1109 
1110 		if (cto->ct_flags & CT2_SENDSTATUS) {
1111 			isp_prt(isp, ISP_LOGTDEBUG0,
1112 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1113 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1114 			    cso->dxfer_len, cto->ct_resid);
1115 			cto->ct_flags |= CT2_CCINCR;
1116 			atp->state = ATPD_STATE_LAST_CTIO;
1117 		} else
1118 			atp->state = ATPD_STATE_CTIO;
1119 		cto->ct_timeout = 10;
1120 		hp = &cto->ct_syshandle;
1121 	} else {
1122 		ct_entry_t *cto = (ct_entry_t *) local;
1123 
1124 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1125 		cto->ct_header.rqs_entry_count = 1;
1126 		cto->ct_iid = cso->init_id;
1127 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1128 		cto->ct_tgt = ccb->ccb_h.target_id;
1129 		cto->ct_lun = ccb->ccb_h.target_lun;
1130 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1131 		if (AT_HAS_TAG(cso->tag_id)) {
1132 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1133 			cto->ct_flags |= CT_TQAE;
1134 		}
1135 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1136 			cto->ct_flags |= CT_NODISC;
1137 		}
1138 		if (cso->dxfer_len == 0) {
1139 			cto->ct_flags |= CT_NO_DATA;
1140 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1141 			cto->ct_flags |= CT_DATA_IN;
1142 		} else {
1143 			cto->ct_flags |= CT_DATA_OUT;
1144 		}
1145 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1146 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1147 			cto->ct_scsi_status = cso->scsi_status;
1148 			cto->ct_resid = cso->resid;
1149 			isp_prt(isp, ISP_LOGTDEBUG0,
1150 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1151 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1152 			    cso->tag_id);
1153 		}
1154 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1155 		cto->ct_timeout = 10;
1156 		hp = &cto->ct_syshandle;
1157 	}
1158 
1159 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1160 		xpt_print_path(ccb->ccb_h.path);
1161 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1162 		return (CAM_RESRC_UNAVAIL);
1163 	}
1164 
1165 
1166 	/*
1167 	 * Call the dma setup routines for this entry (and any subsequent
1168 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1169 	 * new things to play with. As with isp_start's usage of DMA setup,
1170 	 * any swizzling is done in the machine dependent layer. Because
1171 	 * of this, we put the request onto the queue area first in native
1172 	 * format.
1173 	 */
1174 
1175 	save_handle = *hp;
1176 
1177 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1178 	case CMD_QUEUED:
1179 		ISP_ADD_REQUEST(isp, nxti);
1180 		return (CAM_REQ_INPROG);
1181 
1182 	case CMD_EAGAIN:
1183 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1184 		isp_destroy_handle(isp, save_handle);
1185 		return (CAM_RESRC_UNAVAIL);
1186 
1187 	default:
1188 		isp_destroy_handle(isp, save_handle);
1189 		return (XS_ERR(ccb));
1190 	}
1191 }
1192 
1193 static void
1194 isp_refire_putback_atio(void *arg)
1195 {
1196 	int s = splcam();
1197 	isp_target_putback_atio(arg);
1198 	splx(s);
1199 }
1200 
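/*
 * Hand the ATIO that originated this exchange back to the firmware so
 * the resource is replenished, then complete the CTIO CCB to CAM.  If
 * no request queue entry is available, retry from a timeout.
 */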
1201 static void
1202 isp_target_putback_atio(union ccb *ccb)
1203 {
1204 	struct ispsoftc *isp;
1205 	struct ccb_scsiio *cso;
1206 	u_int16_t nxti, optr;
1207 	void *qe;
1208 
1209 	isp = XS_ISP(ccb);
1210 
1211 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1212 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1213 		isp_prt(isp, ISP_LOGWARN,
1214 		    "isp_target_putback_atio: Request Queue Overflow");
1215 		return;
1216 	}
1217 	bzero(qe, QENTRY_LEN);
1218 	cso = &ccb->csio;
1219 	if (IS_FC(isp)) {
1220 		at2_entry_t local, *at = &local;
1221 		MEMZERO(at, sizeof (at2_entry_t));
1222 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1223 		at->at_header.rqs_entry_count = 1;
1224 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1225 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1226 		} else {
1227 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1228 		}
1229 		at->at_status = CT_OK;
1230 		at->at_rxid = cso->tag_id;
1231 		at->at_iid = cso->ccb_h.target_id;
1232 		isp_put_atio2(isp, at, qe);
1233 	} else {
1234 		at_entry_t local, *at = &local;
1235 		MEMZERO(at, sizeof (at_entry_t));
1236 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1237 		at->at_header.rqs_entry_count = 1;
1238 		at->at_iid = cso->init_id;
1239 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1240 		at->at_tgt = cso->ccb_h.target_id;
1241 		at->at_lun = cso->ccb_h.target_lun;
1242 		at->at_status = CT_OK;
1243 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1244 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1245 		isp_put_atio(isp, at, qe);
1246 	}
1247 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1248 	ISP_ADD_REQUEST(isp, nxti);
1249 	isp_complete_ctio(ccb);
1250 }
1251 
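/*
 * Complete a target mode CCB back to CAM, releasing the SIM queue if we
 * had frozen it for a resource shortage.
 */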
1252 static void
1253 isp_complete_ctio(union ccb *ccb)
1254 {
1255 	struct ispsoftc *isp = XS_ISP(ccb);
1256 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1257 		ccb->ccb_h.status |= CAM_REQ_CMP;
1258 	}
1259 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1260 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1261 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1262 		if (isp->isp_osinfo.simqfrozen == 0) {
1263 			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1264 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
1265 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1266 			} else {
1267 				isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen");
1268 			}
1269 		} else {
1270 			isp_prt(isp, ISP_LOGWARN,
1271 			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
1272 		}
1273 	}
1274 	xpt_done(ccb);
1275 }
1276 
1277 /*
1278  * Handle ATIO stuff that the generic code can't.
1279  * This means handling CDBs.
1280  */
1281 
1282 static int
1283 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1284 {
1285 	tstate_t *tptr;
1286 	int status, bus, iswildcard;
1287 	struct ccb_accept_tio *atiop;
1288 
1289 	/*
1290 	 * The firmware status (except for the QLTM_SVALID bit)
1291 	 * indicates why this ATIO was sent to us.
1292 	 *
1293 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1294 	 *
1295 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1296 	 * we're still connected on the SCSI bus.
1297 	 */
1298 	status = aep->at_status;
1299 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1300 		/*
1301 		 * Bus Phase Sequence error. We should have sense data
1302 		 * suggested by the f/w. I'm not quite sure yet what
1303 		 * to do about this for CAM.
1304 		 */
1305 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1306 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1307 		return (0);
1308 	}
1309 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1310 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1311 		    status);
1312 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1313 		return (0);
1314 	}
1315 
1316 	bus = GET_BUS_VAL(aep->at_iid);
1317 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1318 	if (tptr == NULL) {
1319 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1320 		iswildcard = 1;
1321 	} else {
1322 		iswildcard = 0;
1323 	}
1324 
1325 	if (tptr == NULL) {
1326 		/*
1327 		 * Because we can't autofeed sense data back with
1328 		 * a command for parallel SCSI, we can't give back
1329 		 * a CHECK CONDITION. We'll give back a BUSY status
1330 		 * instead. This works out okay because the only
1331 		 * time we should, in fact, get this, is in the
1332 		 * case that somebody configured us without the
1333 		 * blackhole driver, so they get what they deserve.
1334 		 */
1335 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1336 		return (0);
1337 	}
1338 
1339 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1340 	if (atiop == NULL) {
1341 		/*
1342 		 * Because we can't autofeed sense data back with
1343 		 * a command for parallel SCSI, we can't give back
1344 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1345 		 * instead. This works out okay because the only time we
1346 		 * should, in fact, get this, is in the case that we've
1347 		 * run out of ATIOS.
1348 		 */
1349 		xpt_print_path(tptr->owner);
1350 		isp_prt(isp, ISP_LOGWARN,
1351 		    "no ATIOS for lun %d from initiator %d on channel %d",
1352 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1353 		if (aep->at_flags & AT_TQAE)
1354 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1355 		else
1356 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1357 		rls_lun_statep(isp, tptr);
1358 		return (0);
1359 	}
1360 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1361 	if (iswildcard) {
1362 		atiop->ccb_h.target_id = aep->at_tgt;
1363 		atiop->ccb_h.target_lun = aep->at_lun;
1364 	}
1365 	if (aep->at_flags & AT_NODISC) {
1366 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1367 	} else {
1368 		atiop->ccb_h.flags = 0;
1369 	}
1370 
1371 	if (status & QLTM_SVALID) {
1372 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1373 		atiop->sense_len = amt;
1374 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1375 	} else {
1376 		atiop->sense_len = 0;
1377 	}
1378 
1379 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1380 	atiop->cdb_len = aep->at_cdblen;
1381 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1382 	atiop->ccb_h.status = CAM_CDB_RECVD;
1383 	/*
1384 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1385 	 * and the handle (which we have to preserve).
1386 	 */
1387 	AT_MAKE_TAGID(atiop->tag_id, aep);
1388 	if (aep->at_flags & AT_TQAE) {
1389 		atiop->tag_action = aep->at_tag_type;
1390 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1391 	}
1392 	xpt_done((union ccb*)atiop);
1393 	isp_prt(isp, ISP_LOGTDEBUG0,
1394 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1395 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1396 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1397 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1398 	    "nondisc" : "disconnecting");
1399 	rls_lun_statep(isp, tptr);
1400 	return (0);
1401 }
1402 
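/*
 * Handle an incoming fibre channel ATIO2: locate the lun state, pull a
 * free ATIO CCB and ATIO private data structure, fill them in from the
 * firmware entry and pass the command up to CAM.
 */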
1403 static int
1404 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1405 {
1406 	lun_id_t lun;
1407 	tstate_t *tptr;
1408 	struct ccb_accept_tio *atiop;
1409 	atio_private_data_t *atp;
1410 
1411 	/*
1412 	 * The firmware status (except for the QLTM_SVALID bit)
1413 	 * indicates why this ATIO was sent to us.
1414 	 *
1415 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1416 	 */
1417 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1418 		isp_prt(isp, ISP_LOGWARN,
1419 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1420 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1421 		return (0);
1422 	}
1423 
1424 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1425 		lun = aep->at_scclun;
1426 	} else {
1427 		lun = aep->at_lun;
1428 	}
1429 	tptr = get_lun_statep(isp, 0, lun);
1430 	if (tptr == NULL) {
1431 		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1432 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1433 	}
1434 
1435 	if (tptr == NULL) {
1436 		/*
1437 		 * What we'd like to know is whether or not we have a listener
1438 		 * upstream that really hasn't configured yet. If we do, then
1439 		 * we can give a more sensible reply here. If not, then we can
1440 		 * reject this out of hand.
1441 		 *
1442 		 * Choices for what to send were
1443 		 *
1444 		 *	Not Ready, Unit Not Self-Configured Yet
1445 		 *	(0x2,0x3e,0x00)
1446 		 *
1447 		 * for the former and
1448 		 *
1449 		 *	Illegal Request, Logical Unit Not Supported
1450 		 *	(0x5,0x25,0x00)
1451 		 *
1452 		 * for the latter.
1453 		 *
1454 		 * We used to decide whether there was at least one listener
1455 		 * based upon whether the black hole driver was configured.
1456 		 * However, recent config(8) changes have made this hard to do
1457 		 * at this time.
1458 		 *
1459 		 */
1460 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1461 		return (0);
1462 	}
1463 
1464 	atp = isp_get_atpd(isp, 0);
1465 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1466 	if (atiop == NULL || atp == NULL) {
1467 		/*
1468 		 * Because we can't autofeed sense data back with
1469 		 * a command for parallel SCSI, we can't give back
1470 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1471 		 * instead. This works out okay because the only time we
1472 		 * should, in fact, get this, is in the case that we've
1473 		 * run out of ATIOS.
1474 		 */
1475 		xpt_print_path(tptr->owner);
1476 		isp_prt(isp, ISP_LOGWARN,
1477 		    "no %s for lun %d from initiator %d",
1478 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1479 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1480 		rls_lun_statep(isp, tptr);
1481 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1482 		return (0);
1483 	}
1484 	atp->state = ATPD_STATE_ATIO;
1485 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1486 	tptr->atio_count--;
1487 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1488 	    lun, tptr->atio_count);
1489 
1490 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1491 		atiop->ccb_h.target_id =
1492 		    ((fcparam *)isp->isp_param)->isp_loopid;
1493 		atiop->ccb_h.target_lun = lun;
1494 	}
1495 	/*
1496 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1497 	 */
1498 	atiop->sense_len = 0;
1499 
1500 	atiop->init_id = aep->at_iid;
1501 	atiop->cdb_len = ATIO2_CDBLEN;
1502 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1503 	atiop->ccb_h.status = CAM_CDB_RECVD;
1504 	atiop->tag_id = aep->at_rxid;
1505 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1506 	case ATIO2_TC_ATTR_SIMPLEQ:
1507 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1508 		break;
1509 	case ATIO2_TC_ATTR_HEADOFQ:
1510 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1511 		break;
1512 	case ATIO2_TC_ATTR_ORDERED:
1513 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1514 		break;
1515 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1516 	case ATIO2_TC_ATTR_UNTAGGED:
1517 	default:
1518 		atiop->tag_action = 0;
1519 		break;
1520 	}
1521 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1522 
1523 	atp->tag = atiop->tag_id;
1524 	atp->lun = lun;
1525 	atp->orig_datalen = aep->at_datalen;
1526 	atp->last_xframt = 0;
1527 	atp->bytes_xfered = 0;
1528 	atp->state = ATPD_STATE_CAM;
1529 	xpt_done((union ccb*)atiop);
1530 
1531 	isp_prt(isp, ISP_LOGTDEBUG0,
1532 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1533 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1534 	    lun, aep->at_taskflags, aep->at_datalen);
1535 	rls_lun_statep(isp, tptr);
1536 	return (0);
1537 }
1538 
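/*
 * Handle a completed CTIO or CTIO2: account for data moved, and either
 * complete the CCB to CAM or, on error, put the original ATIO back to
 * the firmware.
 */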
1539 static int
1540 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1541 {
1542 	union ccb *ccb;
1543 	int sentstatus, ok, notify_cam, resid = 0;
1544 	u_int16_t tval;
1545 
1546 	/*
1547 	 * CTIO and CTIO2 are close enough....
1548 	 */
1549 
1550 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1551 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1552 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1553 
1554 	if (IS_FC(isp)) {
1555 		ct2_entry_t *ct = arg;
1556 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1557 		if (atp == NULL) {
1558 			isp_prt(isp, ISP_LOGERR,
1559 			    "cannot find adjunct for %x after I/O",
1560 			    ct->ct_rxid);
1561 			return (0);
1562 		}
1563 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1564 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1565 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1566 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1567 		}
1568 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1569 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1570 			resid = ct->ct_resid;
1571 			atp->bytes_xfered += (atp->last_xframt - resid);
1572 			atp->last_xframt = 0;
1573 		}
1574 		if (sentstatus || !ok) {
1575 			atp->tag = 0;
1576 		}
1577 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1578 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1579 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1580 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1581 		    resid, sentstatus? "FIN" : "MID");
1582 		tval = ct->ct_rxid;
1583 
1584 		/* XXX: should really come after isp_complete_ctio */
1585 		atp->state = ATPD_STATE_PDON;
1586 	} else {
1587 		ct_entry_t *ct = arg;
1588 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1589 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1590 		/*
1591 		 * We *ought* to be able to get back to the original ATIO
1592 		 * here, but for some reason this gets lost. It's just as
1593 		 * well because it's squirrelled away as part of periph
1594 		 * private data.
1595 		 *
1596 		 * We can live without it as long as we continue to use
1597 		 * the auto-replenish feature for CTIOs.
1598 		 */
1599 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1600 		if (ct->ct_status & QLTM_SVALID) {
1601 			char *sp = (char *)ct;
1602 			sp += CTIO_SENSE_OFFSET;
1603 			ccb->csio.sense_len =
1604 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1605 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1606 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1607 		}
1608 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1609 			resid = ct->ct_resid;
1610 		}
1611 		isp_prt(isp, ISP_LOGTDEBUG0,
1612 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1613 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1614 		    ct->ct_status, ct->ct_flags, resid,
1615 		    sentstatus? "FIN" : "MID");
1616 		tval = ct->ct_fwhandle;
1617 	}
1618 	ccb->csio.resid += resid;
1619 
1620 	/*
1621 	 * We're here because intermediate data transfers are done
1622 	 * and/or the final status CTIO (which may have joined with a
1623 	 * Data Transfer) is done.
1624 	 *
1625 	 * In any case, for this platform, the upper layers figure out
1626 	 * what to do next, so all we do here is collect status and
1627 	 * pass information along. Any DMA handles have already been
1628 	 * freed.
1629 	 */
1630 	if (notify_cam == 0) {
1631 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1632 		return (0);
1633 	}
1634 
1635 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1636 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1637 
1638 	if (!ok) {
1639 		isp_target_putback_atio(ccb);
1640 	} else {
1641 		isp_complete_ctio(ccb);
1642 
1643 	}
1644 	return (0);
1645 }
1646 
1647 static int
1648 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1649 {
1650 	return (0);	/* XXXX */
1651 }
1652 
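/*
 * Handle fibre channel immediate notify entries: port logouts, port
 * changes, global logouts and ABORT TASK requests.
 */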
1653 static int
1654 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1655 {
1656 
1657 	switch (inp->in_status) {
1658 	case IN_PORT_LOGOUT:
1659 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1660 		   inp->in_iid);
1661 		break;
1662 	case IN_PORT_CHANGED:
1663 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1664 		   inp->in_iid);
1665 		break;
1666 	case IN_GLOBAL_LOGO:
1667 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1668 		break;
1669 	case IN_ABORT_TASK:
1670 	{
1671 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1672 		struct ccb_immed_notify *inot = NULL;
1673 
1674 		if (atp) {
1675 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1676 			if (tptr) {
1677 				inot = (struct ccb_immed_notify *)
1678 				    SLIST_FIRST(&tptr->inots);
1679 				if (inot) {
1680 					SLIST_REMOVE_HEAD(&tptr->inots,
1681 					    sim_links.sle);
1682 				}
1683 			}
1684 			isp_prt(isp, ISP_LOGWARN,
1685 			   "abort task RX_ID %x IID %d state %d",
1686 			   inp->in_seqid, inp->in_iid, atp->state);
1687 		} else {
1688 			isp_prt(isp, ISP_LOGWARN,
1689 			   "abort task RX_ID %x from iid %d, state unknown",
1690 			   inp->in_seqid, inp->in_iid);
1691 		}
1692 		if (inot) {
1693 			inot->initiator_id = inp->in_iid;
1694 			inot->sense_len = 0;
1695 			inot->message_args[0] = MSG_ABORT_TAG;
1696 			inot->message_args[1] = inp->in_seqid & 0xff;
1697 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1698 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1699 			xpt_done((union ccb *)inot);
1700 		}
1701 		break;
1702 	}
1703 	default:
1704 		break;
1705 	}
1706 	return (0);
1707 }
1708 #endif
1709 
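/*
 * CAM async event callback. On AC_LOST_DEVICE for parallel SCSI we
 * temporarily push safe (narrow/async) settings to the chip for that
 * target while leaving the original goal flags in place for later
 * renegotiation.
 */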
1710 static void
1711 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1712 {
1713 	struct cam_sim *sim;
1714 	struct ispsoftc *isp;
1715 
1716 	sim = (struct cam_sim *)cbarg;
1717 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1718 	switch (code) {
1719 	case AC_LOST_DEVICE:
1720 		if (IS_SCSI(isp)) {
1721 			u_int16_t oflags, nflags;
1722 			sdparam *sdp = isp->isp_param;
1723 			int tgt;
1724 
1725 			tgt = xpt_path_target_id(path);
1726 			ISP_LOCK(isp);
1727 			sdp += cam_sim_bus(sim);
1728 			nflags = sdp->isp_devparam[tgt].nvrm_flags;
1729 #ifndef	ISP_TARGET_MODE
1730 			nflags &= DPARM_SAFE_DFLT;
1731 			if (isp->isp_loaded_fw) {
1732 				nflags |= DPARM_NARROW | DPARM_ASYNC;
1733 			}
1734 #else
1735 			nflags = DPARM_DEFAULT;
1736 #endif
1737 			oflags = sdp->isp_devparam[tgt].goal_flags;
1738 			sdp->isp_devparam[tgt].goal_flags = nflags;
1739 			sdp->isp_devparam[tgt].dev_update = 1;
1740 			isp->isp_update |= (1 << cam_sim_bus(sim));
1741 			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
1742 			sdp->isp_devparam[tgt].goal_flags = oflags;
1743 			ISP_UNLOCK(isp);
1744 		}
1745 		break;
1746 	default:
1747 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1748 		break;
1749 	}
1750 }
1751 
1752 static void
1753 isp_poll(struct cam_sim *sim)
1754 {
1755 	struct ispsoftc *isp = cam_sim_softc(sim);
1756 	u_int16_t isr, sema, mbox;
1757 
1758 	ISP_LOCK(isp);
1759 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1760 		isp_intr(isp, isr, sema, mbox);
1761 	}
1762 	ISP_UNLOCK(isp);
1763 }
1764 
1765 #if	0
1766 static void
1767 isp_relsim(void *arg)
1768 {
1769 	struct ispsoftc *isp = arg;
1770 	ISP_LOCK(isp);
1771 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
1772 		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
1773 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
1774 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1775 			xpt_release_simq(isp->isp_sim, 1);
1776 			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
1777 		}
1778 	}
1779 	ISP_UNLOCK(isp);
1780 }
1781 #endif
1782 
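/*
 * Per-command watchdog. If the command is still outstanding we first try
 * to harvest a pending completion; failing that, a command that has
 * already had its grace period is aborted, its resources are released
 * and the CCB is failed with CAM_CMD_TIMEOUT, while a first-time
 * offender is given a grace period and the firmware is flushed with a
 * SYNC_ALL marker.
 */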
1783 static void
1784 isp_watchdog(void *arg)
1785 {
1786 	XS_T *xs = arg;
1787 	struct ispsoftc *isp = XS_ISP(xs);
1788 	u_int32_t handle;
1789 
1790 	/*
1791 	 * We've decided this command is dead. Make sure we're not trying
1792 	 * to kill a command that's already dead by getting its handle
1793 	 * and seeing whether it's still alive.
1794 	 */
1795 	ISP_LOCK(isp);
1796 	handle = isp_find_handle(isp, xs);
1797 	if (handle) {
1798 		u_int16_t isr, sema, mbox;
1799 
1800 		if (XS_CMD_DONE_P(xs)) {
1801 			isp_prt(isp, ISP_LOGDEBUG1,
1802 			    "watchdog found done cmd (handle 0x%x)", handle);
1803 			ISP_UNLOCK(isp);
1804 			return;
1805 		}
1806 
1807 		if (XS_CMD_WDOG_P(xs)) {
1808 			isp_prt(isp, ISP_LOGDEBUG2,
1809 			    "recursive watchdog (handle 0x%x)", handle);
1810 			ISP_UNLOCK(isp);
1811 			return;
1812 		}
1813 
1814 		XS_CMD_S_WDOG(xs);
1815 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1816 			isp_intr(isp, isr, sema, mbox);
1817 		}
1818 		if (XS_CMD_DONE_P(xs)) {
1819 			isp_prt(isp, ISP_LOGDEBUG2,
1820 			    "watchdog cleanup for handle 0x%x", handle);
1821 			xpt_done((union ccb *) xs);
1822 		} else if (XS_CMD_GRACE_P(xs)) {
1823 			/*
1824 			 * Make sure the command is *really* dead before we
1825 			 * release the handle (and DMA resources) for reuse.
1826 			 */
1827 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1828 
1829 			/*
1830 			 * After this point, the command is really dead.
1831 			 */
1832 			if (XS_XFRLEN(xs)) {
1833 				ISP_DMAFREE(isp, xs, handle);
1834 			}
1835 			isp_destroy_handle(isp, handle);
1836 			xpt_print_path(xs->ccb_h.path);
1837 			isp_prt(isp, ISP_LOGWARN,
1838 			    "watchdog timeout for handle 0x%x", handle);
1839 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1840 			XS_CMD_C_WDOG(xs);
1841 			isp_done(xs);
1842 		} else {
1843 			u_int16_t nxti, optr;
1844 			ispreq_t local, *mp = &local, *qe;
1845 
1846 			XS_CMD_C_WDOG(xs);
1847 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1848 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1849 				ISP_UNLOCK(isp);
1850 				return;
1851 			}
1852 			XS_CMD_S_GRACE(xs);
1853 			MEMZERO((void *) mp, sizeof (*mp));
1854 			mp->req_header.rqs_entry_count = 1;
1855 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1856 			mp->req_modifier = SYNC_ALL;
1857 			mp->req_target = XS_CHANNEL(xs) << 7;
1858 			isp_put_request(isp, mp, qe);
1859 			ISP_ADD_REQUEST(isp, nxti);
1860 		}
1861 	} else {
1862 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1863 	}
1864 	ISP_UNLOCK(isp);
1865 }
1866 
1867 static int isp_ktmature = 0;
1868 
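/*
 * Fibre Channel support thread.  It is kicked via kthread_cv whenever
 * loop state changes, runs isp_fc_runstate() until the loop is usable
 * (or evidently never will be), and then lifts any LOOPDOWN freeze on
 * the SIM queue.
 */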
1869 static void
1870 isp_kthread(void *arg)
1871 {
1872 	int wasfrozen;
1873 	struct ispsoftc *isp = arg;
1874 
1875 	mtx_lock(&isp->isp_lock);
1876 	for (;;) {
1877 		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
1878 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1879 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1880 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1881 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1882 				    isp_ktmature == 0) {
1883 					break;
1884 				}
1885 			}
1886 			msleep(isp_kthread, &isp->isp_lock,
1887 			    PRIBIO, "isp_fcthrd", hz);
1888 		}
1889 		/*
1890 		 * Even if we didn't get good loop state, we may be
1891 		 * unfreezing the SIMQ so that we can kill off
1892 		 * commands (e.g., if we've never seen the loop before).
1893 		 */
1894 		isp_ktmature = 1;
1895 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1896 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1897 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1898 			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
1899 			ISPLOCK_2_CAMLOCK(isp);
1900 			xpt_release_simq(isp->isp_sim, 1);
1901 			CAMLOCK_2_ISPLOCK(isp);
1902 		}
1903 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
1904 	}
1905 }
1906 
1907 static void
1908 isp_action(struct cam_sim *sim, union ccb *ccb)
1909 {
1910 	int bus, tgt, error;
1911 	struct ispsoftc *isp;
1912 	struct ccb_trans_settings *cts;
1913 
1914 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1915 
1916 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1917 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1918 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1919 	if (isp->isp_state != ISP_RUNSTATE &&
1920 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1921 		CAMLOCK_2_ISPLOCK(isp);
1922 		isp_init(isp);
1923 		if (isp->isp_state != ISP_INITSTATE) {
1924 			ISP_UNLOCK(isp);
1925 			/*
1926 			 * Lie. Say it was a selection timeout.
1927 			 */
1928 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1929 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1930 			xpt_done(ccb);
1931 			return;
1932 		}
1933 		isp->isp_state = ISP_RUNSTATE;
1934 		ISPLOCK_2_CAMLOCK(isp);
1935 	}
1936 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1937 
1938 
1939 	switch (ccb->ccb_h.func_code) {
1940 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1941 		/*
1942 		 * Do a couple of preliminary checks...
1943 		 */
1944 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1945 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1946 				ccb->ccb_h.status = CAM_REQ_INVALID;
1947 				xpt_done(ccb);
1948 				break;
1949 			}
1950 		}
1951 #ifdef	DIAGNOSTIC
1952 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
1953 			ccb->ccb_h.status = CAM_PATH_INVALID;
1954 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
1955 			ccb->ccb_h.status = CAM_PATH_INVALID;
1956 		}
1957 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
1958 			isp_prt(isp, ISP_LOGERR,
1959 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
1960 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1961 			xpt_done(ccb);
1962 			break;
1963 		}
1964 #endif
1965 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
1966 		CAMLOCK_2_ISPLOCK(isp);
1967 		error = isp_start((XS_T *) ccb);
1968 		switch (error) {
1969 		case CMD_QUEUED:
1970 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
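			/*
			 * Convert the CCB timeout (milliseconds, or a
			 * nominal 60 seconds for CAM_TIME_DEFAULT) into
			 * clock ticks, rounding up and adding two seconds
			 * of slack, then arm the per-command watchdog.
			 */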
1971 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1972 				u_int64_t ticks = (u_int64_t) hz;
1973 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1974 					ticks = 60 * 1000 * ticks;
1975 				else
1976 					ticks = ccb->ccb_h.timeout * hz;
1977 				ticks = ((ticks + 999) / 1000) + hz + hz;
1978 				if (ticks >= 0x80000000) {
1979 					isp_prt(isp, ISP_LOGERR,
1980 					    "timeout overflow");
1981 					ticks = 0x80000000;
1982 				}
1983 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
1984 				    (caddr_t)ccb, (int)ticks);
1985 			} else {
1986 				callout_handle_init(&ccb->ccb_h.timeout_ch);
1987 			}
1988 			ISPLOCK_2_CAMLOCK(isp);
1989 			break;
1990 		case CMD_RQLATER:
1991 			/*
1992 			 * This can only happen for Fibre Channel
1993 			 */
1994 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
1995 			if (FCPARAM(isp)->loop_seen_once == 0 && isp_ktmature) {
1996 				ISPLOCK_2_CAMLOCK(isp);
1997 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
1998 				xpt_done(ccb);
1999 				break;
2000 			}
2001 			cv_signal(&isp->isp_osinfo.kthread_cv);
2002 			if (isp->isp_osinfo.simqfrozen == 0) {
2003 				isp_prt(isp, ISP_LOGDEBUG2,
2004 				    "RQLATER freeze simq");
2005 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2006 				ISPLOCK_2_CAMLOCK(isp);
2007 				xpt_freeze_simq(sim, 1);
2008 			} else {
2009 				ISPLOCK_2_CAMLOCK(isp);
2010 			}
2011 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2012 			xpt_done(ccb);
2013 			break;
2014 		case CMD_EAGAIN:
2015 			if (isp->isp_osinfo.simqfrozen == 0) {
2016 				xpt_freeze_simq(sim, 1);
2017 				isp_prt(isp, ISP_LOGDEBUG2,
2018 				    "EAGAIN freeze simq");
2019 			}
2020 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
2021 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2022 			ISPLOCK_2_CAMLOCK(isp);
2023 			xpt_done(ccb);
2024 			break;
2025 		case CMD_COMPLETE:
2026 			isp_done((struct ccb_scsiio *) ccb);
2027 			ISPLOCK_2_CAMLOCK(isp);
2028 			break;
2029 		default:
2030 			isp_prt(isp, ISP_LOGERR,
2031 			    "What's this? 0x%x at %d in file %s",
2032 			    error, __LINE__, __FILE__);
2033 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2034 			xpt_done(ccb);
2035 			ISPLOCK_2_CAMLOCK(isp);
2036 		}
2037 		break;
2038 
2039 #ifdef	ISP_TARGET_MODE
2040 	case XPT_EN_LUN:		/* Enable LUN as a target */
2041 	{
2042 		int iok;
2043 		CAMLOCK_2_ISPLOCK(isp);
2044 		iok = isp->isp_osinfo.intsok;
2045 		isp->isp_osinfo.intsok = 0;
2046 		isp_en_lun(isp, ccb);
2047 		isp->isp_osinfo.intsok = iok;
2048 		ISPLOCK_2_CAMLOCK(isp);
2049 		xpt_done(ccb);
2050 		break;
2051 	}
2052 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2053 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2054 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2055 	{
2056 		tstate_t *tptr =
2057 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2058 		if (tptr == NULL) {
2059 			ccb->ccb_h.status = CAM_LUN_INVALID;
2060 			xpt_done(ccb);
2061 			break;
2062 		}
2063 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2064 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2065 		ccb->ccb_h.flags = 0;
2066 
2067 		CAMLOCK_2_ISPLOCK(isp);
2068 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2069 			/*
2070 			 * Note that the command itself may not be done;
2071 			 * it may not even have had the first CTIO sent.
2072 			 */
2073 			tptr->atio_count++;
2074 			isp_prt(isp, ISP_LOGTDEBUG0,
2075 			    "Put FREE ATIO2, lun %d, count now %d",
2076 			    ccb->ccb_h.target_lun, tptr->atio_count);
2077 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2078 			    sim_links.sle);
2079 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2080 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2081 			    sim_links.sle);
2082 		} else {
2083 			;
2084 		}
2085 		rls_lun_statep(isp, tptr);
2086 		ccb->ccb_h.status = CAM_REQ_INPROG;
2087 		ISPLOCK_2_CAMLOCK(isp);
2088 		break;
2089 	}
2090 	case XPT_CONT_TARGET_IO:
2091 	{
2092 		CAMLOCK_2_ISPLOCK(isp);
2093 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2094 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2095 			isp_prt(isp, ISP_LOGWARN,
2096 			    "XPT_CONT_TARGET_IO: status 0x%x",
2097 			    ccb->ccb_h.status);
2098 			if (isp->isp_osinfo.simqfrozen == 0) {
2099 				xpt_freeze_simq(sim, 1);
2100 				xpt_print_path(ccb->ccb_h.path);
2101 				isp_prt(isp, ISP_LOGINFO,
2102 				    "XPT_CONT_TARGET_IO freeze simq");
2103 			}
2104 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
2105 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2106 			ISPLOCK_2_CAMLOCK(isp);
2107 			xpt_done(ccb);
2108 		} else {
2109 			ISPLOCK_2_CAMLOCK(isp);
2110 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2111 		}
2112 		break;
2113 	}
2114 #endif
2115 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2116 
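		/*
		 * Pack the bus number into the upper 16 bits of the target
		 * id argument handed to the core driver.
		 */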
2117 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2118 		tgt = ccb->ccb_h.target_id;
2119 		tgt |= (bus << 16);
2120 
2121 		CAMLOCK_2_ISPLOCK(isp);
2122 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2123 		ISPLOCK_2_CAMLOCK(isp);
2124 		if (error) {
2125 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2126 		} else {
2127 			ccb->ccb_h.status = CAM_REQ_CMP;
2128 		}
2129 		xpt_done(ccb);
2130 		break;
2131 	case XPT_ABORT:			/* Abort the specified CCB */
2132 	{
2133 		union ccb *accb = ccb->cab.abort_ccb;
2134 		CAMLOCK_2_ISPLOCK(isp);
2135 		switch (accb->ccb_h.func_code) {
2136 #ifdef	ISP_TARGET_MODE
2137 		case XPT_ACCEPT_TARGET_IO:
2138 		case XPT_IMMED_NOTIFY:
2139 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2140 			break;
2141 		case XPT_CONT_TARGET_IO:
2142 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2143 			ccb->ccb_h.status = CAM_UA_ABORT;
2144 			break;
2145 #endif
2146 		case XPT_SCSI_IO:
2147 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2148 			if (error) {
2149 				ccb->ccb_h.status = CAM_UA_ABORT;
2150 			} else {
2151 				ccb->ccb_h.status = CAM_REQ_CMP;
2152 			}
2153 			break;
2154 		default:
2155 			ccb->ccb_h.status = CAM_REQ_INVALID;
2156 			break;
2157 		}
2158 		ISPLOCK_2_CAMLOCK(isp);
2159 		xpt_done(ccb);
2160 		break;
2161 	}
2162 #ifdef	CAM_NEW_TRAN_CODE
2163 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2164 #else
2165 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2166 #endif
2167 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2168 		cts = &ccb->cts;
2169 		if (!IS_CURRENT_SETTINGS(cts)) {
2170 			ccb->ccb_h.status = CAM_REQ_INVALID;
2171 			xpt_done(ccb);
2172 			break;
2173 		}
2174 		tgt = cts->ccb_h.target_id;
2175 		CAMLOCK_2_ISPLOCK(isp);
2176 		if (IS_SCSI(isp)) {
2177 #ifndef	CAM_NEW_TRAN_CODE
2178 			sdparam *sdp = isp->isp_param;
2179 			u_int16_t *dptr;
2180 
2181 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2182 
2183 			sdp += bus;
2184 			/*
2185 			 * We always update (internally) from goal_flags
2186 			 * so any request to change settings just gets
2187 			 * vectored to that location.
2188 			 */
2189 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2190 
2191 			/*
2192 			 * Note that these operations affect the
2193 			 * goal flags (goal_flags), not
2194 			 * the current state flags. Then we mark
2195 			 * things so that the next operation to
2196 			 * this HBA will cause the update to occur.
2197 			 */
2198 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2199 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2200 					*dptr |= DPARM_DISC;
2201 				} else {
2202 					*dptr &= ~DPARM_DISC;
2203 				}
2204 			}
2205 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2206 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2207 					*dptr |= DPARM_TQING;
2208 				} else {
2209 					*dptr &= ~DPARM_TQING;
2210 				}
2211 			}
2212 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2213 				switch (cts->bus_width) {
2214 				case MSG_EXT_WDTR_BUS_16_BIT:
2215 					*dptr |= DPARM_WIDE;
2216 					break;
2217 				default:
2218 					*dptr &= ~DPARM_WIDE;
2219 				}
2220 			}
2221 			/*
2222 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2223 			 * of nonzero will cause us to go to the
2224 			 * selected (from NVRAM) maximum value for
2225 			 * this device. At a later point, we'll
2226 			 * allow finer control.
2227 			 */
2228 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2229 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2230 			    (cts->sync_offset > 0)) {
2231 				*dptr |= DPARM_SYNC;
2232 			} else {
2233 				*dptr &= ~DPARM_SYNC;
2234 			}
2235 			*dptr |= DPARM_SAFE_DFLT;
2236 #else
2237 			struct ccb_trans_settings_scsi *scsi =
2238 			    &cts->proto_specific.scsi;
2239 			struct ccb_trans_settings_spi *spi =
2240 			    &cts->xport_specific.spi;
2241 			sdparam *sdp = isp->isp_param;
2242 			u_int16_t *dptr;
2243 
2244 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2245 			sdp += bus;
2246 			/*
2247 			 * We always update (internally) from goal_flags
2248 			 * so any request to change settings just gets
2249 			 * vectored to that location.
2250 			 */
2251 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2252 
2253 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2254 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2255 					*dptr |= DPARM_DISC;
2256 				else
2257 					*dptr &= ~DPARM_DISC;
2258 			}
2259 
2260 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2261 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2262 					*dptr |= DPARM_TQING;
2263 				else
2264 					*dptr &= ~DPARM_TQING;
2265 			}
2266 
2267 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2268 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2269 					*dptr |= DPARM_WIDE;
2270 				else
2271 					*dptr &= ~DPARM_WIDE;
2272 			}
2273 
2274 			/*
2275 			 * XXX: FIX ME
2276 			 */
2277 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2278 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2279 			    (spi->sync_period && spi->sync_offset)) {
2280 				*dptr |= DPARM_SYNC;
2281 				/*
2282 				 * XXX: CHECK FOR LEGALITY
2283 				 */
2284 				sdp->isp_devparam[tgt].goal_period =
2285 				    spi->sync_period;
2286 				sdp->isp_devparam[tgt].goal_offset =
2287 				    spi->sync_offset;
2288 			} else {
2289 				*dptr &= ~DPARM_SYNC;
2290 			}
2291 #endif
2292 			isp_prt(isp, ISP_LOGDEBUG0,
2293 			    "SET bus %d targ %d to flags %x off %x per %x",
2294 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2295 			    sdp->isp_devparam[tgt].goal_offset,
2296 			    sdp->isp_devparam[tgt].goal_period);
2297 			sdp->isp_devparam[tgt].dev_update = 1;
2298 			isp->isp_update |= (1 << bus);
2299 		}
2300 		ISPLOCK_2_CAMLOCK(isp);
2301 		ccb->ccb_h.status = CAM_REQ_CMP;
2302 		xpt_done(ccb);
2303 		break;
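	/*
	 * Report either the currently active settings or the NVRAM
	 * defaults for this nexus.  For parallel SCSI the active values
	 * are refreshed from the firmware first; for Fibre Channel a
	 * fixed set of capabilities is returned.
	 */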
2304 	case XPT_GET_TRAN_SETTINGS:
2305 		cts = &ccb->cts;
2306 		tgt = cts->ccb_h.target_id;
2307 		CAMLOCK_2_ISPLOCK(isp);
2308 		if (IS_FC(isp)) {
2309 #ifndef	CAM_NEW_TRAN_CODE
2310 			/*
2311 			 * a lot of normal SCSI things don't make sense.
2312 			 */
2313 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2314 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2315 			/*
2316 			 * How do you measure the width of a high
2317 			 * speed serial bus? Well, in bytes.
2318 			 *
2319 			 * Offset and period make no sense, though; we report
2320 			 * a gigabit 'base' speed via XPT_PATH_INQ instead.
2321 			 */
2322 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2323 #else
2324 			fcparam *fcp = isp->isp_param;
2325 			struct ccb_trans_settings_fc *fc =
2326 			    &cts->xport_specific.fc;
2327 
2328 			cts->protocol = PROTO_SCSI;
2329 			cts->protocol_version = SCSI_REV_2;
2330 			cts->transport = XPORT_FC;
2331 			cts->transport_version = 0;
2332 
2333 			fc->valid = CTS_FC_VALID_SPEED;
2334 			if (fcp->isp_gbspeed == 2)
2335 				fc->bitrate = 200000;
2336 			else
2337 				fc->bitrate = 100000;
2338 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2339 				struct lportdb *lp = &fcp->portdb[tgt];
2340 				fc->wwnn = lp->node_wwn;
2341 				fc->wwpn = lp->port_wwn;
2342 				fc->port = lp->portid;
2343 				fc->valid |= CTS_FC_VALID_WWNN |
2344 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2345 			}
2346 #endif
2347 		} else {
2348 #ifdef	CAM_NEW_TRAN_CODE
2349 			struct ccb_trans_settings_scsi *scsi =
2350 			    &cts->proto_specific.scsi;
2351 			struct ccb_trans_settings_spi *spi =
2352 			    &cts->xport_specific.spi;
2353 #endif
2354 			sdparam *sdp = isp->isp_param;
2355 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2356 			u_int16_t dval, pval, oval;
2357 
2358 			sdp += bus;
2359 
2360 			if (IS_CURRENT_SETTINGS(cts)) {
2361 				sdp->isp_devparam[tgt].dev_refresh = 1;
2362 				isp->isp_update |= (1 << bus);
2363 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2364 				    NULL);
2365 				dval = sdp->isp_devparam[tgt].actv_flags;
2366 				oval = sdp->isp_devparam[tgt].actv_offset;
2367 				pval = sdp->isp_devparam[tgt].actv_period;
2368 			} else {
2369 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2370 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2371 				pval = sdp->isp_devparam[tgt].nvrm_period;
2372 			}
2373 
2374 #ifndef	CAM_NEW_TRAN_CODE
2375 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2376 
2377 			if (dval & DPARM_DISC) {
2378 				cts->flags |= CCB_TRANS_DISC_ENB;
2379 			}
2380 			if (dval & DPARM_TQING) {
2381 				cts->flags |= CCB_TRANS_TAG_ENB;
2382 			}
2383 			if (dval & DPARM_WIDE) {
2384 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2385 			} else {
2386 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2387 			}
2388 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2389 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2390 
2391 			if ((dval & DPARM_SYNC) && oval != 0) {
2392 				cts->sync_period = pval;
2393 				cts->sync_offset = oval;
2394 				cts->valid |=
2395 				    CCB_TRANS_SYNC_RATE_VALID |
2396 				    CCB_TRANS_SYNC_OFFSET_VALID;
2397 			}
2398 #else
2399 			cts->protocol = PROTO_SCSI;
2400 			cts->protocol_version = SCSI_REV_2;
2401 			cts->transport = XPORT_SPI;
2402 			cts->transport_version = 2;
2403 
2404 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2405 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2406 			if (dval & DPARM_DISC) {
2407 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2408 			}
2409 			if (dval & DPARM_TQING) {
2410 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2411 			}
2412 			if ((dval & DPARM_SYNC) && oval && pval) {
2413 				spi->sync_offset = oval;
2414 				spi->sync_period = pval;
2415 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2416 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2417 			}
2418 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2419 			if (dval & DPARM_WIDE) {
2420 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2421 			} else {
2422 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2423 			}
2424 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2425 				scsi->valid = CTS_SCSI_VALID_TQ;
2426 				spi->valid |= CTS_SPI_VALID_DISC;
2427 			} else {
2428 				scsi->valid = 0;
2429 			}
2430 #endif
2431 			isp_prt(isp, ISP_LOGDEBUG0,
2432 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2433 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2434 			    bus, tgt, dval, oval, pval);
2435 		}
2436 		ISPLOCK_2_CAMLOCK(isp);
2437 		ccb->ccb_h.status = CAM_REQ_CMP;
2438 		xpt_done(ccb);
2439 		break;
2440 
2441 	case XPT_CALC_GEOMETRY:
2442 	{
2443 		struct ccb_calc_geometry *ccg;
2444 		u_int32_t secs_per_cylinder;
2445 		u_int32_t size_mb;
2446 
2447 		ccg = &ccb->ccg;
2448 		if (ccg->block_size == 0) {
2449 			isp_prt(isp, ISP_LOGERR,
2450 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2451 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2452 			ccb->ccb_h.status = CAM_REQ_INVALID;
2453 			xpt_done(ccb);
2454 			break;
2455 		}
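		/*
		 * Synthesize a CHS geometry from the capacity: volumes over
		 * 1GB get 255 heads and 63 sectors per track, smaller ones
		 * 64 and 32.  For example, a 4GB volume with 512-byte blocks
		 * (8388608 blocks) gives size_mb = 4096 and therefore
		 * 8388608 / (255 * 63) = 522 cylinders.
		 */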
2456 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2457 		if (size_mb > 1024) {
2458 			ccg->heads = 255;
2459 			ccg->secs_per_track = 63;
2460 		} else {
2461 			ccg->heads = 64;
2462 			ccg->secs_per_track = 32;
2463 		}
2464 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2465 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2466 		ccb->ccb_h.status = CAM_REQ_CMP;
2467 		xpt_done(ccb);
2468 		break;
2469 	}
2470 	case XPT_RESET_BUS:		/* Reset the specified bus */
2471 		bus = cam_sim_bus(sim);
2472 		CAMLOCK_2_ISPLOCK(isp);
2473 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2474 		ISPLOCK_2_CAMLOCK(isp);
2475 		if (error)
2476 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2477 		else {
2478 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2479 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2480 			else if (isp->isp_path != NULL)
2481 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2482 			ccb->ccb_h.status = CAM_REQ_CMP;
2483 		}
2484 		xpt_done(ccb);
2485 		break;
2486 
2487 	case XPT_TERM_IO:		/* Terminate the I/O process */
2488 		ccb->ccb_h.status = CAM_REQ_INVALID;
2489 		xpt_done(ccb);
2490 		break;
2491 
2492 	case XPT_PATH_INQ:		/* Path routing inquiry */
2493 	{
2494 		struct ccb_pathinq *cpi = &ccb->cpi;
2495 
2496 		cpi->version_num = 1;
2497 #ifdef	ISP_TARGET_MODE
2498 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2499 #else
2500 		cpi->target_sprt = 0;
2501 #endif
2502 		cpi->hba_eng_cnt = 0;
2503 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2504 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2505 		cpi->bus_id = cam_sim_bus(sim);
2506 		if (IS_FC(isp)) {
2507 			cpi->hba_misc = PIM_NOBUSRESET;
2508 			/*
2509 			 * Because our loop ID can shift from time to time,
2510 			 * make our initiator ID out of range of our bus.
2511 			 */
2512 			cpi->initiator_id = cpi->max_target + 1;
2513 
2514 			/*
2515 			 * Set base transfer capabilities for Fibre Channel.
2516 			 * Technically not correct because we don't know
2517 			 * what media we're running on top of, but we'll
2518 			 * look good if we always say 100MB/s.
2519 			 */
2520 			if (FCPARAM(isp)->isp_gbspeed == 2)
2521 				cpi->base_transfer_speed = 200000;
2522 			else
2523 				cpi->base_transfer_speed = 100000;
2524 			cpi->hba_inquiry = PI_TAG_ABLE;
2525 #ifdef	CAM_NEW_TRAN_CODE
2526 			cpi->transport = XPORT_FC;
2527 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2528 #endif
2529 		} else {
2530 			sdparam *sdp = isp->isp_param;
2531 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2532 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2533 			cpi->hba_misc = 0;
2534 			cpi->initiator_id = sdp->isp_initiator_id;
2535 			cpi->base_transfer_speed = 3300;
2536 #ifdef	CAM_NEW_TRAN_CODE
2537 			cpi->transport = XPORT_SPI;
2538 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2539 #endif
2540 		}
2541 #ifdef	CAM_NEW_TRAN_CODE
2542 		cpi->protocol = PROTO_SCSI;
2543 		cpi->protocol_version = SCSI_REV_2;
2544 #endif
2545 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2546 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2547 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2548 		cpi->unit_number = cam_sim_unit(sim);
2549 		cpi->ccb_h.status = CAM_REQ_CMP;
2550 		xpt_done(ccb);
2551 		break;
2552 	}
2553 	default:
2554 		ccb->ccb_h.status = CAM_REQ_INVALID;
2555 		xpt_done(ccb);
2556 		break;
2557 	}
2558 }
2559 
2560 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
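/*
 * Command completion path: translate SCSI status into CAM status bits,
 * freeze the device queue on errors, undo any resource-shortage simq
 * freeze, cancel the watchdog, and hand the CCB back to CAM.
 */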
2561 void
2562 isp_done(struct ccb_scsiio *sccb)
2563 {
2564 	struct ispsoftc *isp = XS_ISP(sccb);
2565 
2566 	if (XS_NOERR(sccb))
2567 		XS_SETERR(sccb, CAM_REQ_CMP);
2568 
2569 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2570 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2571 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2572 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2573 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2574 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2575 		} else {
2576 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2577 		}
2578 	}
2579 
2580 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2581 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2582 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2583 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2584 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2585 			if (sccb->scsi_status != SCSI_STATUS_OK)
2586 				isp_prt(isp, ISP_LOGDEBUG2,
2587 				    "freeze devq %d.%d %x %x",
2588 				    sccb->ccb_h.target_id,
2589 				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
2590 				    sccb->scsi_status);
2591 		}
2592 	}
2593 
2594 	/*
2595 	 * If we were frozen waiting for resources, clear that condition.
2596 	 * If we are no longer frozen, and the devq
2597 	 * isn't frozen, mark the completing CCB to have the XPT layer
2598 	 * release the simq.
2599 	 */
2600 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
2601 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
2602 		if (isp->isp_osinfo.simqfrozen == 0) {
2603 			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2604 				isp_prt(isp, ISP_LOGDEBUG2,
2605 				    "isp_done->relsimq");
2606 				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2607 			} else {
2608 				isp_prt(isp, ISP_LOGDEBUG2,
2609 				    "isp_done->devq frozen");
2610 			}
2611 		} else {
2612 			isp_prt(isp, ISP_LOGDEBUG2,
2613 			    "isp_done -> simqfrozen = %x",
2614 			    isp->isp_osinfo.simqfrozen);
2615 		}
2616 	}
2617 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2618 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2619 		xpt_print_path(sccb->ccb_h.path);
2620 		isp_prt(isp, ISP_LOGINFO,
2621 		    "cam completion status 0x%x", sccb->ccb_h.status);
2622 	}
2623 
2624 	XS_CMD_S_DONE(sccb);
2625 	if (XS_CMD_WDOG_P(sccb) == 0) {
2626 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2627 		if (XS_CMD_GRACE_P(sccb)) {
2628 			isp_prt(isp, ISP_LOGDEBUG2,
2629 			    "finished command on borrowed time");
2630 		}
2631 		XS_CMD_S_CLEAR(sccb);
2632 		ISPLOCK_2_CAMLOCK(isp);
2633 		xpt_done((union ccb *) sccb);
2634 		CAMLOCK_2_ISPLOCK(isp);
2635 	}
2636 }
2637 
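/*
 * Platform handler for asynchronous events posted by the core driver:
 * renegotiated target parameters, bus resets, FC loop transitions,
 * port database and name server changes, fabric device announcements,
 * target mode events, and firmware crashes.
 */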
2638 int
2639 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2640 {
2641 	int bus, rv = 0;
2642 	switch (cmd) {
2643 	case ISPASYNC_NEW_TGT_PARAMS:
2644 	{
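		/*
		 * The core driver has settled on new sync/wide/tag values
		 * for a target: encode them in a ccb_trans_settings and
		 * broadcast AC_TRANSFER_NEG so CAM picks them up.
		 */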
2645 #ifdef	CAM_NEW_TRAN_CODE
2646 		struct ccb_trans_settings_scsi *scsi;
2647 		struct ccb_trans_settings_spi *spi;
2648 #endif
2649 		int flags, tgt;
2650 		sdparam *sdp = isp->isp_param;
2651 		struct ccb_trans_settings cts;
2652 		struct cam_path *tmppath;
2653 
2654 		bzero(&cts, sizeof (struct ccb_trans_settings));
2655 
2656 		tgt = *((int *)arg);
2657 		bus = (tgt >> 16) & 0xffff;
2658 		tgt &= 0xffff;
2659 		sdp += bus;
2660 		ISPLOCK_2_CAMLOCK(isp);
2661 		if (xpt_create_path(&tmppath, NULL,
2662 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2663 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2664 			CAMLOCK_2_ISPLOCK(isp);
2665 			isp_prt(isp, ISP_LOGWARN,
2666 			    "isp_async cannot make temp path for %d.%d",
2667 			    tgt, bus);
2668 			rv = -1;
2669 			break;
2670 		}
2671 		CAMLOCK_2_ISPLOCK(isp);
2672 		flags = sdp->isp_devparam[tgt].actv_flags;
2673 #ifdef	CAM_NEW_TRAN_CODE
2674 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2675 		cts.protocol = PROTO_SCSI;
2676 		cts.transport = XPORT_SPI;
2677 
2678 		scsi = &cts.proto_specific.scsi;
2679 		spi = &cts.xport_specific.spi;
2680 
2681 		if (flags & DPARM_TQING) {
2682 			scsi->valid |= CTS_SCSI_VALID_TQ;
2683 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2684 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2685 		}
2686 
2687 		if (flags & DPARM_DISC) {
2688 			spi->valid |= CTS_SPI_VALID_DISC;
2689 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2690 		}
2691 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2692 		if (flags & DPARM_WIDE) {
2693 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2694 		} else {
2695 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2696 		}
2697 		if (flags & DPARM_SYNC) {
2698 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2699 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2700 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2701 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2702 		}
2703 #else
2704 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2705 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2706 		if (flags & DPARM_DISC) {
2707 			cts.flags |= CCB_TRANS_DISC_ENB;
2708 		}
2709 		if (flags & DPARM_TQING) {
2710 			cts.flags |= CCB_TRANS_TAG_ENB;
2711 		}
2712 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2713 		cts.bus_width = (flags & DPARM_WIDE)?
2714 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2715 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2716 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2717 		if (flags & DPARM_SYNC) {
2718 			cts.valid |=
2719 			    CCB_TRANS_SYNC_RATE_VALID |
2720 			    CCB_TRANS_SYNC_OFFSET_VALID;
2721 		}
2722 #endif
2723 		isp_prt(isp, ISP_LOGDEBUG2,
2724 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2725 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2726 		    sdp->isp_devparam[tgt].actv_offset, flags);
2727 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2728 		ISPLOCK_2_CAMLOCK(isp);
2729 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2730 		xpt_free_path(tmppath);
2731 		CAMLOCK_2_ISPLOCK(isp);
2732 		break;
2733 	}
2734 	case ISPASYNC_BUS_RESET:
2735 		bus = *((int *)arg);
2736 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2737 		    bus);
2738 		if (bus > 0 && isp->isp_path2) {
2739 			ISPLOCK_2_CAMLOCK(isp);
2740 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2741 			CAMLOCK_2_ISPLOCK(isp);
2742 		} else if (isp->isp_path) {
2743 			ISPLOCK_2_CAMLOCK(isp);
2744 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2745 			CAMLOCK_2_ISPLOCK(isp);
2746 		}
2747 		break;
2748 	case ISPASYNC_LIP:
2749 		if (isp->isp_path) {
2750 			if (isp->isp_osinfo.simqfrozen == 0) {
2751 				isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq");
2752 				ISPLOCK_2_CAMLOCK(isp);
2753 				xpt_freeze_simq(isp->isp_sim, 1);
2754 				CAMLOCK_2_ISPLOCK(isp);
2755 			}
2756 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2757 		}
2758 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2759 		break;
2760 	case ISPASYNC_LOOP_RESET:
2761 		if (isp->isp_path) {
2762 			if (isp->isp_osinfo.simqfrozen == 0) {
2763 				isp_prt(isp, ISP_LOGDEBUG0,
2764 				    "Loop Reset freeze simq");
2765 				ISPLOCK_2_CAMLOCK(isp);
2766 				xpt_freeze_simq(isp->isp_sim, 1);
2767 				CAMLOCK_2_ISPLOCK(isp);
2768 			}
2769 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2770 		}
2771 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2772 		break;
2773 	case ISPASYNC_LOOP_DOWN:
2774 		if (isp->isp_path) {
2775 			if (isp->isp_osinfo.simqfrozen == 0) {
2776 				isp_prt(isp, ISP_LOGDEBUG0,
2777 				    "loop down freeze simq");
2778 				ISPLOCK_2_CAMLOCK(isp);
2779 				xpt_freeze_simq(isp->isp_sim, 1);
2780 				CAMLOCK_2_ISPLOCK(isp);
2781 			}
2782 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2783 		}
2784 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2785 		break;
2786 	case ISPASYNC_LOOP_UP:
2787 		/*
2788 		 * Now we just note that Loop has come up. We don't
2789 		 * actually do anything because we're waiting for a
2790 		 * Change Notify before activating the FC cleanup
2791 		 * thread to look at the state of the loop again.
2792 		 */
2793 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2794 		break;
2795 	case ISPASYNC_PROMENADE:
2796 	{
2797 		struct cam_path *tmppath;
2798 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2799 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2800 		static const char *roles[4] = {
2801 		    "(none)", "Target", "Initiator", "Target/Initiator"
2802 		};
2803 		fcparam *fcp = isp->isp_param;
2804 		int tgt = *((int *) arg);
2805 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2806 		struct lportdb *lp = &fcp->portdb[tgt];
2807 
2808 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2809 		    roles[lp->roles & 0x3],
2810 		    (lp->valid)? "Arrived" : "Departed",
2811 		    (u_int32_t) (lp->port_wwn >> 32),
2812 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2813 		    (u_int32_t) (lp->node_wwn >> 32),
2814 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2815 
2816 		ISPLOCK_2_CAMLOCK(isp);
2817 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2818 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2819 			CAMLOCK_2_ISPLOCK(isp);
2820 			break;
2821 		}
2822 		/*
2823 		 * Policy: only announce targets.
2824 		 */
2825 		if (lp->roles & is_tgt_mask) {
2826 			if (lp->valid) {
2827 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2828 			} else {
2829 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2830 			}
2831 		}
2832 		xpt_free_path(tmppath);
2833 		CAMLOCK_2_ISPLOCK(isp);
2834 		break;
2835 	}
2836 	case ISPASYNC_CHANGE_NOTIFY:
2837 		if (arg == ISPASYNC_CHANGE_PDB) {
2838 			isp_prt(isp, ISP_LOGINFO,
2839 			    "Port Database Changed");
2840 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2841 			isp_prt(isp, ISP_LOGINFO,
2842 			    "Name Server Database Changed");
2843 		}
2844 		cv_signal(&isp->isp_osinfo.kthread_cv);
2845 		break;
2846 	case ISPASYNC_FABRIC_DEV:
2847 	{
2848 		int target, base, lim;
2849 		fcparam *fcp = isp->isp_param;
2850 		struct lportdb *lp = NULL;
2851 		struct lportdb *clp = (struct lportdb *) arg;
2852 		char *pt;
2853 
2854 		switch (clp->port_type) {
2855 		case 1:
2856 			pt = "   N_Port";
2857 			break;
2858 		case 2:
2859 			pt = "  NL_Port";
2860 			break;
2861 		case 3:
2862 			pt = "F/NL_Port";
2863 			break;
2864 		case 0x7f:
2865 			pt = "  Nx_Port";
2866 			break;
2867 		case 0x81:
2868 			pt = "   F_Port";
2869 			break;
2870 		case 0x82:
2871 			pt = "  FL_Port";
2872 			break;
2873 		case 0x84:
2874 			pt = "   E_Port";
2875 			break;
2876 		default:
2877 			pt = " ";
2878 			break;
2879 		}
2880 
2881 		isp_prt(isp, ISP_LOGINFO,
2882 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2883 
2884 		/*
2885 		 * If we don't have an initiator role we bail.
2886 		 *
2887 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2888 		 */
2889 
2890 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2891 			break;
2892 		}
2893 
2894 		/*
2895 		 * Is this entry for us? If so, we bail.
2896 		 */
2897 
2898 		if (fcp->isp_portid == clp->portid) {
2899 			break;
2900 		}
2901 
2902 		/*
2903 		 * Else, the default policy is to find room for it in
2904 		 * our local port database. Later, when we execute
2905 		 * the call to isp_pdb_sync, either this newly arrived
2906 		 * or already logged-in device will be (re)announced.
2907 		 */
2908 
2909 		if (fcp->isp_topo == TOPO_FL_PORT)
2910 			base = FC_SNS_ID+1;
2911 		else
2912 			base = 0;
2913 
2914 		if (fcp->isp_topo == TOPO_N_PORT)
2915 			lim = 1;
2916 		else
2917 			lim = MAX_FC_TARG;
2918 
2919 		/*
2920 		 * Is it already in our list?
2921 		 */
2922 		for (target = base; target < lim; target++) {
2923 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2924 				continue;
2925 			}
2926 			lp = &fcp->portdb[target];
2927 			if (lp->port_wwn == clp->port_wwn &&
2928 			    lp->node_wwn == clp->node_wwn) {
2929 				lp->fabric_dev = 1;
2930 				break;
2931 			}
2932 		}
2933 		if (target < lim) {
2934 			break;
2935 		}
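		/*
		 * Not already known: claim the first unused slot (one with
		 * a zero port WWN) in the valid target range.
		 */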
2936 		for (target = base; target < lim; target++) {
2937 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2938 				continue;
2939 			}
2940 			lp = &fcp->portdb[target];
2941 			if (lp->port_wwn == 0) {
2942 				break;
2943 			}
2944 		}
2945 		if (target == lim) {
2946 			isp_prt(isp, ISP_LOGWARN,
2947 			    "out of space for fabric devices");
2948 			break;
2949 		}
2950 		lp->port_type = clp->port_type;
2951 		lp->fc4_type = clp->fc4_type;
2952 		lp->node_wwn = clp->node_wwn;
2953 		lp->port_wwn = clp->port_wwn;
2954 		lp->portid = clp->portid;
2955 		lp->fabric_dev = 1;
2956 		break;
2957 	}
2958 #ifdef	ISP_TARGET_MODE
2959 	case ISPASYNC_TARGET_MESSAGE:
2960 	{
2961 		tmd_msg_t *mp = arg;
2962 		isp_prt(isp, ISP_LOGALL,
2963 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2964 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2965 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2966 		    mp->nt_msg[0]);
2967 		break;
2968 	}
2969 	case ISPASYNC_TARGET_EVENT:
2970 	{
2971 		tmd_event_t *ep = arg;
2972 		isp_prt(isp, ISP_LOGALL,
2973 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2974 		break;
2975 	}
2976 	case ISPASYNC_TARGET_ACTION:
2977 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2978 		default:
2979 			isp_prt(isp, ISP_LOGWARN,
2980 			    "event 0x%x for unhandled target action",
2981 			    ((isphdr_t *)arg)->rqs_entry_type);
2982 			break;
2983 		case RQSTYPE_NOTIFY:
2984 			if (IS_SCSI(isp)) {
2985 				rv = isp_handle_platform_notify_scsi(isp,
2986 				    (in_entry_t *) arg);
2987 			} else {
2988 				rv = isp_handle_platform_notify_fc(isp,
2989 				    (in_fcentry_t *) arg);
2990 			}
2991 			break;
2992 		case RQSTYPE_ATIO:
2993 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2994 			break;
2995 		case RQSTYPE_ATIO2:
2996 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2997 			break;
2998 		case RQSTYPE_CTIO2:
2999 		case RQSTYPE_CTIO:
3000 			rv = isp_handle_platform_ctio(isp, arg);
3001 			break;
3002 		case RQSTYPE_ENABLE_LUN:
3003 		case RQSTYPE_MODIFY_LUN:
3004 			if (IS_DUALBUS(isp)) {
3005 				bus =
3006 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3007 			} else {
3008 				bus = 0;
3009 			}
3010 			isp_cv_signal_rqe(isp, bus,
3011 			    ((lun_entry_t *)arg)->le_status);
3012 			break;
3013 		}
3014 		break;
3015 #endif
3016 	case ISPASYNC_FW_CRASH:
3017 	{
3018 		u_int16_t mbox1, mbox6;
3019 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3020 		if (IS_DUALBUS(isp)) {
3021 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
3022 		} else {
3023 			mbox6 = 0;
3024 		}
3025 		isp_prt(isp, ISP_LOGERR,
3026 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3027 		    mbox6, mbox1);
3028 		isp_reinit(isp);
3029 		break;
3030 	}
3031 	case ISPASYNC_UNHANDLED_RESPONSE:
3032 		break;
3033 	default:
3034 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3035 		break;
3036 	}
3037 	return (rv);
3038 }
3039 
3040 
3041 /*
3042  * Locks are held before coming here.
3043  */
3044 void
3045 isp_uninit(struct ispsoftc *isp)
3046 {
3047 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3048 	DISABLE_INTS(isp);
3049 }
3050 
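/*
 * Driver message printer: honor the per-instance debug level mask
 * (ISP_LOGALL always prints) and prefix each line with the device
 * name and unit, e.g. isp_prt(isp, ISP_LOGINFO, "Loop UP").
 */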
3051 void
3052 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3053 {
3054 	va_list ap;
3055 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3056 		return;
3057 	}
3058 	printf("%s: ", device_get_nameunit(isp->isp_dev));
3059 	va_start(ap, fmt);
3060 	vprintf(fmt, ap);
3061 	va_end(ap);
3062 	printf("\n");
3063 }
3064