xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 1b6c76a2fe091c74f08427e6c870851025a9cf67)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
31 #include <machine/stdarg.h>	/* for use by isp_prt below */
32 #include <sys/conf.h>
33 #include <sys/ioccom.h>
34 #include <dev/isp/isp_ioctl.h>
35 
36 
37 static d_ioctl_t ispioctl;
38 static void isp_intr_enable(void *);
39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
40 static void isp_poll(struct cam_sim *);
41 #if	0
42 static void isp_relsim(void *);
43 #endif
44 static timeout_t isp_watchdog;
45 static void isp_kthread(void *);
46 static void isp_action(struct cam_sim *, union ccb *);
47 
48 
49 #define ISP_CDEV_MAJOR	248
50 static struct cdevsw isp_cdevsw = {
51 	/* open */	nullopen,
52 	/* close */	nullclose,
53 	/* read */	noread,
54 	/* write */	nowrite,
55 	/* ioctl */	ispioctl,
56 	/* poll */	nopoll,
57 	/* mmap */	nommap,
58 	/* strategy */	nostrategy,
59 	/* name */	"isp",
60 	/* maj */	ISP_CDEV_MAJOR,
61 	/* dump */	nodump,
62 	/* psize */	nopsize,
63 	/* flags */	D_TAPE,
64 };
65 
66 static struct ispsoftc *isplist = NULL;
67 
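/*
 * Platform attach: allocate the CAM device queue and SIM(s), establish the
 * interrupt-enable config hook, register the bus(es) with CAM, hook up the
 * AC_LOST_DEVICE async callback, start the Fibre Channel worker thread for
 * FC cards, create the /dev control node and link this instance onto the
 * global isplist.
 */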
68 void
69 isp_attach(struct ispsoftc *isp)
70 {
71 	int primary, secondary;
72 	struct ccb_setasync csa;
73 	struct cam_devq *devq;
74 	struct cam_sim *sim;
75 	struct cam_path *path;
76 
77 	/*
78 	 * Establish (in case of 12X0) which bus is the primary.
79 	 */
80 
81 	primary = 0;
82 	secondary = 1;
83 
84 	/*
85 	 * Create the device queue for our SIM(s).
86 	 */
87 	devq = cam_simq_alloc(isp->isp_maxcmds);
88 	if (devq == NULL) {
89 		return;
90 	}
91 
92 	/*
93 	 * Construct our SIM entry.
94 	 */
95 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
96 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
97 	if (sim == NULL) {
98 		cam_simq_free(devq);
99 		return;
100 	}
101 
102 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
103 	isp->isp_osinfo.ehook.ich_arg = isp;
104 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
105 		isp_prt(isp, ISP_LOGERR,
106 		    "could not establish interrupt enable hook");
107 		cam_sim_free(sim, TRUE);
108 		return;
109 	}
110 
111 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
112 		cam_sim_free(sim, TRUE);
113 		return;
114 	}
115 
116 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
117 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
118 		xpt_bus_deregister(cam_sim_path(sim));
119 		cam_sim_free(sim, TRUE);
120 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
121 		return;
122 	}
123 
124 	xpt_setup_ccb(&csa.ccb_h, path, 5);
125 	csa.ccb_h.func_code = XPT_SASYNC_CB;
126 	csa.event_enable = AC_LOST_DEVICE;
127 	csa.callback = isp_cam_async;
128 	csa.callback_arg = sim;
129 	xpt_action((union ccb *)&csa);
130 	isp->isp_sim = sim;
131 	isp->isp_path = path;
132 	/*
133 	 * Create a kernel thread for fibre channel instances. We
134 	 * don't have dual channel FC cards.
135 	 */
136 	if (IS_FC(isp)) {
137 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
138 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
139 		    RFHIGHPID, "%s: fc_thrd",
140 		    device_get_nameunit(isp->isp_dev))) {
141 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
142 			xpt_bus_deregister(cam_sim_path(sim));
143 			cam_sim_free(sim, TRUE);
144 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
145 			return;
146 		}
147 	}
148 
149 
150 	/*
151 	 * If we have a second channel, construct SIM entry for that.
152 	 */
153 	if (IS_DUALBUS(isp)) {
154 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
155 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
156 		if (sim == NULL) {
157 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
158 			xpt_free_path(isp->isp_path);
159 			cam_simq_free(devq);
160 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
161 			return;
162 		}
163 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
164 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
165 			xpt_free_path(isp->isp_path);
166 			cam_sim_free(sim, TRUE);
167 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
168 			return;
169 		}
170 
171 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
172 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
173 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
174 			xpt_free_path(isp->isp_path);
175 			xpt_bus_deregister(cam_sim_path(sim));
176 			cam_sim_free(sim, TRUE);
177 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
178 			return;
179 		}
180 
181 		xpt_setup_ccb(&csa.ccb_h, path, 5);
182 		csa.ccb_h.func_code = XPT_SASYNC_CB;
183 		csa.event_enable = AC_LOST_DEVICE;
184 		csa.callback = isp_cam_async;
185 		csa.callback_arg = sim;
186 		xpt_action((union ccb *)&csa);
187 		isp->isp_sim2 = sim;
188 		isp->isp_path2 = path;
189 	}
190 
191 	/*
192 	 * Create device nodes
193 	 */
194 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
195 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
196 
197 	if (isp->isp_role != ISP_ROLE_NONE) {
198 		isp->isp_state = ISP_RUNSTATE;
199 		ENABLE_INTS(isp);
200 	}
201 	if (isplist == NULL) {
202 		isplist = isp;
203 	} else {
204 		struct ispsoftc *tmp = isplist;
205 		while (tmp->isp_osinfo.next) {
206 			tmp = tmp->isp_osinfo.next;
207 		}
208 		tmp->isp_osinfo.next = isp;
209 	}
210 
211 }
212 
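/*
 * ioctl handler for the isp control device: locate the softc by unit
 * (minor) number and service debug level changes, HBA resets, FC rescans,
 * LIP generation and port database lookups.
 */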
213 static int
214 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
215 {
216 	struct ispsoftc *isp;
217 	int retval = ENOTTY;
218 
219 	isp = isplist;
220 	while (isp) {
221 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
222 			break;
223 		}
224 		isp = isp->isp_osinfo.next;
225 	}
226 	if (isp == NULL)
227 		return (ENXIO);
228 
229 	switch (cmd) {
230 	case ISP_SDBLEV:
231 	{
232 		int olddblev = isp->isp_dblev;
233 		isp->isp_dblev = *(int *)addr;
234 		*(int *)addr = olddblev;
235 		retval = 0;
236 		break;
237 	}
238 	case ISP_RESETHBA:
239 		ISP_LOCK(isp);
240 		isp_reinit(isp);
241 		ISP_UNLOCK(isp);
242 		retval = 0;
243 		break;
244 	case ISP_FC_RESCAN:
245 		if (IS_FC(isp)) {
246 			ISP_LOCK(isp);
247 			if (isp_fc_runstate(isp, 5 * 1000000)) {
248 				retval = EIO;
249 			} else {
250 				retval = 0;
251 			}
252 			ISP_UNLOCK(isp);
253 		}
254 		break;
255 	case ISP_FC_LIP:
256 		if (IS_FC(isp)) {
257 			ISP_LOCK(isp);
258 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
259 				retval = EIO;
260 			} else {
261 				retval = 0;
262 			}
263 			ISP_UNLOCK(isp);
264 		}
265 		break;
266 	case ISP_FC_GETDINFO:
267 	{
268 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
269 		struct lportdb *lp;
270 
271 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
272 			retval = EINVAL;
273 			break;
274 		}
275 		ISP_LOCK(isp);
276 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
277 		if (lp->valid) {
278 			ifc->loopid = lp->loopid;
279 			ifc->portid = lp->portid;
280 			ifc->node_wwn = lp->node_wwn;
281 			ifc->port_wwn = lp->port_wwn;
282 			retval = 0;
283 		} else {
284 			retval = ENODEV;
285 		}
286 		ISP_UNLOCK(isp);
287 		break;
288 	}
289 	default:
290 		break;
291 	}
292 	return (retval);
293 }
294 
295 static void
296 isp_intr_enable(void *arg)
297 {
298 	struct ispsoftc *isp = arg;
299 	if (isp->isp_role != ISP_ROLE_NONE) {
300 		ENABLE_INTS(isp);
301 		isp->isp_osinfo.intsok = 1;
302 	}
303 	/* Release our hook so that the boot can continue. */
304 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
305 }
306 
307 /*
308  * Put the target mode functions here, because some are inlines
309  */
310 
311 #ifdef	ISP_TARGET_MODE
312 
313 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
314 static __inline int are_any_luns_enabled(struct ispsoftc *, int);
315 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
316 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
317 static __inline int isp_psema_sig_rqe(struct ispsoftc *);
318 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
319 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
320 static __inline void isp_vsema_rqe(struct ispsoftc *);
321 static cam_status
322 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
323 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
324 static void isp_en_lun(struct ispsoftc *, union ccb *);
325 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
326 static timeout_t isp_refire_putback_atio;
327 static void isp_complete_ctio(union ccb *);
328 static void isp_target_putback_atio(union ccb *);
329 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
330 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
331 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
332 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
333 
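/*
 * Return nonzero if a lun state structure already exists in the lun hash
 * table for the given (bus, lun) pair.
 */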
334 static __inline int
335 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
336 {
337 	tstate_t *tptr;
338 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
339 	if (tptr == NULL) {
341 		return (0);
342 	}
343 	do {
344 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
346 			return (1);
347 		}
348 	} while ((tptr = tptr->next) != NULL);
349 	return (0);
350 }
351 
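/*
 * Return nonzero if any lun is enabled on the given port (bus), i.e. if
 * that port's slice of the lun hash table has any entries.
 */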
352 static __inline int
353 are_any_luns_enabled(struct ispsoftc *isp, int port)
354 {
355 	int lo, hi;
356 	if (IS_DUALBUS(isp)) {
357 		lo = (port * (LUN_HASH_SIZE >> 1));
358 		hi = lo + (LUN_HASH_SIZE >> 1);
359 	} else {
360 		lo = 0;
361 		hi = LUN_HASH_SIZE;
362 	}
363 	for (; lo < hi; lo++) {
364 		if (isp->isp_osinfo.lun_hash[lo]) {
365 			return (1);
366 		}
367 	}
368 	return (0);
369 }
370 
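/*
 * Find and return the lun state for (bus, lun) with its hold count bumped;
 * the wildcard lun maps to the default per-bus state.
 */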
371 static __inline tstate_t *
372 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
373 {
374 	tstate_t *tptr;
375 
376 	if (lun == CAM_LUN_WILDCARD) {
377 		tptr = &isp->isp_osinfo.tsdflt[bus];
378 		tptr->hold++;
379 		return (tptr);
380 	} else {
381 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
382 	}
383 	if (tptr == NULL) {
384 		return (NULL);
385 	}
386 
387 	do {
388 		if (tptr->lun == lun && tptr->bus == bus) {
389 			tptr->hold++;
390 			return (tptr);
391 		}
392 	} while ((tptr = tptr->next) != NULL);
393 	return (tptr);
394 }
395 
396 static __inline void
397 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
398 {
399 	if (tptr->hold)
400 		tptr->hold--;
401 }
402 
403 static __inline int
404 isp_psema_sig_rqe(struct ispsoftc *isp)
405 {
406 	while (isp->isp_osinfo.tmflags & TM_BUSY) {
407 		isp->isp_osinfo.tmflags |= TM_WANTED;
408 		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
409 			return (-1);
410 		}
411 	}
412 	isp->isp_osinfo.tmflags |= TM_BUSY;
413 	return (0);
414 }
415 
416 static __inline int
417 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
418 {
419 	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
421 		return (-1);
422 	}
423 	return (0);
424 }
425 
426 static __inline void
427 isp_cv_signal_rqe(struct ispsoftc *isp, int status)
428 {
429 	isp->isp_osinfo.rstatus = status;
430 	wakeup(&isp->isp_osinfo.rstatus);
431 }
432 
433 static __inline void
434 isp_vsema_rqe(struct ispsoftc *isp)
435 {
436 	if (isp->isp_osinfo.tmflags & TM_WANTED) {
437 		isp->isp_osinfo.tmflags &= ~TM_WANTED;
438 		wakeup(&isp->isp_osinfo.tmflags);
439 	}
440 	isp->isp_osinfo.tmflags &= ~TM_BUSY;
441 }
442 
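/*
 * Allocate and initialize a lun state structure for (bus, lun), create its
 * CAM path, insert it into the lun hash table and return it held.
 */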
443 static cam_status
444 create_lun_state(struct ispsoftc *isp, int bus,
445     struct cam_path *path, tstate_t **rslt)
446 {
447 	cam_status status;
448 	lun_id_t lun;
449 	int hfx;
450 	tstate_t *tptr, *new;
451 
452 	lun = xpt_path_lun_id(path);
453 	if (lun < 0) {
454 		return (CAM_LUN_INVALID);
455 	}
456 	if (is_lun_enabled(isp, bus, lun)) {
457 		return (CAM_LUN_ALRDY_ENA);
458 	}
459 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
460 	if (new == NULL) {
461 		return (CAM_RESRC_UNAVAIL);
462 	}
463 
464 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
465 	    xpt_path_target_id(path), xpt_path_lun_id(path));
466 	if (status != CAM_REQ_CMP) {
467 		free(new, M_DEVBUF);
468 		return (status);
469 	}
470 	new->bus = bus;
471 	new->lun = lun;
472 	SLIST_INIT(&new->atios);
473 	SLIST_INIT(&new->inots);
474 	new->hold = 1;
475 
476 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
477 	tptr = isp->isp_osinfo.lun_hash[hfx];
478 	if (tptr == NULL) {
479 		isp->isp_osinfo.lun_hash[hfx] = new;
480 	} else {
481 		while (tptr->next)
482 			tptr = tptr->next;
483 		tptr->next = new;
484 	}
485 	*rslt = new;
486 	return (CAM_REQ_CMP);
487 }
488 
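/*
 * Unhash and free a lun state structure, provided nobody still holds a
 * reference to it.
 */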
489 static __inline void
490 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
491 {
492 	int hfx;
493 	tstate_t *lw, *pw;
494 
495 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
496 	if (tptr->hold) {
497 		return;
498 	}
499 	pw = isp->isp_osinfo.lun_hash[hfx];
500 	if (pw == NULL) {
501 		return;
502 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
503 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
504 	} else {
505 		lw = pw;
506 		pw = lw->next;
507 		while (pw) {
508 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
509 				lw->next = pw->next;
510 				break;
511 			}
512 			lw = pw;
513 			pw = pw->next;
514 		}
515 		if (pw == NULL) {
517 			return;
518 		}
519 	}
520 	free(tptr, M_DEVBUF);
521 }
522 
523 /*
524  * we enter with our locks held.
525  */
526 static void
527 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
528 {
529 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
530 	struct ccb_en_lun *cel = &ccb->cel;
531 	tstate_t *tptr;
532 	u_int16_t rstat;
533 	int bus, cmd, frozen = 0;
534 	lun_id_t lun;
535 	target_id_t tgt;
536 
537 
538 	bus = XS_CHANNEL(ccb) & 0x1;
539 	tgt = ccb->ccb_h.target_id;
540 	lun = ccb->ccb_h.target_lun;
541 
542 	/*
543 	 * Do some sanity checking first.
544 	 */
545 
546 	if ((lun != CAM_LUN_WILDCARD) &&
547 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
548 		ccb->ccb_h.status = CAM_LUN_INVALID;
549 		return;
550 	}
551 	if (IS_SCSI(isp)) {
552 		sdparam *sdp = isp->isp_param;
553 		sdp += bus;
554 		if (tgt != CAM_TARGET_WILDCARD &&
555 		    tgt != sdp->isp_initiator_id) {
556 			ccb->ccb_h.status = CAM_TID_INVALID;
557 			return;
558 		}
559 	} else {
560 		if (tgt != CAM_TARGET_WILDCARD &&
561 		    tgt != FCPARAM(isp)->isp_iid) {
562 			ccb->ccb_h.status = CAM_TID_INVALID;
563 			return;
564 		}
565 	}
566 
567 	if (tgt == CAM_TARGET_WILDCARD) {
568 		if (lun != CAM_LUN_WILDCARD) {
569 			ccb->ccb_h.status = CAM_LUN_INVALID;
570 			return;
571 		}
572 	}
573 
574 	/*
575 	 * If Fibre Channel, stop and drain all activity to this bus.
576 	 */
577 #if	0
578 	if (IS_FC(isp)) {
579 		ISP_LOCK(isp);
580 		frozen = 1;
581 		xpt_freeze_simq(isp->isp_sim, 1);
582 		isp->isp_osinfo.drain = 1;
583 		while (isp->isp_osinfo.drain) {
584 			(void) msleep(&isp->isp_osinfo.drain, &isp->isp_lock,
585 			    PRIBIO, "ispdrain", 10 * hz);
586 		}
587 		ISP_UNLOCK(isp);
588 	}
589 #endif
590 
591 	/*
592 	 * Check to see if we're enabling on fibre channel and
593 	 * don't yet have a notion of who the heck we are (no
594 	 * loop yet).
595 	 */
596 	if (IS_FC(isp) && cel->enable &&
597 	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
598 		fcparam *fcp = isp->isp_param;
599 		int rv;
600 
601 		rv = isp_fc_runstate(isp, 2 * 1000000);
602 		if (fcp->isp_fwstate != FW_READY ||
603 		    fcp->isp_loopstate != LOOP_READY) {
604 			xpt_print_path(ccb->ccb_h.path);
605 			isp_prt(isp, ISP_LOGWARN,
606 			    "could not get a good port database read");
607 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
608 			if (frozen) {
609 				ISPLOCK_2_CAMLOCK(isp);
610 				xpt_release_simq(isp->isp_sim, 1);
611 				CAMLOCK_2_ISPLOCK(isp);
612 			}
613 			return;
614 		}
615 	}
616 
617 
618 	/*
619 	 * Next check to see whether this is a target/lun wildcard action.
620 	 *
621 	 * If so, we enable/disable target mode but don't do any lun enabling.
622 	 */
623 	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
624 		int av = bus << 31;
625 		tptr = &isp->isp_osinfo.tsdflt[bus];
626 		if (cel->enable) {
627 			if (isp->isp_osinfo.tmflags & (1 << bus)) {
628 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
629 				if (frozen) {
630 					ISPLOCK_2_CAMLOCK(isp);
631 					xpt_release_simq(isp->isp_sim, 1);
632 					CAMLOCK_2_ISPLOCK(isp);
633 				}
634 				return;
635 			}
636 			ccb->ccb_h.status =
637 			    xpt_create_path(&tptr->owner, NULL,
638 			    xpt_path_path_id(ccb->ccb_h.path),
639 			    xpt_path_target_id(ccb->ccb_h.path),
640 			    xpt_path_lun_id(ccb->ccb_h.path));
641 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
642 				if (frozen) {
643 					ISPLOCK_2_CAMLOCK(isp);
644 					xpt_release_simq(isp->isp_sim, 1);
645 					CAMLOCK_2_ISPLOCK(isp);
646 				}
647 				return;
648 			}
649 			SLIST_INIT(&tptr->atios);
650 			SLIST_INIT(&tptr->inots);
651 			av |= ENABLE_TARGET_FLAG;
652 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
653 			if (av) {
654 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
655 				xpt_free_path(tptr->owner);
656 				if (frozen) {
657 					ISPLOCK_2_CAMLOCK(isp);
658 					xpt_release_simq(isp->isp_sim, 1);
659 					CAMLOCK_2_ISPLOCK(isp);
660 				}
661 				return;
662 			}
663 			isp->isp_osinfo.tmflags |= (1 << bus);
664 		} else {
665 			if ((isp->isp_osinfo.tmflags & (1 << bus)) == 0) {
666 				ccb->ccb_h.status = CAM_LUN_INVALID;
667 				if (frozen) {
668 					ISPLOCK_2_CAMLOCK(isp);
669 					xpt_release_simq(isp->isp_sim, 1);
670 					CAMLOCK_2_ISPLOCK(isp);
671 				}
672 				return;
673 			}
674 			if (are_any_luns_enabled(isp, bus)) {
675 				ccb->ccb_h.status = CAM_SCSI_BUSY;
676 				if (frozen) {
677 					ISPLOCK_2_CAMLOCK(isp);
678 					xpt_release_simq(isp->isp_sim, 1);
679 					CAMLOCK_2_ISPLOCK(isp);
680 				}
681 				return;
682 			}
683 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
684 			if (av) {
685 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
686 				if (frozen) {
687 					ISPLOCK_2_CAMLOCK(isp);
688 					xpt_release_simq(isp->isp_sim, 1);
689 					CAMLOCK_2_ISPLOCK(isp);
690 				}
691 				return;
692 			}
693 			isp->isp_osinfo.tmflags &= ~(1 << bus);
694 			ccb->ccb_h.status = CAM_REQ_CMP;
695 		}
696 		xpt_print_path(ccb->ccb_h.path);
697 		isp_prt(isp, ISP_LOGINFO, "Target Mode %sabled on channel %d",
698 		    (cel->enable) ? "en" : "dis", bus);
699 		if (frozen) {
700 			ISPLOCK_2_CAMLOCK(isp);
701 			xpt_release_simq(isp->isp_sim, 1);
702 			CAMLOCK_2_ISPLOCK(isp);
703 		}
704 		return;
705 	}
706 
707 	/*
708 	 * We can move along now...
709 	 */
710 
711 	if (frozen) {
712 		ISPLOCK_2_CAMLOCK(isp);
713 		xpt_release_simq(isp->isp_sim, 1);
714 		CAMLOCK_2_ISPLOCK(isp);
715 	}
716 
717 
718 	if (cel->enable) {
719 		ccb->ccb_h.status =
720 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
721 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
722 			return;
723 		}
724 	} else {
725 		tptr = get_lun_statep(isp, bus, lun);
726 		if (tptr == NULL) {
727 			ccb->ccb_h.status = CAM_LUN_INVALID;
728 			return;
729 		}
730 	}
731 
732 	if (isp_psema_sig_rqe(isp)) {
733 		rls_lun_statep(isp, tptr);
734 		if (cel->enable)
735 			destroy_lun_state(isp, tptr);
736 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
737 		return;
738 	}
739 
740 	if (cel->enable) {
741 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
742 		int c, n, ulun = lun;
743 
744 		cmd = RQSTYPE_ENABLE_LUN;
745 		c = DFLT_CMND_CNT;
746 		n = DFLT_INOT_CNT;
747 		if (IS_FC(isp) && lun != 0) {
748 			cmd = RQSTYPE_MODIFY_LUN;
749 			n = 0;
750 			/*
751 		 	 * For SCC firmware, we only deal with setting
752 			 * (enabling or modifying) lun 0.
753 			 */
754 			ulun = 0;
755 		}
756 		rstat = LUN_ERR;
757 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
758 			xpt_print_path(ccb->ccb_h.path);
759 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
760 			goto out;
761 		}
762 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
763 			xpt_print_path(ccb->ccb_h.path);
764 			isp_prt(isp, ISP_LOGERR,
765 			    "wait for ENABLE/MODIFY LUN timed out");
766 			goto out;
767 		}
768 		rstat = isp->isp_osinfo.rstatus;
769 		if (rstat != LUN_OK) {
770 			xpt_print_path(ccb->ccb_h.path);
771 			isp_prt(isp, ISP_LOGERR,
772 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
773 			goto out;
774 		}
775 	} else {
776 		int c, n, ulun = lun;
777 		u_int32_t seq;
778 
779 		rstat = LUN_ERR;
780 		seq = isp->isp_osinfo.rollinfo++;
781 		cmd = -RQSTYPE_MODIFY_LUN;
782 
783 		c = DFLT_CMND_CNT;
784 		n = DFLT_INOT_CNT;
785 		if (IS_FC(isp) && lun != 0) {
786 			n = 0;
787 			/*
788 		 	 * For SCC firmware, we only deal with setting
789 			 * (enabling or modifying) lun 0.
790 			 */
791 			ulun = 0;
792 		}
793 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
794 			xpt_print_path(ccb->ccb_h.path);
795 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
796 			goto out;
797 		}
798 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
799 			xpt_print_path(ccb->ccb_h.path);
800 			isp_prt(isp, ISP_LOGERR,
801 			    "wait for MODIFY LUN timed out");
802 			goto out;
803 		}
804 		rstat = isp->isp_osinfo.rstatus;
805 		if (rstat != LUN_OK) {
806 			xpt_print_path(ccb->ccb_h.path);
807 			isp_prt(isp, ISP_LOGERR,
808 			    "MODIFY LUN returned 0x%x", rstat);
809 			goto out;
810 		}
811 		if (IS_FC(isp) && lun) {
812 			goto out;
813 		}
814 
815 		seq = isp->isp_osinfo.rollinfo++;
816 
817 		rstat = LUN_ERR;
818 		cmd = -RQSTYPE_ENABLE_LUN;
819 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
820 			xpt_print_path(ccb->ccb_h.path);
821 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
822 			goto out;
823 		}
824 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
825 			xpt_print_path(ccb->ccb_h.path);
826 			isp_prt(isp, ISP_LOGERR,
827 			     "wait for DISABLE LUN timed out");
828 			goto out;
829 		}
830 		rstat = isp->isp_osinfo.rstatus;
831 		if (rstat != LUN_OK) {
832 			xpt_print_path(ccb->ccb_h.path);
833 			isp_prt(isp, ISP_LOGWARN,
834 			    "DISABLE LUN returned 0x%x", rstat);
835 			goto out;
836 		}
837 	}
838 out:
839 	isp_vsema_rqe(isp);
840 
841 	if (rstat != LUN_OK) {
842 		xpt_print_path(ccb->ccb_h.path);
843 		isp_prt(isp, ISP_LOGWARN,
844 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
845 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
846 		rls_lun_statep(isp, tptr);
847 		if (cel->enable)
848 			destroy_lun_state(isp, tptr);
849 	} else {
850 		xpt_print_path(ccb->ccb_h.path);
851 		isp_prt(isp, ISP_LOGINFO, lfmt,
852 		    (cel->enable) ? "en" : "dis", bus);
853 		rls_lun_statep(isp, tptr);
854 		if (cel->enable == 0) {
855 			destroy_lun_state(isp, tptr);
856 		}
857 		ccb->ccb_h.status = CAM_REQ_CMP;
858 	}
859 }
860 
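/*
 * Abort a queued ATIO or INOT CCB by unlinking it from the lun state's
 * pending list.
 */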
861 static cam_status
862 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
863 {
864 	tstate_t *tptr;
865 	struct ccb_hdr_slist *lp;
866 	struct ccb_hdr *curelm;
867 	int found;
868 	union ccb *accb = ccb->cab.abort_ccb;
869 
870 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
871 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
872 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
873 			return (CAM_PATH_INVALID);
874 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
875 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
876 			return (CAM_PATH_INVALID);
877 		}
878 	}
879 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
880 	if (tptr == NULL) {
881 		return (CAM_PATH_INVALID);
882 	}
883 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
884 		lp = &tptr->atios;
885 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
886 		lp = &tptr->inots;
887 	} else {
888 		rls_lun_statep(isp, tptr);
889 		return (CAM_UA_ABORT);
890 	}
891 	curelm = SLIST_FIRST(lp);
892 	found = 0;
893 	if (curelm == &accb->ccb_h) {
894 		found = 1;
895 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
896 	} else {
897 		while(curelm != NULL) {
898 			struct ccb_hdr *nextelm;
899 
900 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
901 			if (nextelm == &accb->ccb_h) {
902 				found = 1;
903 				SLIST_NEXT(curelm, sim_links.sle) =
904 				    SLIST_NEXT(nextelm, sim_links.sle);
905 				break;
906 			}
907 			curelm = nextelm;
908 		}
909 	}
910 	rls_lun_statep(isp, tptr);
911 	if (found) {
912 		accb->ccb_h.status = CAM_REQ_ABORTED;
913 		return (CAM_REQ_CMP);
914 	}
915 	return(CAM_PATH_INVALID);
916 }
917 
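/*
 * Build a CTIO (parallel SCSI) or CTIO2 (Fibre Channel) request from an
 * XPT_CONT_TARGET_IO CCB, set up DMA for any data movement and hand the
 * entry to the firmware.
 */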
918 static cam_status
919 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
920 {
921 	void *qe;
922 	struct ccb_scsiio *cso = &ccb->csio;
923 	u_int16_t *hp, save_handle;
924 	u_int16_t iptr, optr;
925 
926 
927 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
928 		xpt_print_path(ccb->ccb_h.path);
929 		printf("Request Queue Overflow in isp_target_start_ctio\n");
930 		return (CAM_RESRC_UNAVAIL);
931 	}
932 	bzero(qe, QENTRY_LEN);
933 
934 	/*
935 	 * We're either moving data or completing a command here.
936 	 */
937 
938 	if (IS_FC(isp)) {
939 		struct ccb_accept_tio *atiop;
940 		ct2_entry_t *cto = qe;
941 
942 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
943 		cto->ct_header.rqs_entry_count = 1;
944 		cto->ct_iid = cso->init_id;
945 		if (isp->isp_maxluns <= 16) {
946 			cto->ct_lun = ccb->ccb_h.target_lun;
947 		}
948 		/*
949 		 * Start with a residual based on what the original datalength
950 		 * was supposed to be. Basically, we ignore what CAM has set
951 		 * for residuals. The data transfer routines will knock off
952 		 * the residual for each byte actually moved, and also will
953 		 * be responsible for setting the underrun flag.
954 		 */
955 		/* HACK! HACK! */
956 		if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
957 			cto->ct_resid = atiop->ccb_h.spriv_field0;
958 		}
959 
960 		cto->ct_rxid = cso->tag_id;
961 		if (cso->dxfer_len == 0) {
962 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
963 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
964 				cto->ct_flags |= CT2_SENDSTATUS;
965 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
966 			}
967 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
968 				int m = min(cso->sense_len, MAXRESPLEN);
969 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
970 				cto->rsp.m1.ct_senselen = m;
971 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
972 			}
973 		} else {
974 			cto->ct_flags |= CT2_FLAG_MODE0;
975 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
976 				cto->ct_flags |= CT2_DATA_IN;
977 			} else {
978 				cto->ct_flags |= CT2_DATA_OUT;
979 			}
980 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
981 				cto->ct_flags |= CT2_SENDSTATUS;
982 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
983 			}
984 			/*
985 			 * If we're sending data and status back together,
986 			 * we can't also send back sense data as well.
987 			 */
988 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
989 		}
990 		if (cto->ct_flags & CT2_SENDSTATUS) {
991 			isp_prt(isp, ISP_LOGTDEBUG1,
992 			    "CTIO2[%x] SCSI STATUS 0x%x datalength %u",
993 			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
994 		}
995 		if  (cto->ct_flags & CT2_SENDSTATUS)
996 		if (cto->ct_flags & CT2_SENDSTATUS)
997 		cto->ct_timeout = 10;
998 		hp = &cto->ct_syshandle;
999 	} else {
1000 		ct_entry_t *cto = qe;
1001 
1002 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1003 		cto->ct_header.rqs_entry_count = 1;
1004 		cto->ct_iid = cso->init_id;
1005 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1006 		cto->ct_tgt = ccb->ccb_h.target_id;
1007 		cto->ct_lun = ccb->ccb_h.target_lun;
1008 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1009 		if (AT_HAS_TAG(cso->tag_id)) {
1010 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1011 			cto->ct_flags |= CT_TQAE;
1012 		}
1013 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1014 			cto->ct_flags |= CT_NODISC;
1015 		}
1016 		if (cso->dxfer_len == 0) {
1017 			cto->ct_flags |= CT_NO_DATA;
1018 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1019 			cto->ct_flags |= CT_DATA_IN;
1020 		} else {
1021 			cto->ct_flags |= CT_DATA_OUT;
1022 		}
1023 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1024 			cto->ct_flags |= CT_SENDSTATUS;
1025 			cto->ct_scsi_status = cso->scsi_status;
1026 			cto->ct_resid = cso->resid;
1027 		}
1028 		if (cto->ct_flags & CT_SENDSTATUS) {
1029 			isp_prt(isp, ISP_LOGTDEBUG1,
1030 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1031 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1032 			    cso->tag_id);
1033 		}
1034 		cto->ct_timeout = 10;
1035 		hp = &cto->ct_syshandle;
1036 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1037 		if (cto->ct_flags & CT_SENDSTATUS)
1038 			cto->ct_flags |= CT_CCINCR;
1039 	}
1040 
1041 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1042 		xpt_print_path(ccb->ccb_h.path);
1043 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1044 		return (CAM_RESRC_UNAVAIL);
1045 	}
1046 
1047 
1048 	/*
1049 	 * Call the dma setup routines for this entry (and any subsequent
1050 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1051 	 * new things to play with. As with isp_start's usage of DMA setup,
1052 	 * any swizzling is done in the machine dependent layer. Because
1053 	 * of this, we put the request onto the queue area first in native
1054 	 * format.
1055 	 */
1056 
1057 	save_handle = *hp;
1058 
1059 	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
1060 	case CMD_QUEUED:
1061 		ISP_ADD_REQUEST(isp, iptr);
1062 		return (CAM_REQ_INPROG);
1063 
1064 	case CMD_EAGAIN:
1065 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1066 		isp_destroy_handle(isp, save_handle);
1067 		return (CAM_RESRC_UNAVAIL);
1068 
1069 	default:
1070 		isp_destroy_handle(isp, save_handle);
1071 		return (XS_ERR(ccb));
1072 	}
1073 }
1074 
1075 static void
1076 isp_refire_putback_atio(void *arg)
1077 {
1078 	int s = splcam();
1079 	isp_target_putback_atio(arg);
1080 	splx(s);
1081 }
1082 
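/*
 * Return an ATIO/ATIO2 resource to the firmware by pushing a fresh entry
 * onto the request queue, then complete the CTIO CCB that consumed it.
 */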
1083 static void
1084 isp_target_putback_atio(union ccb *ccb)
1085 {
1086 	struct ispsoftc *isp;
1087 	struct ccb_scsiio *cso;
1088 	u_int16_t iptr, optr;
1089 	void *qe;
1090 
1091 	isp = XS_ISP(ccb);
1092 
1093 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
1094 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1095 		isp_prt(isp, ISP_LOGWARN,
1096 		    "isp_target_putback_atio: Request Queue Overflow");
1097 		return;
1098 	}
1099 	bzero(qe, QENTRY_LEN);
1100 	cso = &ccb->csio;
1101 	if (IS_FC(isp)) {
1102 		at2_entry_t *at = qe;
1103 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1104 		at->at_header.rqs_entry_count = 1;
1105 		if (isp->isp_maxluns > 16) {
1106 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1107 		} else {
1108 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1109 		}
1110 		at->at_status = CT_OK;
1111 		at->at_rxid = cso->tag_id;
1112 		ISP_SWIZ_ATIO2(isp, qe, qe);
1113 	} else {
1114 		at_entry_t *at = qe;
1115 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1116 		at->at_header.rqs_entry_count = 1;
1117 		at->at_iid = cso->init_id;
1118 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1119 		at->at_tgt = cso->ccb_h.target_id;
1120 		at->at_lun = cso->ccb_h.target_lun;
1121 		at->at_status = CT_OK;
1122 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1123 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1124 		ISP_SWIZ_ATIO(isp, qe, qe);
1125 	}
1126 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1127 	ISP_ADD_REQUEST(isp, iptr);
1128 	isp_complete_ctio(ccb);
1129 }
1130 
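/*
 * Finish off a target mode CCB: mark it complete, release any
 * resource-shortage SIMQ freeze we own, and hand it back to CAM.
 */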
1131 static void
1132 isp_complete_ctio(union ccb *ccb)
1133 {
1134 	struct ispsoftc *isp = XS_ISP(ccb);
1135 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1136 		ccb->ccb_h.status |= CAM_REQ_CMP;
1137 	}
1138 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1139 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1140 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1141 		if (isp->isp_osinfo.simqfrozen == 0) {
1142 			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1143 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
1144 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1145 			} else {
1146 				isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen");
1147 			}
1148 		} else {
1149 			isp_prt(isp, ISP_LOGWARN,
1150 			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
1151 		}
1152 	}
1153 	xpt_done(ccb);
1154 }
1155 
1156 /*
1157  * Handle ATIO stuff that the generic code can't.
1158  * This means handling CDBs.
1159  */
1160 
1161 static int
1162 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1163 {
1164 	tstate_t *tptr;
1165 	int status, bus;
1166 	struct ccb_accept_tio *atiop;
1167 
1168 	/*
1169 	 * The firmware status (except for the QLTM_SVALID bit)
1170 	 * indicates why this ATIO was sent to us.
1171 	 *
1172 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1173 	 *
1174 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1175 	 * we're still connected on the SCSI bus.
1176 	 */
1177 	status = aep->at_status;
1178 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1179 		/*
1180 		 * Bus Phase Sequence error. We should have sense data
1181 		 * suggested by the f/w. I'm not sure quite yet what
1182 		 * to do about this for CAM.
1183 		 */
1184 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1185 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1186 		return (0);
1187 	}
1188 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1189 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1190 		    status);
1191 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1192 		return (0);
1193 	}
1194 
1195 	bus = GET_BUS_VAL(aep->at_iid);
1196 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1197 	if (tptr == NULL) {
1198 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1199 	}
1200 
1201 	if (tptr == NULL) {
1202 		/*
1203 		 * Because we can't autofeed sense data back with
1204 		 * a command for parallel SCSI, we can't give back
1205 		 * a CHECK CONDITION. We'll give back a BUSY status
1206 		 * instead. This works out okay because the only
1207 		 * time we should, in fact, get this, is in the
1208 		 * case that somebody configured us without the
1209 		 * blackhole driver, so they get what they deserve.
1210 		 */
1211 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1212 		return (0);
1213 	}
1214 
1215 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1216 	if (atiop == NULL) {
1217 		/*
1218 		 * Because we can't autofeed sense data back with
1219 		 * a command for parallel SCSI, we can't give back
1220 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1221 		 * instead. This works out okay because the only time we
1222 		 * should, in fact, get this, is in the case that we've
1223 		 * run out of ATIOS.
1224 		 */
1225 		xpt_print_path(tptr->owner);
1226 		isp_prt(isp, ISP_LOGWARN,
1227 		    "no ATIOS for lun %d from initiator %d on channel %d",
1228 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1229 		rls_lun_statep(isp, tptr);
1230 		if (aep->at_flags & AT_TQAE)
1231 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1232 		else
1233 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1234 		return (0);
1235 	}
1236 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1237 	if (tptr == &isp->isp_osinfo.tsdflt[bus]) {
1238 		atiop->ccb_h.target_id = aep->at_tgt;
1239 		atiop->ccb_h.target_lun = aep->at_lun;
1240 	}
1241 	if (aep->at_flags & AT_NODISC) {
1242 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1243 	} else {
1244 		atiop->ccb_h.flags = 0;
1245 	}
1246 
1247 	if (status & QLTM_SVALID) {
1248 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1249 		atiop->sense_len = amt;
1250 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1251 	} else {
1252 		atiop->sense_len = 0;
1253 	}
1254 
1255 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1256 	atiop->cdb_len = aep->at_cdblen;
1257 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1258 	atiop->ccb_h.status = CAM_CDB_RECVD;
1259 	/*
1260 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1261 	 * and the handle (which we have to preserve).
1262 	 */
1263 	AT_MAKE_TAGID(atiop->tag_id, aep);
1264 	if (aep->at_flags & AT_TQAE) {
1265 		atiop->tag_action = aep->at_tag_type;
1266 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1267 	}
1268 	xpt_done((union ccb*)atiop);
1269 	isp_prt(isp, ISP_LOGTDEBUG1,
1270 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1271 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1272 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1273 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1274 	    "nondisc" : "disconnecting");
1275 	rls_lun_statep(isp, tptr);
1276 	return (0);
1277 }
1278 
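/*
 * Handle an ATIO2 (Fibre Channel) from the firmware: find the lun state,
 * dequeue a waiting ACCEPT TARGET IO CCB, fill it in from the firmware
 * entry and pass it up to CAM.
 */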
1279 static int
1280 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1281 {
1282 	lun_id_t lun;
1283 	tstate_t *tptr;
1284 	struct ccb_accept_tio *atiop;
1285 
1286 	/*
1287 	 * The firmware status (except for the QLTM_SVALID bit)
1288 	 * indicates why this ATIO was sent to us.
1289 	 *
1290 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1291 	 */
1292 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1293 		isp_prt(isp, ISP_LOGWARN,
1294 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1295 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1296 		return (0);
1297 	}
1298 
1299 	if (isp->isp_maxluns > 16) {
1300 		lun = aep->at_scclun;
1301 	} else {
1302 		lun = aep->at_lun;
1303 	}
1304 	tptr = get_lun_statep(isp, 0, lun);
1305 	if (tptr == NULL) {
1306 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1307 	}
1308 
1309 	if (tptr == NULL) {
1310 		/*
1311 		 * What we'd like to know is whether or not we have a listener
1312 		 * upstream that really hasn't configured yet. If we do, then
1313 		 * we can give a more sensible reply here. If not, then we can
1314 		 * reject this out of hand.
1315 		 *
1316 		 * Choices for what to send were
1317 		 *
1318 		 *	Not Ready, Unit Not Self-Configured Yet
1319 		 *	(0x2,0x3e,0x00)
1320 		 *
1321 		 * for the former and
1322 		 *
1323 		 *	Illegal Request, Logical Unit Not Supported
1324 		 *	(0x5,0x25,0x00)
1325 		 *
1326 		 * for the latter.
1327 		 *
1328 		 * We used to decide whether there was at least one listener
1329 		 * based upon whether the black hole driver was configured.
1330 		 * However, recent config(8) changes have made this hard to do
1331 		 * at this time.
1332 		 *
1333 		 */
1334 		u_int32_t ccode = SCSI_STATUS_BUSY;
1335 
1336 		/*
1337 		 * Because we can't autofeed sense data back with
1338 		 * a command for parallel SCSI, we can't give back
1339 		 * a CHECK CONDITION. We'll give back a BUSY status
1340 		 * instead. This works out okay because the only
1341 		 * time we should, in fact, get this, is in the
1342 		 * case that somebody configured us without the
1343 		 * blackhole driver, so they get what they deserve.
1344 		 */
1345 		isp_endcmd(isp, aep, ccode, 0);
1346 		return (0);
1347 	}
1348 
1349 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1350 	if (atiop == NULL) {
1351 		/*
1352 		 * Because we can't autofeed sense data back with
1353 		 * a command for parallel SCSI, we can't give back
1354 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1355 		 * instead. This works out okay because the only time we
1356 		 * should, in fact, get this, is in the case that we've
1357 		 * run out of ATIOS.
1358 		 */
1359 		xpt_print_path(tptr->owner);
1360 		isp_prt(isp, ISP_LOGWARN,
1361 		    "no ATIOS for lun %d from initiator %d", lun, aep->at_iid);
1362 		rls_lun_statep(isp, tptr);
1363 		if (aep->at_flags & AT_TQAE)
1364 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1365 		else
1366 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1367 		return (0);
1368 	}
1369 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1370 
1371 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1372 		atiop->ccb_h.target_id =
1373 			((fcparam *)isp->isp_param)->isp_loopid;
1374 		atiop->ccb_h.target_lun = lun;
1375 	}
1376 	/*
1377 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1378 	 */
1379 	atiop->sense_len = 0;
1380 
1381 	atiop->init_id = aep->at_iid;
1382 	atiop->cdb_len = ATIO2_CDBLEN;
1383 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1384 	atiop->ccb_h.status = CAM_CDB_RECVD;
1385 	atiop->tag_id = aep->at_rxid;
1386 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1387 	case ATIO2_TC_ATTR_SIMPLEQ:
1388 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1389 		break;
1390 	case ATIO2_TC_ATTR_HEADOFQ:
1391 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1392 		break;
1393 	case ATIO2_TC_ATTR_ORDERED:
1394 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1395 		break;
1396 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1397 	case ATIO2_TC_ATTR_UNTAGGED:
1398 	default:
1399 		atiop->tag_action = 0;
1400 		break;
1401 	}
1402 	if (atiop->tag_action != 0) {
1403 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1404 	}
1405 
1406 	/*
1407 	 * Preserve overall command datalength in private field.
1408 	 */
1409 	atiop->ccb_h.spriv_field0 = aep->at_datalen;
1410 
1411 	xpt_done((union ccb*)atiop);
1412 	isp_prt(isp, ISP_LOGTDEBUG1,
1413 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1414 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1415 	    lun, aep->at_taskflags, aep->at_datalen);
1416 	rls_lun_statep(isp, tptr);
1417 	return (0);
1418 }
1419 
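/*
 * Handle a completed CTIO/CTIO2 from the firmware: recover the original
 * CCB, accumulate residual and sense data, and either complete the CCB or
 * push a replacement ATIO back to the firmware if the CTIO failed.
 */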
1420 static int
1421 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1422 {
1423 	union ccb *ccb;
1424 	int sentstatus, ok, notify_cam, resid = 0;
1425 
1426 	/*
1427 	 * CTIO and CTIO2 are close enough....
1428 	 */
1429 
1430 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1431 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1432 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1433 
1434 	if (IS_FC(isp)) {
1435 		ct2_entry_t *ct = arg;
1436 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1437 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1438 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1439 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1440 		}
1441 		isp_prt(isp, ISP_LOGTDEBUG1,
1442 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d %s",
1443 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1444 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1445 		    sentstatus? "FIN" : "MID");
1446 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1447 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1448 			resid = ct->ct_resid;
1449 		}
1450 	} else {
1451 		ct_entry_t *ct = arg;
1452 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1453 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1454 		isp_prt(isp, ISP_LOGTDEBUG1,
1455 		    "CTIO[%x] tag %x iid %x tgt %d lun %d sts 0x%x flg %x %s",
1456 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_tgt,
1457 		    ct->ct_lun, ct->ct_status, ct->ct_flags,
1458 		    sentstatus? "FIN" : "MID");
1459 
1460 		/*
1461 		 * We *ought* to be able to get back to the original ATIO
1462 		 * here, but for some reason this gets lost. It's just as
1463 		 * well because it's squirrelled away as part of periph
1464 		 * private data.
1465 		 *
1466 		 * We can live without it as long as we continue to use
1467 		 * the auto-replenish feature for CTIOs.
1468 		 */
1469 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1470 		if (ct->ct_status & QLTM_SVALID) {
1471 			char *sp = (char *)ct;
1472 			sp += CTIO_SENSE_OFFSET;
1473 			ccb->csio.sense_len =
1474 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1475 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1476 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1477 		}
1478 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1479 			resid = ct->ct_resid;
1480 		}
1481 	}
1482 	ccb->csio.resid += resid;
1483 
1484 	/*
1485 	 * We're here either because intermediate data transfers are done
1486 	 * and/or the final status CTIO (which may have joined with a
1487 	 * Data Transfer) is done.
1488 	 *
1489 	 * In any case, for this platform, the upper layers figure out
1490 	 * what to do next, so all we do here is collect status and
1491 	 * pass information along. Any DMA handles have already been
1492 	 * freed.
1493 	 */
1494 	if (notify_cam == 0) {
1495 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO done");
1496 		return (0);
1497 	}
1498 
1499 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO done (resid %d)",
1500 	    (sentstatus)? "  FINAL " : "MIDTERM ", ccb->csio.resid);
1501 
1502 	if (!ok) {
1503 		isp_target_putback_atio(ccb);
1504 	} else {
1505 		isp_complete_ctio(ccb);
1506 
1507 	}
1508 	return (0);
1509 }
1510 #endif
1511 
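/*
 * CAM async callback: on AC_LOST_DEVICE for parallel SCSI, push safe
 * default transfer parameters back to the chip for that target.
 */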
1512 static void
1513 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1514 {
1515 	struct cam_sim *sim;
1516 	struct ispsoftc *isp;
1517 
1518 	sim = (struct cam_sim *)cbarg;
1519 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1520 	switch (code) {
1521 	case AC_LOST_DEVICE:
1522 		if (IS_SCSI(isp)) {
1523 			u_int16_t oflags, nflags;
1524 			sdparam *sdp = isp->isp_param;
1525 			int tgt;
1526 
1527 			tgt = xpt_path_target_id(path);
1528 			ISP_LOCK(isp);
1529 			sdp += cam_sim_bus(sim);
1530 #ifndef	ISP_TARGET_MODE
1531 			if (tgt == sdp->isp_initiator_id) {
1532 				nflags = DPARM_DEFAULT;
1533 			} else {
1534 				nflags = DPARM_SAFE_DFLT;
1535 				if (isp->isp_loaded_fw) {
1536 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1537 				}
1538 			}
1539 #else
1540 			nflags = DPARM_DEFAULT;
1541 #endif
1542 			oflags = sdp->isp_devparam[tgt].dev_flags;
1543 			sdp->isp_devparam[tgt].dev_flags = nflags;
1544 			sdp->isp_devparam[tgt].dev_update = 1;
1545 			isp->isp_update |= (1 << cam_sim_bus(sim));
1546 			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
1547 			sdp->isp_devparam[tgt].dev_flags = oflags;
1548 			ISP_UNLOCK(isp);
1549 		}
1550 		break;
1551 	default:
1552 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1553 		break;
1554 	}
1555 }
1556 
1557 static void
1558 isp_poll(struct cam_sim *sim)
1559 {
1560 	struct ispsoftc *isp = cam_sim_softc(sim);
1561 	ISP_LOCK(isp);
1562 	(void) isp_intr(isp);
1563 	ISP_UNLOCK(isp);
1564 }
1565 
1566 #if	0
1567 static void
1568 isp_relsim(void *arg)
1569 {
1570 	struct ispsoftc *isp = arg;
1571 	ISP_LOCK(isp);
1572 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
1573 		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
1574 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
1575 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1576 			xpt_release_simq(isp->isp_sim, 1);
1577 			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
1578 		}
1579 	}
1580 	ISP_UNLOCK(isp);
1581 }
1582 #endif
1583 
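/*
 * Per-command watchdog: if the command is still outstanding, first give
 * the interrupt handler a chance to complete it, then push a SYNC_ALL
 * marker and re-arm once (the grace period) before declaring a timeout
 * and completing the command with an error.
 */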
1584 static void
1585 isp_watchdog(void *arg)
1586 {
1587 	XS_T *xs = arg;
1588 	struct ispsoftc *isp = XS_ISP(xs);
1589 	u_int32_t handle;
1590 
1591 	/*
1592 	 * We've decided this command is dead. Make sure we're not trying
1593 	 * to kill a command that's already dead by getting its handle
1594 	 * and seeing whether it's still alive.
1595 	 */
1596 	ISP_LOCK(isp);
1597 	handle = isp_find_handle(isp, xs);
1598 	if (handle) {
1599 		u_int16_t r;
1600 
1601 		if (XS_CMD_DONE_P(xs)) {
1602 			isp_prt(isp, ISP_LOGDEBUG1,
1603 			    "watchdog found done cmd (handle 0x%x)", handle);
1604 			ISP_UNLOCK(isp);
1605 			return;
1606 		}
1607 
1608 		if (XS_CMD_WDOG_P(xs)) {
1609 			isp_prt(isp, ISP_LOGDEBUG2,
1610 			    "recursive watchdog (handle 0x%x)", handle);
1611 			ISP_UNLOCK(isp);
1612 			return;
1613 		}
1614 
1615 		XS_CMD_S_WDOG(xs);
1616 
1617 		r = ISP_READ(isp, BIU_ISR);
1618 
1619 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
1620 			isp_prt(isp, ISP_LOGDEBUG2,
1621 			    "watchdog cleanup (%x, %x)", handle, r);
1622 			xpt_done((union ccb *) xs);
1623 		} else if (XS_CMD_GRACE_P(xs)) {
1624 			/*
1625 			 * Make sure the command is *really* dead before we
1626 			 * release the handle (and DMA resources) for reuse.
1627 			 */
1628 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1629 
1630 			/*
1631 			 * After this point, the command is really dead.
1632 			 */
1633 			if (XS_XFRLEN(xs)) {
1634 				ISP_DMAFREE(isp, xs, handle);
1635                 	}
1636 			isp_destroy_handle(isp, handle);
1637 			xpt_print_path(xs->ccb_h.path);
1638 			isp_prt(isp, ISP_LOGWARN,
1639 			    "watchdog timeout (%x, %x)", handle, r);
1640 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1641 			XS_CMD_C_WDOG(xs);
1642 			isp_done(xs);
1643 		} else {
1644 			u_int16_t iptr, optr;
1645 			ispreq_t *mp;
1646 
1647 			XS_CMD_C_WDOG(xs);
1648 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1649 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
1650 				ISP_UNLOCK(isp);
1651 				return;
1652 			}
1653 			XS_CMD_S_GRACE(xs);
1654 			MEMZERO((void *) mp, sizeof (*mp));
1655 			mp->req_header.rqs_entry_count = 1;
1656 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1657 			mp->req_modifier = SYNC_ALL;
1658 			mp->req_target = XS_CHANNEL(xs) << 7;
1659 			ISP_SWIZZLE_REQUEST(isp, mp);
1660 			ISP_ADD_REQUEST(isp, iptr);
1661 		}
1662 	} else {
1663 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1664 	}
1665 	ISP_UNLOCK(isp);
1666 }
1667 
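/*
 * Fibre Channel worker thread: wait for the loop/fabric to come ready,
 * then release any loop-down SIMQ freeze and sleep until woken again
 * (two variants, with and without ISP_SMPLOCK).
 */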
1668 #ifdef	ISP_SMPLOCK
1669 static void
1670 isp_kthread(void *arg)
1671 {
1672 	int wasfrozen;
1673 	struct ispsoftc *isp = arg;
1674 
1675 	mtx_lock(&isp->isp_lock);
1676 	for (;;) {
1677 		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
1678 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1679 #if	0
1680 			msleep(&lbolt, &isp->isp_lock,
1681 			    PRIBIO, "isp_fcthrd", 0);
1682 #else
1683 			msleep(isp_kthread, &isp->isp_lock,
1684 			    PRIBIO, "isp_fcthrd", hz);
1685 #endif
1686 		}
1687 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1688 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1689 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1690 			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
1691 			ISPLOCK_2_CAMLOCK(isp);
1692 			xpt_release_simq(isp->isp_sim, 1);
1693 			CAMLOCK_2_ISPLOCK(isp);
1694 		}
1695 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
1696 	}
1697 }
1698 #else
1699 static void
1700 isp_kthread(void *arg)
1701 {
1702 	int wasfrozen;
1703 	struct ispsoftc *isp = arg;
1704 
1705 	mtx_lock(&Giant);
1706 	for (;;) {
1707 		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
1708 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1709 			tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1710 		}
1711 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1712 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1713 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1714 			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
1715 			ISPLOCK_2_CAMLOCK(isp);
1716 			xpt_release_simq(isp->isp_sim, 1);
1717 			CAMLOCK_2_ISPLOCK(isp);
1718 		}
1719 		tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "isp_fc_worker", 0);
1720 	}
1721 }
1722 #endif
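
/*
 * Main CAM action entry point: dispatch XPT requests (SCSI I/O, target
 * mode resources, resets, aborts, transfer settings, etc.) to the core
 * driver code.
 */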
1723 static void
1724 isp_action(struct cam_sim *sim, union ccb *ccb)
1725 {
1726 	int bus, tgt, error;
1727 	struct ispsoftc *isp;
1728 	struct ccb_trans_settings *cts;
1729 
1730 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1731 
1732 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1733 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1734 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1735 	if (isp->isp_state != ISP_RUNSTATE &&
1736 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1737 		CAMLOCK_2_ISPLOCK(isp);
1738 		isp_init(isp);
1739 		if (isp->isp_state != ISP_INITSTATE) {
1740 			ISP_UNLOCK(isp);
1741 			/*
1742 			 * Lie. Say it was a selection timeout.
1743 			 */
1744 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1745 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1746 			xpt_done(ccb);
1747 			return;
1748 		}
1749 		isp->isp_state = ISP_RUNSTATE;
1750 		ISPLOCK_2_CAMLOCK(isp);
1751 	}
1752 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1753 
1754 
1755 	switch (ccb->ccb_h.func_code) {
1756 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1757 		/*
1758 		 * Do a couple of preliminary checks...
1759 		 */
1760 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1761 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1762 				ccb->ccb_h.status = CAM_REQ_INVALID;
1763 				xpt_done(ccb);
1764 				break;
1765 			}
1766 		}
1767 #ifdef	DIAGNOSTIC
1768 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
1769 			ccb->ccb_h.status = CAM_PATH_INVALID;
1770 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
1771 			ccb->ccb_h.status = CAM_PATH_INVALID;
1772 		}
1773 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
1774 			isp_prt(isp, ISP_LOGERR,
1775 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
1776 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1777 			xpt_done(ccb);
1778 			break;
1779 		}
1780 #endif
1781 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
1782 		CAMLOCK_2_ISPLOCK(isp);
1783 		error = isp_start((XS_T *) ccb);
1784 		switch (error) {
1785 		case CMD_QUEUED:
1786 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1787 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1788 				u_int64_t ticks = (u_int64_t) hz;
1789 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1790 					ticks = 60 * 1000 * ticks;
1791 				else
1792 					ticks = ccb->ccb_h.timeout * hz;
1793 				ticks = ((ticks + 999) / 1000) + hz + hz;
1794 				if (ticks >= 0x80000000) {
1795 					isp_prt(isp, ISP_LOGERR,
1796 					    "timeout overflow");
1797 					ticks = 0x80000000;
1798 				}
1799 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
1800 				    (caddr_t)ccb, (int)ticks);
1801 			} else {
1802 				callout_handle_init(&ccb->ccb_h.timeout_ch);
1803 			}
1804 			ISPLOCK_2_CAMLOCK(isp);
1805 			break;
1806 		case CMD_RQLATER:
1807 #ifdef	ISP_SMPLOCK
1808 			cv_signal(&isp->isp_osinfo.kthread_cv);
1809 #else
1810 			wakeup(&isp->isp_osinfo.kthread_cv);
1811 #endif
1812 			if (isp->isp_osinfo.simqfrozen == 0) {
1813 				isp_prt(isp, ISP_LOGDEBUG2,
1814 				    "RQLATER freeze simq");
1815 #if	0
1816 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED;
1817 				timeout(isp_relsim, isp, 500);
1818 #else
1819 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
1820 #endif
1821 				ISPLOCK_2_CAMLOCK(isp);
1822 				xpt_freeze_simq(sim, 1);
1823 			} else {
1824 				ISPLOCK_2_CAMLOCK(isp);
1825 			}
1826 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1827 			xpt_done(ccb);
1828 			break;
1829 		case CMD_EAGAIN:
1830 			if (isp->isp_osinfo.simqfrozen == 0) {
1831 				xpt_freeze_simq(sim, 1);
1832 				isp_prt(isp, ISP_LOGDEBUG2,
1833 				    "EAGAIN freeze simq");
1834 			}
1835 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1836 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1837 			ISPLOCK_2_CAMLOCK(isp);
1838 			xpt_done(ccb);
1839 			break;
1840 		case CMD_COMPLETE:
1841 			isp_done((struct ccb_scsiio *) ccb);
1842 			ISPLOCK_2_CAMLOCK(isp);
1843 			break;
1844 		default:
1845 			isp_prt(isp, ISP_LOGERR,
1846 			    "What's this? 0x%x at %d in file %s",
1847 			    error, __LINE__, __FILE__);
1848 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
1849 			xpt_done(ccb);
1850 			ISPLOCK_2_CAMLOCK(isp);
1851 		}
1852 		break;
1853 
1854 #ifdef	ISP_TARGET_MODE
1855 	case XPT_EN_LUN:		/* Enable LUN as a target */
1856 		CAMLOCK_2_ISPLOCK(isp);
1857 		isp_en_lun(isp, ccb);
1858 		ISPLOCK_2_CAMLOCK(isp);
1859 		xpt_done(ccb);
1860 		break;
1861 
1862 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
1863 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
1864 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
1865 	{
1866 		tstate_t *tptr =
1867 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
1868 		if (tptr == NULL) {
1869 			ccb->ccb_h.status = CAM_LUN_INVALID;
1870 			xpt_done(ccb);
1871 			break;
1872 		}
1873 		ccb->ccb_h.sim_priv.entries[0].field = 0;
1874 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1875 		CAMLOCK_2_ISPLOCK(isp);
1876 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1877 			SLIST_INSERT_HEAD(&tptr->atios,
1878 			    &ccb->ccb_h, sim_links.sle);
1879 		} else {
1880 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
1881 			    sim_links.sle);
1882 		}
1883 		rls_lun_statep(isp, tptr);
1884 		ccb->ccb_h.status = CAM_REQ_INPROG;
1885 		ISPLOCK_2_CAMLOCK(isp);
1886 		break;
1887 	}
1888 	case XPT_CONT_TARGET_IO:
1889 	{
1890 		CAMLOCK_2_ISPLOCK(isp);
1891 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
1892 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1893 			if (isp->isp_osinfo.simqfrozen == 0) {
1894 				xpt_freeze_simq(sim, 1);
1895 				xpt_print_path(ccb->ccb_h.path);
1896 				isp_prt(isp, ISP_LOGINFO,
1897 				    "XPT_CONT_TARGET_IO freeze simq");
1898 			}
1899 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1900 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1901 			ISPLOCK_2_CAMLOCK(isp);
1902 			xpt_done(ccb);
1903 		} else {
1904 			ISPLOCK_2_CAMLOCK(isp);
1905 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1906 		}
1907 		break;
1908 	}
1909 #endif
1910 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
1911 
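		/*
		 * ISPCTL_RESET_DEV takes the bus and target packed into a
		 * single int, with the bus number in the upper 16 bits
		 * (the same encoding isp_async decodes for
		 * ISPASYNC_NEW_TGT_PARAMS).
		 */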
1912 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1913 		tgt = ccb->ccb_h.target_id;
1914 		tgt |= (bus << 16);
1915 
1916 		CAMLOCK_2_ISPLOCK(isp);
1917 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
1918 		ISPLOCK_2_CAMLOCK(isp);
1919 		if (error) {
1920 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1921 		} else {
1922 			ccb->ccb_h.status = CAM_REQ_CMP;
1923 		}
1924 		xpt_done(ccb);
1925 		break;
1926 	case XPT_ABORT:			/* Abort the specified CCB */
1927 	{
1928 		union ccb *accb = ccb->cab.abort_ccb;
1929 		CAMLOCK_2_ISPLOCK(isp);
1930 		switch (accb->ccb_h.func_code) {
1931 #ifdef	ISP_TARGET_MODE
1932 		case XPT_ACCEPT_TARGET_IO:
1933 		case XPT_IMMED_NOTIFY:
1934 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
1935 			break;
1936 		case XPT_CONT_TARGET_IO:
1937 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
1938 			ccb->ccb_h.status = CAM_UA_ABORT;
1939 			break;
1940 #endif
1941 		case XPT_SCSI_IO:
1942 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
1943 			if (error) {
1944 				ccb->ccb_h.status = CAM_UA_ABORT;
1945 			} else {
1946 				ccb->ccb_h.status = CAM_REQ_CMP;
1947 			}
1948 			break;
1949 		default:
1950 			ccb->ccb_h.status = CAM_REQ_INVALID;
1951 			break;
1952 		}
1953 		ISPLOCK_2_CAMLOCK(isp);
1954 		xpt_done(ccb);
1955 		break;
1956 	}
1957 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
1958 
1959 		cts = &ccb->cts;
1960 		tgt = cts->ccb_h.target_id;
1961 		CAMLOCK_2_ISPLOCK(isp);
1962 		if (IS_SCSI(isp)) {
1963 			sdparam *sdp = isp->isp_param;
1964 			u_int16_t *dptr;
1965 
1966 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1967 
1968 			sdp += bus;
1969 #if	0
1970 			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS)
1971 				dptr = &sdp->isp_devparam[tgt].cur_dflags;
1972 			else
1973 				dptr = &sdp->isp_devparam[tgt].dev_flags;
1974 #else
1975 			/*
1976 			 * We always update (internally) from dev_flags
1977 			 * so any request to change settings just gets
1978 			 * vectored to that location.
1979 			 */
1980 			dptr = &sdp->isp_devparam[tgt].dev_flags;
1981 #endif
1982 
1983 			/*
1984 			 * Note that these operations affect the goal
1985 			 * flags (dev_flags), not the current state
1986 			 * flags. Then we mark things so that the next
1987 			 * operation to this HBA will cause the update
1988 			 * to occur.
1989 			 */
1990 			if (cts->valid & CCB_TRANS_DISC_VALID) {
1991 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
1992 					*dptr |= DPARM_DISC;
1993 				} else {
1994 					*dptr &= ~DPARM_DISC;
1995 				}
1996 			}
1997 			if (cts->valid & CCB_TRANS_TQ_VALID) {
1998 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
1999 					*dptr |= DPARM_TQING;
2000 				} else {
2001 					*dptr &= ~DPARM_TQING;
2002 				}
2003 			}
2004 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2005 				switch (cts->bus_width) {
2006 				case MSG_EXT_WDTR_BUS_16_BIT:
2007 					*dptr |= DPARM_WIDE;
2008 					break;
2009 				default:
2010 					*dptr &= ~DPARM_WIDE;
2011 				}
2012 			}
2013 			/*
2014 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2015 			 * of nonzero will cause us to go to the
2016 			 * selected (from NVRAM) maximum value for
2017 			 * this device. At a later point, we'll
2018 			 * allow finer control.
2019 			 */
2020 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2021 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2022 			    (cts->sync_offset > 0)) {
2023 				*dptr |= DPARM_SYNC;
2024 			} else {
2025 				*dptr &= ~DPARM_SYNC;
2026 			}
2027 			*dptr |= DPARM_SAFE_DFLT;
2028 			isp_prt(isp, ISP_LOGDEBUG0,
2029 			    "%d.%d set %s period 0x%x offset 0x%x flags 0x%x",
2030 			    bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
2031 			    "current" : "user",
2032 			    sdp->isp_devparam[tgt].sync_period,
2033 			    sdp->isp_devparam[tgt].sync_offset,
2034 			    sdp->isp_devparam[tgt].dev_flags);
2035 			sdp->isp_devparam[tgt].dev_update = 1;
2036 			isp->isp_update |= (1 << bus);
2037 		}
2038 		ISPLOCK_2_CAMLOCK(isp);
2039 		ccb->ccb_h.status = CAM_REQ_CMP;
2040 		xpt_done(ccb);
2041 		break;
2042 
2043 	case XPT_GET_TRAN_SETTINGS:
2044 
2045 		cts = &ccb->cts;
2046 		tgt = cts->ccb_h.target_id;
2047 		if (IS_FC(isp)) {
2048 			/*
2049 			 * a lot of normal SCSI things don't make sense.
2050 			 */
2051 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2052 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2053 			/*
2054 			 * How do you measure the width of a high
2055 			 * speed serial bus? Well, in bytes.
2056 			 *
2057 			 * Offset and period make no sense for FC; the 'base'
2058 			 * transfer speed (gigabit) is reported via XPT_PATH_INQ.
2059 			 */
2060 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2061 		} else {
2062 			sdparam *sdp = isp->isp_param;
2063 			u_int16_t dval, pval, oval;
2064 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2065 
2066 			CAMLOCK_2_ISPLOCK(isp);
2067 			sdp += bus;
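			/*
			 * For a current-settings query, force a parameter
			 * refresh from the firmware so the cur_* fields are
			 * up to date; otherwise report the stored goal
			 * ('user') values.
			 */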
2068 			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) {
2069 				sdp->isp_devparam[tgt].dev_refresh = 1;
2070 				isp->isp_update |= (1 << bus);
2071 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2072 				    NULL);
2073 				dval = sdp->isp_devparam[tgt].cur_dflags;
2074 				oval = sdp->isp_devparam[tgt].cur_offset;
2075 				pval = sdp->isp_devparam[tgt].cur_period;
2076 			} else {
2077 				dval = sdp->isp_devparam[tgt].dev_flags;
2078 				oval = sdp->isp_devparam[tgt].sync_offset;
2079 				pval = sdp->isp_devparam[tgt].sync_period;
2080 			}
2081 
2082 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2083 
2084 			if (dval & DPARM_DISC) {
2085 				cts->flags |= CCB_TRANS_DISC_ENB;
2086 			}
2087 			if (dval & DPARM_TQING) {
2088 				cts->flags |= CCB_TRANS_TAG_ENB;
2089 			}
2090 			if (dval & DPARM_WIDE) {
2091 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2092 			} else {
2093 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2094 			}
2095 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2096 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2097 
2098 			if ((dval & DPARM_SYNC) && oval != 0) {
2099 				cts->sync_period = pval;
2100 				cts->sync_offset = oval;
2101 				cts->valid |=
2102 				    CCB_TRANS_SYNC_RATE_VALID |
2103 				    CCB_TRANS_SYNC_OFFSET_VALID;
2104 			}
2105 			ISPLOCK_2_CAMLOCK(isp);
2106 			isp_prt(isp, ISP_LOGDEBUG0,
2107 			    "%d.%d get %s period 0x%x offset 0x%x flags 0x%x",
2108 			    bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
2109 			    "current" : "user", pval, oval, dval);
2110 		}
2111 		ccb->ccb_h.status = CAM_REQ_CMP;
2112 		xpt_done(ccb);
2113 		break;
2114 
2115 	case XPT_CALC_GEOMETRY:
2116 	{
2117 		struct ccb_calc_geometry *ccg;
2118 		u_int32_t secs_per_cylinder;
2119 		u_int32_t size_mb;
2120 
2121 		ccg = &ccb->ccg;
2122 		if (ccg->block_size == 0) {
2123 			isp_prt(isp, ISP_LOGERR,
2124 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2125 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2126 			ccb->ccb_h.status = CAM_REQ_INVALID;
2127 			xpt_done(ccb);
2128 			break;
2129 		}
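		/*
		 * The usual CAM driver geometry heuristic: volumes over
		 * 1GB get 255 heads and 63 sectors per track, smaller
		 * ones get 64 and 32; cylinders are whatever remains.
		 */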
2130 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2131 		if (size_mb > 1024) {
2132 			ccg->heads = 255;
2133 			ccg->secs_per_track = 63;
2134 		} else {
2135 			ccg->heads = 64;
2136 			ccg->secs_per_track = 32;
2137 		}
2138 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2139 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2140 		ccb->ccb_h.status = CAM_REQ_CMP;
2141 		xpt_done(ccb);
2142 		break;
2143 	}
2144 	case XPT_RESET_BUS:		/* Reset the specified bus */
2145 		bus = cam_sim_bus(sim);
2146 		CAMLOCK_2_ISPLOCK(isp);
2147 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2148 		ISPLOCK_2_CAMLOCK(isp);
2149 		if (error)
2150 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2151 		else {
2152 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2153 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2154 			else if (isp->isp_path != NULL)
2155 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2156 			ccb->ccb_h.status = CAM_REQ_CMP;
2157 		}
2158 		xpt_done(ccb);
2159 		break;
2160 
2161 	case XPT_TERM_IO:		/* Terminate the I/O process */
2162 		ccb->ccb_h.status = CAM_REQ_INVALID;
2163 		xpt_done(ccb);
2164 		break;
2165 
2166 	case XPT_PATH_INQ:		/* Path routing inquiry */
2167 	{
2168 		struct ccb_pathinq *cpi = &ccb->cpi;
2169 
2170 		cpi->version_num = 1;
2171 #ifdef	ISP_TARGET_MODE
2172 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2173 #else
2174 		cpi->target_sprt = 0;
2175 #endif
2176 		cpi->hba_eng_cnt = 0;
2177 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2178 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2179 		cpi->bus_id = cam_sim_bus(sim);
2180 		if (IS_FC(isp)) {
2181 			cpi->hba_misc = PIM_NOBUSRESET;
2182 			/*
2183 			 * Because our loop ID can shift from time to time,
2184 			 * make our initiator ID out of range of our bus.
2185 			 */
2186 			cpi->initiator_id = cpi->max_target + 1;
2187 
2188 			/*
2189 			 * Set base transfer capabilities for Fibre Channel.
2190 			 * Technically not correct because we don't know
2191 			 * what media we're running on top of- but we'll
2192 			 * look good if we always say 100MB/s.
2193 			 */
2194 			cpi->base_transfer_speed = 100000;
2195 			cpi->hba_inquiry = PI_TAG_ABLE;
2196 		} else {
2197 			sdparam *sdp = isp->isp_param;
2198 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2199 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2200 			cpi->hba_misc = 0;
2201 			cpi->initiator_id = sdp->isp_initiator_id;
2202 			cpi->base_transfer_speed = 3300;
2203 		}
2204 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2205 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2206 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2207 		cpi->unit_number = cam_sim_unit(sim);
2208 		cpi->ccb_h.status = CAM_REQ_CMP;
2209 		xpt_done(ccb);
2210 		break;
2211 	}
2212 	default:
2213 		ccb->ccb_h.status = CAM_REQ_INVALID;
2214 		xpt_done(ccb);
2215 		break;
2216 	}
2217 }
2218 
2219 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
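/*
 * Command completion path: the common code hands back a finished command
 * as a CCB. Translate SCSI status into CAM status, handle devq/simq
 * freeze bookkeeping, cancel the watchdog if it hasn't claimed the
 * command, and pass the CCB up to CAM.
 */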
2220 void
2221 isp_done(struct ccb_scsiio *sccb)
2222 {
2223 	struct ispsoftc *isp = XS_ISP(sccb);
2224 
2225 	if (XS_NOERR(sccb))
2226 		XS_SETERR(sccb, CAM_REQ_CMP);
2227 
2228 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2229 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2230 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2231 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2232 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2233 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2234 		} else {
2235 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2236 		}
2237 	}
2238 
2239 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2240 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2241 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2242 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2243 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2244 			if (sccb->scsi_status != SCSI_STATUS_OK)
2245 				isp_prt(isp, ISP_LOGDEBUG2,
2246 				    "freeze devq %d.%d %x %x",
2247 				    sccb->ccb_h.target_id,
2248 				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
2249 				    sccb->scsi_status);
2250 		}
2251 	}
2252 
2253 	/*
2254 	 * If the simq was frozen waiting for resources, clear that
2255 	 * condition. If no other freeze reason remains and the devq
2256 	 * isn't frozen, mark the completing CCB so the XPT layer
2257 	 * releases the simq.
2258 	 */
2259 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
2260 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
2261 		if (isp->isp_osinfo.simqfrozen == 0) {
2262 			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2263 				isp_prt(isp, ISP_LOGDEBUG2,
2264 				    "isp_done->relsimq");
2265 				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2266 			} else {
2267 				isp_prt(isp, ISP_LOGDEBUG2,
2268 				    "isp_done->devq frozen");
2269 			}
2270 		} else {
2271 			isp_prt(isp, ISP_LOGDEBUG2,
2272 			    "isp_done -> simqfrozen = %x",
2273 			    isp->isp_osinfo.simqfrozen);
2274 		}
2275 	}
2276 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2277 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2278 		xpt_print_path(sccb->ccb_h.path);
2279 		isp_prt(isp, ISP_LOGINFO,
2280 		    "cam completion status 0x%x", sccb->ccb_h.status);
2281 	}
2282 
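	/*
	 * Mark the command done. If the watchdog hasn't claimed it, cancel
	 * the timeout and complete the CCB now; otherwise the watchdog path
	 * is expected to finish it.
	 */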
2283 	XS_CMD_S_DONE(sccb);
2284 	if (XS_CMD_WDOG_P(sccb) == 0) {
2285 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2286 		if (XS_CMD_GRACE_P(sccb)) {
2287 			isp_prt(isp, ISP_LOGDEBUG2,
2288 			    "finished command on borrowed time");
2289 		}
2290 		XS_CMD_S_CLEAR(sccb);
2291 		ISPLOCK_2_CAMLOCK(isp);
2292 		xpt_done((union ccb *) sccb);
2293 		CAMLOCK_2_ISPLOCK(isp);
2294 	}
2295 }
2296 
2297 int
2298 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2299 {
2300 	int bus, rv = 0;
2301 	switch (cmd) {
2302 	case ISPASYNC_NEW_TGT_PARAMS:
2303 	{
2304 		int flags, tgt;
2305 		sdparam *sdp = isp->isp_param;
2306 		struct ccb_trans_settings neg;
2307 		struct cam_path *tmppath;
2308 
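		/*
		 * The core has (re)negotiated parameters for a target. The
		 * argument packs the bus into the upper 16 bits and the
		 * target into the lower 16. Build a temporary path and
		 * broadcast AC_TRANSFER_NEG so peripherals see the new
		 * settings.
		 */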
2309 		tgt = *((int *)arg);
2310 		bus = (tgt >> 16) & 0xffff;
2311 		tgt &= 0xffff;
2312 		sdp += bus;
2313 		if (xpt_create_path(&tmppath, NULL,
2314 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2315 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2316 			isp_prt(isp, ISP_LOGWARN,
2317 			    "isp_async cannot make temp path for %d.%d",
2318 			    tgt, bus);
2319 			rv = -1;
2320 			break;
2321 		}
2322 		flags = sdp->isp_devparam[tgt].cur_dflags;
		neg.flags = 0;
2323 		neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2324 		if (flags & DPARM_DISC) {
2325 			neg.flags |= CCB_TRANS_DISC_ENB;
2326 		}
2327 		if (flags & DPARM_TQING) {
2328 			neg.flags |= CCB_TRANS_TAG_ENB;
2329 		}
2330 		neg.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2331 		neg.bus_width = (flags & DPARM_WIDE)?
2332 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2333 		neg.sync_period = sdp->isp_devparam[tgt].cur_period;
2334 		neg.sync_offset = sdp->isp_devparam[tgt].cur_offset;
2335 		if (flags & DPARM_SYNC) {
2336 			neg.valid |=
2337 			    CCB_TRANS_SYNC_RATE_VALID |
2338 			    CCB_TRANS_SYNC_OFFSET_VALID;
2339 		}
2340 		isp_prt(isp, ISP_LOGDEBUG2,
2341 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2342 		    bus, tgt, neg.sync_period, neg.sync_offset, flags);
2343 		xpt_setup_ccb(&neg.ccb_h, tmppath, 1);
2344 		ISPLOCK_2_CAMLOCK(isp);
2345 		xpt_async(AC_TRANSFER_NEG, tmppath, &neg);
2346 		CAMLOCK_2_ISPLOCK(isp);
2347 		xpt_free_path(tmppath);
2348 		break;
2349 	}
2350 	case ISPASYNC_BUS_RESET:
2351 		bus = *((int *)arg);
2352 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2353 		    bus);
2354 		if (bus > 0 && isp->isp_path2) {
2355 			ISPLOCK_2_CAMLOCK(isp);
2356 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2357 			CAMLOCK_2_ISPLOCK(isp);
2358 		} else if (isp->isp_path) {
2359 			ISPLOCK_2_CAMLOCK(isp);
2360 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2361 			CAMLOCK_2_ISPLOCK(isp);
2362 		}
2363 		break;
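	/*
	 * LIP, Loop Reset and Loop Down are handled the same way: freeze
	 * the simq (once) and note SIMQFRZ_LOOPDOWN so no new commands are
	 * started until the loop state is sorted out and the queue is
	 * released elsewhere.
	 */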
2364 	case ISPASYNC_LIP:
2365 		if (isp->isp_path) {
2366 			if (isp->isp_osinfo.simqfrozen == 0) {
2367 				isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq");
2368 				ISPLOCK_2_CAMLOCK(isp);
2369 				xpt_freeze_simq(isp->isp_sim, 1);
2370 				CAMLOCK_2_ISPLOCK(isp);
2371 			}
2372 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2373 		}
2374 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2375 		break;
2376 	case ISPASYNC_LOOP_RESET:
2377 		if (isp->isp_path) {
2378 			if (isp->isp_osinfo.simqfrozen == 0) {
2379 				isp_prt(isp, ISP_LOGDEBUG0,
2380 				    "Loop Reset freeze simq");
2381 				ISPLOCK_2_CAMLOCK(isp);
2382 				xpt_freeze_simq(isp->isp_sim, 1);
2383 				CAMLOCK_2_ISPLOCK(isp);
2384 			}
2385 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2386 		}
2387 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2388 		break;
2389 	case ISPASYNC_LOOP_DOWN:
2390 		if (isp->isp_path) {
2391 			if (isp->isp_osinfo.simqfrozen == 0) {
2392 				isp_prt(isp, ISP_LOGDEBUG0,
2393 				    "loop down freeze simq");
2394 				ISPLOCK_2_CAMLOCK(isp);
2395 				xpt_freeze_simq(isp->isp_sim, 1);
2396 				CAMLOCK_2_ISPLOCK(isp);
2397 			}
2398 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2399 		}
2400 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2401 		break;
2402 	case ISPASYNC_LOOP_UP:
2403 		/*
2404 		 * Now we just note that Loop has come up. We don't
2405 		 * actually do anything because we're waiting for a
2406 		 * Change Notify before activating the FC cleanup
2407 		 * thread to look at the state of the loop again.
2408 		 */
2409 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2410 		break;
2411 	case ISPASYNC_PROMENADE:
2412 	{
2413 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2414 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2415 		static const char *roles[4] = {
2416 		    "(none)", "Target", "Initiator", "Target/Initiator"
2417 		};
2418 		fcparam *fcp = isp->isp_param;
2419 		int tgt = *((int *) arg);
2420 		struct lportdb *lp = &fcp->portdb[tgt];
2421 
2422 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2423 		    roles[lp->roles & 0x3],
2424 		    (lp->valid)? "Arrived" : "Departed",
2425 		    (u_int32_t) (lp->port_wwn >> 32),
2426 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2427 		    (u_int32_t) (lp->node_wwn >> 32),
2428 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2429 		break;
2430 	}
2431 	case ISPASYNC_CHANGE_NOTIFY:
2432 		if (arg == (void *) 1) {
2433 			isp_prt(isp, ISP_LOGINFO,
2434 			    "Name Server Database Changed");
2435 		} else {
2436 			isp_prt(isp, ISP_LOGINFO,
2437 			    "Other Change Notify");
2438 		}
2439 #ifdef	ISP_SMPLOCK
2440 		cv_signal(&isp->isp_osinfo.kthread_cv);
2441 #else
2442 		wakeup(&isp->isp_osinfo.kthread_cv);
2443 #endif
2444 		break;
2445 	case ISPASYNC_FABRIC_DEV:
2446 	{
2447 		int target, lrange;
2448 		struct lportdb *lp = NULL;
2449 		char *pt;
2450 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
2451 		u_int32_t portid;
2452 		u_int64_t wwpn, wwnn;
2453 		fcparam *fcp = isp->isp_param;
2454 
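		/*
		 * A name server (GA_NXT) response: assemble the 24-bit
		 * port ID and the big-endian port/node WWNs from the byte
		 * arrays, then (for FCP devices only) find the device in
		 * the local port database or claim a free slot, skipping
		 * the loop IDs reserved for the FL port and SNS.
		 */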
2455 		portid =
2456 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
2457 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
2458 		    (((u_int32_t) resp->snscb_port_id[2]));
2459 
2460 		wwpn =
2461 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
2462 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
2463 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
2464 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
2465 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
2466 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
2467 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
2468 		    (((u_int64_t)resp->snscb_portname[7]));
2469 
2470 		wwnn =
2471 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
2472 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
2473 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
2474 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
2475 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
2476 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
2477 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
2478 		    (((u_int64_t)resp->snscb_nodename[7]));
2479 		if (portid == 0 || wwpn == 0) {
2480 			break;
2481 		}
2482 
2483 		switch (resp->snscb_port_type) {
2484 		case 1:
2485 			pt = "   N_Port";
2486 			break;
2487 		case 2:
2488 			pt = "  NL_Port";
2489 			break;
2490 		case 3:
2491 			pt = "F/NL_Port";
2492 			break;
2493 		case 0x7f:
2494 			pt = "  Nx_Port";
2495 			break;
2496 		case 0x81:
2497 			pt = "   F_Port";
2498 			break;
2499 		case 0x82:
2500 			pt = "  FL_Port";
2501 			break;
2502 		case 0x84:
2503 			pt = "   E_Port";
2504 			break;
2505 		default:
2506 			pt = "?";
2507 			break;
2508 		}
2509 		isp_prt(isp, ISP_LOGINFO,
2510 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
2511 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
2512 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
2513 		/*
2514 		 * We're only interested in SCSI_FCP types (for now)
2515 		 */
2516 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
2517 			break;
2518 		}
2519 		if (fcp->isp_topo != TOPO_F_PORT)
2520 			lrange = FC_SNS_ID+1;
2521 		else
2522 			lrange = 0;
2523 		/*
2524 		 * Is it already in our list?
2525 		 */
2526 		for (target = lrange; target < MAX_FC_TARG; target++) {
2527 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2528 				continue;
2529 			}
2530 			lp = &fcp->portdb[target];
2531 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
2532 				lp->fabric_dev = 1;
2533 				break;
2534 			}
2535 		}
2536 		if (target < MAX_FC_TARG) {
2537 			break;
2538 		}
2539 		for (target = lrange; target < MAX_FC_TARG; target++) {
2540 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2541 				continue;
2542 			}
2543 			lp = &fcp->portdb[target];
2544 			if (lp->port_wwn == 0) {
2545 				break;
2546 			}
2547 		}
2548 		if (target == MAX_FC_TARG) {
2549 			isp_prt(isp, ISP_LOGWARN,
2550 			    "no more space for fabric devices");
2551 			break;
2552 		}
2553 		lp->node_wwn = wwnn;
2554 		lp->port_wwn = wwpn;
2555 		lp->portid = portid;
2556 		lp->fabric_dev = 1;
2557 		break;
2558 	}
2559 #ifdef	ISP_TARGET_MODE
2560 	case ISPASYNC_TARGET_MESSAGE:
2561 	{
2562 		tmd_msg_t *mp = arg;
2563 		isp_prt(isp, ISP_LOGDEBUG2,
2564 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2565 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2566 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2567 		    mp->nt_msg[0]);
2568 		break;
2569 	}
2570 	case ISPASYNC_TARGET_EVENT:
2571 	{
2572 		tmd_event_t *ep = arg;
2573 		isp_prt(isp, ISP_LOGDEBUG2,
2574 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2575 		break;
2576 	}
2577 	case ISPASYNC_TARGET_ACTION:
2578 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2579 		default:
2580 			isp_prt(isp, ISP_LOGWARN,
2581 			   "event 0x%x for unhandled target action",
2582 			    ((isphdr_t *)arg)->rqs_entry_type);
2583 			break;
2584 		case RQSTYPE_ATIO:
2585 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2586 			break;
2587 		case RQSTYPE_ATIO2:
2588 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2589 			break;
2590 		case RQSTYPE_CTIO2:
2591 		case RQSTYPE_CTIO:
2592 			rv = isp_handle_platform_ctio(isp, arg);
2593 			break;
2594 		case RQSTYPE_ENABLE_LUN:
2595 		case RQSTYPE_MODIFY_LUN:
2596 			isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status);
2597 			break;
2598 		}
2599 		break;
2600 #endif
2601 	default:
2602 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2603 		rv = -1;
2604 		break;
2605 	}
2606 	return (rv);
2607 }
2608 
2609 
2610 /*
2611  * Locks are held before coming here.
2612  */
2613 void
2614 isp_uninit(struct ispsoftc *isp)
2615 {
2616 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2617 	DISABLE_INTS(isp);
2618 }
2619 
2620 void
2621 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2622 {
2623 	va_list ap;
2624 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2625 		return;
2626 	}
2627 	printf("%s: ", device_get_nameunit(isp->isp_dev));
2628 	va_start(ap, fmt);
2629 	vprintf(fmt, ap);
2630 	va_end(ap);
2631 	printf("\n");
2632 }
2633