xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 5069714534cba67f1985e6dfe23b145178372b5f)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <machine/stdarg.h>	/* for use by isp_prt below */
30 
31 static void isp_intr_enable(void *);
32 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
33 static void isp_poll(struct cam_sim *);
34 static void isp_relsim(void *);
35 static timeout_t isp_watchdog;
36 static void isp_action(struct cam_sim *, union ccb *);
37 
38 
39 static struct ispsoftc *isplist = NULL;
40 
/*
 * Attach CAM (SIM/bus/path) plumbing for an initialized HBA instance
 * and link the softc onto the global isplist.  On any failure this
 * silently unwinds what it allocated and returns; no status is
 * reported to the caller.
 */
void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 * Both SIMs of a dual-bus card share this one devq.
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    isp->isp_unit, 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return;
	}

	/*
	 * Defer interrupt enabling until the boot-time interrupt hook
	 * fires (see isp_intr_enable).
	 */
	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "could not establish interrupt enable hook");
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		return;
	}

	/* Register for "device lost" async callbacks on this bus. */
	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	isp->isp_sim = sim;
	isp->isp_path = path;

	/*
	 * If we have a second channel, construct SIM entry for that.
	 *
	 * NOTE(review): in the failure paths below the first SIM is
	 * deregistered but never cam_sim_free'd, and cam_sim_free(sim,
	 * TRUE) frees the devq the first SIM still references — both
	 * look leaky/unsafe; confirm against cam_sim_free semantics.
	 */
	if (IS_DUALBUS(isp)) {
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    isp->isp_unit, 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			return;
		}

		/* Same AC_LOST_DEVICE registration for the second bus. */
		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}
	isp->isp_state = ISP_RUNSTATE;
	ENABLE_INTS(isp);
	/* Append to the singly-linked global list of attached instances. */
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}
}
154 
155 static void
156 isp_intr_enable(void *arg)
157 {
158 	struct ispsoftc *isp = arg;
159 	ENABLE_INTS(isp);
160 	isp->isp_osinfo.intsok = 1;
161 	/* Release our hook so that the boot can continue. */
162 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
163 }
164 
165 /*
166  * Put the target mode functions here, because some are inlines
167  */
168 
169 #ifdef	ISP_TARGET_MODE
170 
171 static __inline int is_lun_enabled(struct ispsoftc *, lun_id_t);
172 static __inline int are_any_luns_enabled(struct ispsoftc *);
173 static __inline tstate_t *get_lun_statep(struct ispsoftc *, lun_id_t);
174 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
175 static __inline int isp_psema_sig_rqe(struct ispsoftc *);
176 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
177 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
178 static __inline void isp_vsema_rqe(struct ispsoftc *);
179 static cam_status
180 create_lun_state(struct ispsoftc *, struct cam_path *, tstate_t **);
181 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
182 static void isp_en_lun(struct ispsoftc *, union ccb *);
183 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
184 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
185 static cam_status isp_target_putback_atio(struct ispsoftc *, union ccb *);
186 static timeout_t isp_refire_putback_atio;
187 
188 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
189 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
190 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
191 static void isp_handle_platform_ctio_part2(struct ispsoftc *, union ccb *);
192 
193 static __inline int
194 is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
195 {
196 	tstate_t *tptr;
197 	ISP_LOCK(isp);
198 	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
199 		ISP_UNLOCK(isp);
200 		return (0);
201 	}
202 	do {
203 		if (tptr->lun == (lun_id_t) lun) {
204 			ISP_UNLOCK(isp);
205 			return (1);
206 		}
207 	} while ((tptr = tptr->next) != NULL);
208 	ISP_UNLOCK(isp);
209 	return (0);
210 }
211 
212 static __inline int
213 are_any_luns_enabled(struct ispsoftc *isp)
214 {
215 	int i;
216 	for (i = 0; i < LUN_HASH_SIZE; i++) {
217 		if (isp->isp_osinfo.lun_hash[i]) {
218 			return (1);
219 		}
220 	}
221 	return (0);
222 }
223 
224 static __inline tstate_t *
225 get_lun_statep(struct ispsoftc *isp, lun_id_t lun)
226 {
227 	tstate_t *tptr;
228 
229 	ISP_LOCK(isp);
230 	if (lun == CAM_LUN_WILDCARD) {
231 		tptr = &isp->isp_osinfo.tsdflt;
232 		tptr->hold++;
233 		ISP_UNLOCK(isp);
234 		return (tptr);
235 	} else {
236 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)];
237 	}
238 	if (tptr == NULL) {
239 		ISP_UNLOCK(isp);
240 		return (NULL);
241 	}
242 
243 	do {
244 		if (tptr->lun == lun) {
245 			tptr->hold++;
246 			ISP_UNLOCK(isp);
247 			return (tptr);
248 		}
249 	} while ((tptr = tptr->next) != NULL);
250 	ISP_UNLOCK(isp);
251 	return (tptr);
252 }
253 
254 static __inline void
255 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
256 {
257 	if (tptr->hold)
258 		tptr->hold--;
259 }
260 
261 static __inline int
262 isp_psema_sig_rqe(struct ispsoftc *isp)
263 {
264 	ISP_LOCK(isp);
265 	while (isp->isp_osinfo.tmflags & TM_BUSY) {
266 		isp->isp_osinfo.tmflags |= TM_WANTED;
267 		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
268 			ISP_UNLOCK(isp);
269 			return (-1);
270 		}
271 		isp->isp_osinfo.tmflags |= TM_BUSY;
272 	}
273 	ISP_UNLOCK(isp);
274 	return (0);
275 }
276 
277 static __inline int
278 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
279 {
280 	ISP_LOCK(isp);
281 	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
282 		ISP_UNLOCK(isp);
283 		return (-1);
284 	}
285 	ISP_UNLOCK(isp);
286 	return (0);
287 }
288 
289 static __inline void
290 isp_cv_signal_rqe(struct ispsoftc *isp, int status)
291 {
292 	isp->isp_osinfo.rstatus = status;
293 	wakeup(&isp->isp_osinfo.rstatus);
294 }
295 
296 static __inline void
297 isp_vsema_rqe(struct ispsoftc *isp)
298 {
299 	ISP_LOCK(isp);
300 	if (isp->isp_osinfo.tmflags & TM_WANTED) {
301 		isp->isp_osinfo.tmflags &= ~TM_WANTED;
302 		wakeup(&isp->isp_osinfo.tmflags);
303 	}
304 	isp->isp_osinfo.tmflags &= ~TM_BUSY;
305 	ISP_UNLOCK(isp);
306 }
307 
/*
 * Allocate and hash-insert target-mode state for the lun named by
 * the given path.  On success *rslt points at the new state, which
 * is created with hold == 1 (caller owns a reference).  Returns a
 * CAM status code.
 */
static cam_status
create_lun_state(struct ispsoftc *isp, struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	/* NOTE(review): if lun_id_t is unsigned this test never fires. */
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}

	/* Own a path so we can print/act on behalf of this lun later. */
	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	new->hold = 1;

	/* Append to the tail of the hash chain under the softc lock. */
	ISP_LOCK(isp);
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	ISP_UNLOCK(isp);
	*rslt = new;
	return (CAM_REQ_CMP);
}
350 
/*
 * Unlink a lun state from the hash and free it.  A state that is
 * still referenced (hold != 0) or cannot be found in its hash chain
 * is left alone.  Matching is by lun number, which assumes at most
 * one state per lun in a chain.
 */
static __inline void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	tstate_t *lw, *pw;

	ISP_LOCK(isp);
	if (tptr->hold) {
		/* Still referenced; caller must drop holds first. */
		ISP_UNLOCK(isp);
		return;
	}
	pw = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)];
	if (pw == NULL) {
		ISP_UNLOCK(isp);
		return;
	} else if (pw->lun == tptr->lun) {
		/* Head of chain: advance the bucket pointer. */
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)] = pw->next;
	} else {
		/* Interior: walk with a trailing pointer and splice out. */
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			/* Not in the chain at all; nothing to free. */
			ISP_UNLOCK(isp);
			return;
		}
	}
	free(tptr, M_DEVBUF);
	ISP_UNLOCK(isp);
}
386 
/*
 * XPT_EN_LUN handler: enable or disable target-mode on a lun (or,
 * for a full wildcard, toggle target mode for the whole adapter).
 * Completion status is returned in ccb->ccb_h.status; the ccb is not
 * xpt_done()'d here.
 */
static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char *lfmt = "Lun now %sabled for target mode\n";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus, frozen = 0;
	lun_id_t lun;
	target_id_t tgt;


	bus = XS_CHANNEL(ccb);
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * Do some sanity checking first.
	 */

	if (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}
	/* Target id must be the wildcard or our own initiator/loop id. */
	if (IS_SCSI(isp)) {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != ((sdparam *) isp->isp_param)->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != ((fcparam *) isp->isp_param)->isp_loopid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	}

	/*
	 * If Fibre Channel, stop and drain all activity to this bus.
	 * The simq stays frozen until one of the release points below.
	 */
	if (IS_FC(isp)) {
		ISP_LOCK(isp);
		frozen = 1;
		xpt_freeze_simq(isp->isp_sim, 1);
		isp->isp_osinfo.drain = 1;
		/* ISP_UNLOCK(isp);  XXX NEED CV_WAIT HERE XXX */
		while (isp->isp_osinfo.drain) {
			tsleep(&isp->isp_osinfo.drain, PRIBIO, "ispdrain", 0);
		}
		ISP_UNLOCK(isp);
	}

	/*
	 * Check to see if we're enabling on fibre channel and
	 * don't yet have a notion of who the heck we are (no
	 * loop yet).
	 */
	if (IS_FC(isp) && cel->enable &&
	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
		/*
		 * rv carries the usec timeout in, and the result out
		 * — presumably an in/out argument to FCLINK_TEST;
		 * confirm against isp_control.
		 */
		int rv= 2 * 1000000;
		fcparam *fcp = isp->isp_param;

		ISP_LOCK(isp);
		rv = isp_control(isp, ISPCTL_FCLINK_TEST, &rv);
		ISP_UNLOCK(isp);
		if (rv || fcp->isp_fwstate != FW_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("link status not good yet\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			if (frozen)
				xpt_release_simq(isp->isp_sim, 1);
			return;
		}
		ISP_LOCK(isp);
		rv = isp_control(isp, ISPCTL_PDB_SYNC, NULL);
		ISP_UNLOCK(isp);
		if (rv || fcp->isp_fwstate != FW_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("could not get a good port database read\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			if (frozen)
				xpt_release_simq(isp->isp_sim, 1);
			return;
		}
	}


	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we enable/disable target mode but don't do any lun enabling.
	 */
	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
		int av;
		tptr = &isp->isp_osinfo.tsdflt;
		if (cel->enable) {
			if (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			/* av is in/out: 1 = turn target mode on. */
			av = 1;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				xpt_free_path(tptr->owner);
				ISP_UNLOCK(isp);
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			isp->isp_osinfo.tmflags |= TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
		} else {
			if ((isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
				ccb->ccb_h.status = CAM_LUN_INVALID;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			/* Can't turn target mode off while luns are enabled. */
			if (are_any_luns_enabled(isp)) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			av = 0;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				ISP_UNLOCK(isp);
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			isp->isp_osinfo.tmflags &= ~TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ? "en" : "dis");
		if (frozen)
			xpt_release_simq(isp->isp_sim, 1);
		return;
	}

	/*
	 * We can move along now...
	 */

	if (frozen)
		xpt_release_simq(isp->isp_sim, 1);


	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			return;
		}
	} else {
		tptr = get_lun_statep(isp, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	/* Serialize lun commands against other target-mode users. */
	if (isp_psema_sig_rqe(isp)) {
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}

	ISP_LOCK(isp);
	if (cel->enable) {
		u_int32_t seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;
		if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	} else {
		u_int32_t seq;

		seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;

		/*
		 * NOTE(review): the negated RQSTYPE values presumably
		 * tell isp_lun_cmd this is a disable — confirm against
		 * isp_lun_cmd's contract.
		 */
		if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for MODIFY LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("MODIFY LUN returned 0x%x\n", rstat);
			goto out;
		}
		rstat = LUN_ERR;
		seq = isp->isp_osinfo.rollinfo++;

		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	}
out:
	isp_vsema_rqe(isp);
	ISP_UNLOCK(isp);

	if (rstat != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		printf("lun %sable failed\n", (cel->enable) ? "en" : "dis");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
	} else {
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ? "en" : "dis");
		rls_lun_statep(isp, tptr);
		if (cel->enable == 0) {
			destroy_lun_state(isp, tptr);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
	}
}
662 
/*
 * Abort a queued-but-not-yet-consumed target-mode ccb (an ATIO or
 * IMMED_NOTIFY sitting on a lun state's list).  Returns CAM_REQ_CMP
 * if the ccb was found and removed (the victim is marked
 * CAM_REQ_ABORTED), else an error status.
 */
static cam_status
isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found;
	union ccb *accb = ccb->cab.abort_ccb;

	/* Non-wildcard targets must name our own initiator/loop id. */
	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			return (CAM_PATH_INVALID);
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			return (CAM_PATH_INVALID);
		}
	}
	tptr = get_lun_statep(isp, accb->ccb_h.target_lun);
	if (tptr == NULL) {
		return (CAM_PATH_INVALID);
	}
	/* Pick the list the victim would be on from its function code. */
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
	} else {
		rls_lun_statep(isp, tptr);
		return (CAM_UA_ABORT);
	}
	/* Hand-rolled singly-linked-list removal of &accb->ccb_h. */
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		while(curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		return (CAM_REQ_CMP);
	}
	return(CAM_PATH_INVALID);
}
719 
720 static cam_status
721 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
722 {
723 	void *qe;
724 	struct ccb_scsiio *cso = &ccb->csio;
725 	u_int32_t *hp, save_handle;
726 	u_int16_t iptr, optr;
727 
728 
729 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
730 		xpt_print_path(ccb->ccb_h.path);
731 		printf("Request Queue Overflow in isp_target_start_ctio\n");
732 		return (CAM_RESRC_UNAVAIL);
733 	}
734 	bzero(qe, QENTRY_LEN);
735 
736 	/*
737 	 * We're either moving data or completing a command here.
738 	 */
739 
740 	if (IS_FC(isp)) {
741 		struct ccb_accept_tio *atiop;
742 		ct2_entry_t *cto = qe;
743 
744 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
745 		cto->ct_header.rqs_entry_count = 1;
746 		cto->ct_iid = cso->init_id;
747 		if (isp->isp_maxluns <= 16) {
748 			cto->ct_lun = ccb->ccb_h.target_lun;
749 		}
750 		/*
751 		 * Start with a residual based on what the original datalength
752 		 * was supposed to be. Basically, we ignore what CAM has set
753 		 * for residuals. The data transfer routines will knock off
754 		 * the residual for each byte actually moved- and also will
755 		 * be responsible for setting the underrun flag.
756 		 */
757 		/* HACK! HACK! */
758 		if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
759 			cto->ct_resid = atiop->ccb_h.spriv_field0;
760 		}
761 
762 		/*
763 		 * We always have to use the tag_id- it has the RX_ID
764 		 * for this exchage.
765 		 */
766 		cto->ct_rxid = cso->tag_id;
767 		if (cso->dxfer_len == 0) {
768 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
769 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
770 				cto->ct_flags |= CT2_SENDSTATUS;
771 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
772 			}
773 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
774 				int m = min(cso->sense_len, MAXRESPLEN);
775 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
776 				cto->rsp.m1.ct_senselen = m;
777 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
778 			}
779 		} else {
780 			cto->ct_flags |= CT2_FLAG_MODE0;
781 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
782 				cto->ct_flags |= CT2_DATA_IN;
783 			} else {
784 				cto->ct_flags |= CT2_DATA_OUT;
785 			}
786 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
787 				cto->ct_flags |= CT2_SENDSTATUS;
788 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
789 			}
790 			/*
791 			 * If we're sending data and status back together,
792 			 * we can't also send back sense data as well.
793 			 */
794 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
795 		}
796 		if (cto->ct_flags & CAM_SEND_STATUS) {
797 			isp_prt(isp, ISP_LOGTDEBUG2,
798 			    "CTIO2 RX_ID 0x%x SCSI STATUS 0x%x datalength %u",
799 			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
800 		}
801 		hp = &cto->ct_reserved;
802 	} else {
803 		ct_entry_t *cto = qe;
804 
805 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
806 		cto->ct_header.rqs_entry_count = 1;
807 		cto->ct_iid = cso->init_id;
808 		cto->ct_tgt = ccb->ccb_h.target_id;
809 		cto->ct_lun = ccb->ccb_h.target_lun;
810 		if (cso->tag_id && cso->tag_action) {
811 			/*
812 			 * We don't specify a tag type for regular SCSI.
813 			 * Just the tag value and set the flag.
814 			 */
815 			cto->ct_tag_val = cso->tag_id;
816 			cto->ct_flags |= CT_TQAE;
817 		}
818 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
819 			cto->ct_flags |= CT_NODISC;
820 		}
821 		if (cso->dxfer_len == 0) {
822 			cto->ct_flags |= CT_NO_DATA;
823 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
824 			cto->ct_flags |= CT_DATA_IN;
825 		} else {
826 			cto->ct_flags |= CT_DATA_OUT;
827 		}
828 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
829 			cto->ct_flags |= CT_SENDSTATUS;
830 			cto->ct_scsi_status = cso->scsi_status;
831 			cto->ct_resid = cso->resid;
832 		}
833 		if (cto->ct_flags & CAM_SEND_STATUS) {
834 			isp_prt(isp, ISP_LOGTDEBUG2,
835 			    "CTIO SCSI STATUS 0x%x resid %d",
836 			    cso->scsi_status, cso->resid);
837 		}
838 		hp = &cto->ct_reserved;
839 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
840 	}
841 
842 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
843 		xpt_print_path(ccb->ccb_h.path);
844 		printf("No XFLIST pointers for isp_target_start_ctio\n");
845 		return (CAM_RESRC_UNAVAIL);
846 	}
847 
848 
849 	/*
850 	 * Call the dma setup routines for this entry (and any subsequent
851 	 * CTIOs) if there's data to move, and then tell the f/w it's got
852 	 * new things to play with. As with isp_start's usage of DMA setup,
853 	 * any swizzling is done in the machine dependent layer. Because
854 	 * of this, we put the request onto the queue area first in native
855 	 * format.
856 	 */
857 
858 	save_handle = *hp;
859 	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
860 	case CMD_QUEUED:
861 		ISP_ADD_REQUEST(isp, iptr);
862 		return (CAM_REQ_INPROG);
863 
864 	case CMD_EAGAIN:
865 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
866 		isp_destroy_handle(isp, save_handle);
867 		return (CAM_RESRC_UNAVAIL);
868 
869 	default:
870 		isp_destroy_handle(isp, save_handle);
871 		return (XS_ERR(ccb));
872 	}
873 }
874 
/*
 * Requeue an ATIO (or ATIO2 for Fibre Channel) entry to the firmware
 * so the command slot becomes available again.  Returns CAM_REQ_CMP
 * on success or CAM_RESRC_UNAVAIL if the request queue is full (the
 * caller may retry; see isp_refire_putback_atio).
 */
static cam_status
isp_target_putback_atio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_accept_tio *atiop;
	u_int16_t iptr, optr;

	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_putback_atio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(qe, QENTRY_LEN);
	atiop = (struct ccb_accept_tio *) ccb;
	if (IS_FC(isp)) {
		at2_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		/* SCC-lun firmware uses the wider lun field. */
		if (isp->isp_maxluns > 16) {
			at->at_scclun = (uint16_t) atiop->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) atiop->ccb_h.target_lun;
		}
		/* NOTE(review): CT_OK as an ATIO status — confirm intended. */
		at->at_status = CT_OK;
		at->at_rxid = atiop->tag_id;
		ISP_SWIZ_ATIO2(isp, qe, qe);
	} else {
		at_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = atiop->init_id;
		at->at_tgt = atiop->ccb_h.target_id;
		at->at_lun = atiop->ccb_h.target_lun;
		at->at_status = CT_OK;
		if (atiop->ccb_h.status & CAM_TAG_ACTION_VALID) {
			at->at_tag_type = atiop->tag_action;
		}
		at->at_tag_val = atiop->tag_id;
		ISP_SWIZ_ATIO(isp, qe, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, iptr);
	return (CAM_REQ_CMP);
}
919 
920 static void
921 isp_refire_putback_atio(void *arg)
922 {
923 	union ccb *ccb = arg;
924 	int s = splcam();
925 	if (isp_target_putback_atio(XS_ISP(ccb), ccb) != CAM_REQ_CMP) {
926 		(void) timeout(isp_refire_putback_atio, ccb, 10);
927 	} else {
928 		isp_handle_platform_ctio_part2(XS_ISP(ccb), ccb);
929 	}
930 	splx(s);
931 }
932 
933 /*
934  * Handle ATIO stuff that the generic code can't.
935  * This means handling CDBs.
936  */
937 
/*
 * Process an incoming (parallel SCSI) ATIO from the firmware: match
 * it to a pending XPT_ACCEPT_TARGET_IO ccb on the lun's state (or
 * the default/wildcard state), fill the ccb in, and complete it.
 * Commands we cannot accept are bounced back with BUSY or QUEUE FULL.
 * Always returns 0.
 */
static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus - i.e. the initiator
	 * did not set DiscPriv in the identify message. We don't care
	 * about this so it's ignored.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		printf("%s: PHASE ERROR\n", isp->isp_name);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* Specific lun state first, then fall back to the wildcard. */
	tptr = get_lun_statep(isp, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    aep->at_lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	/* Wildcard state: record the real target/lun in the ccb. */
	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	/* Copy firmware-suggested sense data, if present. */
	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_tag_val;
	if ((atiop->tag_action = aep->at_tag_type) != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG2,
	    "ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_cdb[0] & 0xff, aep->at_iid, aep->at_lun,
	    aep->at_tag_val & 0xff, aep->at_tag_type,
	    (aep->at_flags & AT_NODISC)? "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}
1049 
/*
 * Handle an incoming Fibre Channel ATIO (type 2) from the firmware.
 *
 * Validates the entry, locates the software lun state (falling back to
 * the wildcard lun), and if an ACCEPT TARGET I/O CCB is queued for that
 * lun, fills it in from the firmware entry and completes it upstream
 * via xpt_done(). If no listener or no ATIO CCB is available, the
 * command is terminated back to the firmware with an appropriate
 * SCSI status. Always returns 0 (the entry is consumed here).
 */
static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/*
	 * SCC firmware (more than 16 luns) carries the lun in a wider
	 * field than the original firmware does.
	 */
	if (isp->isp_maxluns > 16) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, lun);
	if (tptr == NULL) {
		/* No state for this specific lun- try the wildcard lun. */
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
		 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 *
		 */
		u_int32_t ccode = SCSI_STATUS_BUSY;

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, ccode, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);

	/*
	 * If this came in on the default (wildcard) state, fill in the
	 * target/lun the upstream listener didn't know in advance.
	 */
	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id =
			((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/* Copy firmware-recommended sense data, if any, into the CCB. */
	if (aep->at_status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	/* Translate the firmware task attribute into a CAM tag action. */
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
        case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
        case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
        case ATIO2_TC_ATTR_ACAQ:		/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}

	/*
	 * Preserve overall command datalength in private field.
	 */
	atiop->ccb_h.spriv_field0 = aep->at_datalen;

	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG2,
	    "ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid & 0xffff, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}
1193 
/*
 * Handle completion of a CTIO (parallel SCSI) or CTIO2 (Fibre Channel)
 * from the firmware. Looks up the owning CCB by handle, collects status
 * and residual, and if this was the final CTIO for the command, pushes
 * a replacement ATIO back down and finishes the CCB upstream.
 * Always returns 0.
 */
static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_reserved);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_reserved);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/* Record that the firmware delivered our sense data. */
		if (ok && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		isp_prt(isp, ISP_LOGTDEBUG2,
		    "CTIO2 RX_ID 0x%x sts 0x%x flg 0x%x sns %d FIN",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
		notify_cam = ct->ct_header.rqs_seqno;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
		isp_prt(isp, ISP_LOGTDEBUG2,
		    "CTIO tag 0x%x sts 0x%x flg 0x%x FIN",
		    ct->ct_tag_val, ct->ct_status, ct->ct_flags);
		notify_cam = ct->ct_header.rqs_seqno;
	}

	/*
	 * We're here either because data transfers are done (and
	 * it's time to send a final status CTIO) or because the final
	 * status CTIO is done. We don't get called for all intermediate
	 * CTIOs that happen for a large data transfer.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. The exception is that we clear
	 * the notion of handling a non-disconnecting command here.
	 */

	if (sentstatus) {
		/*
		 * Data transfer done. See if all went okay.
		 */
		if (ok) {
			ccb->csio.resid = 0;
		} else {
			/* Transfer failed- report the whole length residual. */
			ccb->csio.resid = ccb->csio.dxfer_len;
		}
	}

	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG1, "Intermediate CTIO done");
		return (0);
	}
	isp_prt(isp, ISP_LOGTDEBUG1, "Final CTIO done");
	/*
	 * Replenish the ATIO pool before completing upstream; if the
	 * putback fails (out of request queue space), retry shortly
	 * from a timeout and defer the upstream completion until then.
	 */
	if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
	} else {
		isp_handle_platform_ctio_part2(isp, ccb);
	}
	return (0);
}
1265 
1266 static void
1267 isp_handle_platform_ctio_part2(struct ispsoftc *isp, union ccb *ccb)
1268 {
1269 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1270 		ccb->ccb_h.status |= CAM_REQ_CMP;
1271 	}
1272 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1273 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1274 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1275 		if (isp->isp_osinfo.simqfrozen == 0) {
1276 			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1277 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
1278 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1279 			} else {
1280 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->devqfrozen");
1281 			}
1282 		} else {
1283 			isp_prt(isp, ISP_LOGDEBUG2,
1284 			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
1285 		}
1286 	}
1287 	xpt_done(ccb);
1288 }
1289 #endif
1290 
/*
 * CAM asynchronous event callback. The only event handled is
 * AC_LOST_DEVICE on parallel SCSI: force the target's negotiation
 * parameters back to safe defaults so the next command to that
 * target renegotiates from scratch.
 */
static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int rvf, tgt;

			tgt = xpt_path_target_id(path);
			rvf = ISP_FW_REVX(isp->isp_fwrev);
			ISP_LOCK(isp);
			sdp += cam_sim_bus(sim);
			isp->isp_update |= (1 << cam_sim_bus(sim));
			nflags = DPARM_SAFE_DFLT;
			/*
			 * These firmware revisions also need narrow/async
			 * forced to truly reach a safe default.
			 */
			if (rvf >= ISP_FW_REV(7, 55, 0) ||
			   (ISP_FW_REV(4, 55, 0) <= rvf &&
			   (rvf < ISP_FW_REV(5, 0, 0)))) {
				nflags |= DPARM_NARROW | DPARM_ASYNC;
			}
			/*
			 * Temporarily install the safe flags, push them to
			 * the chip, then restore the original goal flags.
			 */
			oflags = sdp->isp_devparam[tgt].dev_flags;
			sdp->isp_devparam[tgt].dev_flags = nflags;
			sdp->isp_devparam[tgt].dev_update = 1;
			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
			sdp->isp_devparam[tgt].dev_flags = oflags;
			ISP_UNLOCK(isp);
		}
		break;
	default:
		printf("%s: isp_attach Async Code 0x%x\n", isp->isp_name, code);
		break;
	}
}
1330 
/*
 * CAM poll entry point: run the interrupt service routine by hand,
 * under the ISP lock.
 */
static void
isp_poll(struct cam_sim *sim)
{
	struct ispsoftc *isp;

	isp = (struct ispsoftc *) cam_sim_softc(sim);
	ISP_LOCK(isp);
	(void) isp_intr(isp);
	ISP_UNLOCK(isp);
}
1339 
1340 static void
1341 isp_relsim(void *arg)
1342 {
1343 	struct ispsoftc *isp = arg;
1344 	ISP_LOCK(isp);
1345 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
1346 		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
1347 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
1348 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1349 			xpt_release_simq(isp->isp_sim, 1);
1350 			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
1351 		}
1352 	}
1353 	ISP_UNLOCK(isp);
1354 }
1355 
/*
 * Per-command watchdog timeout handler.
 *
 * Fired from timeout(9) when a queued command has been outstanding too
 * long. Because the command may race with normal completion or with a
 * previous watchdog invocation, this routine first re-validates that
 * the command is still alive, then gives it one "grace" interval (with
 * a marker pushed to the request queue to flush firmware state) before
 * aborting it for real and completing it with CAM_CMD_TIMEOUT.
 */
static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r;

		/* Completed while the timeout was being dispatched. */
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		/* Another watchdog invocation is already working on it. */
		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		r = ISP_READ(isp, BIU_ISR);

		/*
		 * If an interrupt is pending, service it- the command may
		 * actually have completed and just needs finishing here.
		 */
		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup (%x, %x)", handle, r);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
                	}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			printf("%s: watchdog timeout (%x, %x)\n",
			    isp->isp_name, handle, r);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;

			/*
			 * First expiration: grant a grace period. Re-arm the
			 * watchdog and push a SYNC_ALL marker through the
			 * request queue to flush firmware queue state.
			 */
			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				/* No queue space- try again on the next tick. */
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	ISP_UNLOCK(isp);
}
1439 
/*
 * Main CAM action entry point: dispatch a CCB from the XPT layer.
 *
 * Handles I/O submission (XPT_SCSI_IO), target-mode resource and CTIO
 * CCBs (under ISP_TARGET_MODE), device/bus resets, aborts, transfer
 * negotiation get/set, geometry calculation, and path inquiry. The
 * chip is brought to run state on the first XPT_SCSI_IO if needed.
 */
static void
isp_action(struct cam_sim *sim, union ccb *ccb)
{
	int bus, tgt, error;
	struct ispsoftc *isp;
	struct ccb_trans_settings *cts;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));

	isp = (struct ispsoftc *)cam_sim_softc(sim);
	ccb->ccb_h.sim_priv.entries[0].field = 0;
	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
	/*
	 * Lazily bring the chip to run state the first time real I/O
	 * is attempted; if initialization fails, fail the CCB.
	 */
	if (isp->isp_state != ISP_RUNSTATE &&
	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
		ISP_LOCK(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_UNLOCK(isp);
			/*
			 * Lie. Say it was a selection timeout.
			 */
			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISP_UNLOCK(isp);
	}
	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			/* Physical-address CDB pointers are not supported. */
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
#ifdef	DIAGNOSTIC
		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		}
		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
			isp_prt(isp, ISP_LOGERR,
			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
			xpt_done(ccb);
			break;
		}
#endif
		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
		ISP_LOCK(isp);
		error = isp_start((XS_T *) ccb);
		ISP_UNLOCK(isp);
		switch (error) {
		case CMD_QUEUED:
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
				/*
				 * Convert the CCB timeout (ms) into ticks,
				 * padded by two seconds, capped at a value
				 * that fits in a signed int for timeout(9).
				 * NOTE(review): `ccb->ccb_h.timeout * hz`
				 * is computed in 32-bit arithmetic before
				 * widening and could wrap for very large
				 * timeouts - confirm acceptable here.
				 */
				u_int64_t ticks = (u_int64_t) hz;
				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
					ticks = 60 * 1000 * ticks;
				else
					ticks = ccb->ccb_h.timeout * hz;
				ticks = ((ticks + 999) / 1000) + hz + hz;
				if (ticks >= 0x80000000) {
					isp_prt(isp, ISP_LOGERR,
					    "timeout overflow");
					ticks = 0x80000000;
				}
				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
				    (caddr_t)ccb, (int)ticks);
			} else {
				callout_handle_init(&ccb->ccb_h.timeout_ch);
			}
			break;
		case CMD_RQLATER:
			/*
			 * Chip says try again later- freeze the simq for a
			 * short timed interval and requeue the command.
			 */
			if (isp->isp_osinfo.simqfrozen == 0) {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "RQLATER freeze simq");
				isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED;
				timeout(isp_relsim, isp, 500);
				xpt_freeze_simq(sim, 1);
			}
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			break;
		case CMD_EAGAIN:
			/*
			 * Out of resources- freeze until a command completes
			 * (isp_done clears SIMQFRZ_RESOURCE).
			 */
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				isp_prt(isp, ISP_LOGDEBUG2,
				    "EAGAIN freeze simq");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			break;
		case CMD_COMPLETE:
			ISP_LOCK(isp);
			isp_done((struct ccb_scsiio *) ccb);
			ISP_UNLOCK(isp);
			break;
		default:
			isp_prt(isp, ISP_LOGERR,
			    "What's this? 0x%x at %d in file %s",
			    error, __LINE__, __FILE__);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			xpt_done(ccb);
		}
		break;

#ifdef	ISP_TARGET_MODE
	case XPT_EN_LUN:		/* Enable LUN as a target */
		isp_en_lun(isp, ccb);
		xpt_done(ccb);
		break;

	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		/* Queue the resource CCB on the per-lun ATIO or INOT list. */
		tstate_t *tptr = get_lun_statep(isp, ccb->ccb_h.target_lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			xpt_done(ccb);
			break;
		}
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
		ISP_LOCK(isp);
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
#if	0
			(void) isp_target_putback_atio(isp, ccb);
#endif
			SLIST_INSERT_HEAD(&tptr->atios,
			    &ccb->ccb_h, sim_links.sle);
		} else {
			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
			    sim_links.sle);
		}
		ISP_UNLOCK(isp);
		rls_lun_statep(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		ISP_LOCK(isp);
		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
			/* Couldn't start- freeze for resources and requeue. */
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				xpt_print_path(ccb->ccb_h.path);
				printf("XPT_CONT_TARGET_IO freeze simq\n");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
		} else {
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
		}
		ISP_UNLOCK(isp);
		break;
	}
#endif
	case XPT_RESET_DEV:		/* BDR the specified SCSI device */

		/* Encode bus number in the upper bits of the target arg. */
		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tgt = ccb->ccb_h.target_id;
		tgt |= (bus << 16);

		ISP_LOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
		ISP_UNLOCK(isp);
		if (error) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		union ccb *accb = ccb->cab.abort_ccb;
		switch (accb->ccb_h.func_code) {
#ifdef	ISP_TARGET_MODE
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
        		ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
#endif
		case XPT_SCSI_IO:
			ISP_LOCK(isp);
			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
			ISP_UNLOCK(isp);
			if (error) {
				ccb->ccb_h.status = CAM_UA_ABORT;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */

		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		ISP_LOCK(isp);
		if (IS_SCSI(isp)) {
			sdparam *sdp = isp->isp_param;
			u_int16_t *dptr;

			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

			sdp += bus;
#if	0
			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS)
				dptr = &sdp->isp_devparam[tgt].cur_dflags;
			else
				dptr = &sdp->isp_devparam[tgt].dev_flags;
#else
			/*
			 * We always update (internally) from dev_flags
			 * so any request to change settings just gets
			 * vectored to that location.
			 */
			dptr = &sdp->isp_devparam[tgt].dev_flags;
#endif

			/*
			 * Note that these operations affect the
			 * the goal flags (dev_flags)- not
			 * the current state flags. Then we mark
			 * things so that the next operation to
			 * this HBA will cause the update to occur.
			 */
			if (cts->valid & CCB_TRANS_DISC_VALID) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					*dptr |= DPARM_DISC;
				} else {
					*dptr &= ~DPARM_DISC;
				}
			}
			if (cts->valid & CCB_TRANS_TQ_VALID) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					*dptr |= DPARM_TQING;
				} else {
					*dptr &= ~DPARM_TQING;
				}
			}
			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_16_BIT:
					*dptr |= DPARM_WIDE;
					break;
				default:
					*dptr &= ~DPARM_WIDE;
				}
			}
			/*
			 * Any SYNC RATE of nonzero and SYNC_OFFSET
			 * of nonzero will cause us to go to the
			 * selected (from NVRAM) maximum value for
			 * this device. At a later point, we'll
			 * allow finer control.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
			    (cts->sync_offset > 0)) {
				*dptr |= DPARM_SYNC;
			} else {
				*dptr &= ~DPARM_SYNC;
			}
			*dptr |= DPARM_SAFE_DFLT;
			isp_prt(isp, ISP_LOGDEBUG0,
			    "%d.%d set %s period 0x%x offset 0x%x flags 0x%x",
			    bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
			    "current" : "user",
			    sdp->isp_devparam[tgt].sync_period,
			    sdp->isp_devparam[tgt].sync_offset,
			    sdp->isp_devparam[tgt].dev_flags);
			sdp->isp_devparam[tgt].dev_update = 1;
			isp->isp_update |= (1 << bus);
		}
		ISP_UNLOCK(isp);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:

		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		if (IS_FC(isp)) {
			/*
			 * a lot of normal SCSI things don't make sense.
			 */
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			/*
			 * How do you measure the width of a high
			 * speed serial bus? Well, in bytes.
			 *
			 * Offset and period make no sense, though, so we set
			 * (above) a 'base' transfer speed to be gigabit.
			 */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		} else {
			sdparam *sdp = isp->isp_param;
			u_int16_t dval, pval, oval;
			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

			sdp += bus;
			/*
			 * For current settings, refresh the parameters from
			 * the chip first; otherwise report the goal values.
			 */
			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) {
				ISP_LOCK(isp);
				sdp->isp_devparam[tgt].dev_refresh = 1;
				isp->isp_update |= (1 << bus);
				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
				    NULL);
				ISP_UNLOCK(isp);
				dval = sdp->isp_devparam[tgt].cur_dflags;
				oval = sdp->isp_devparam[tgt].cur_offset;
				pval = sdp->isp_devparam[tgt].cur_period;
			} else {
				dval = sdp->isp_devparam[tgt].dev_flags;
				oval = sdp->isp_devparam[tgt].sync_offset;
				pval = sdp->isp_devparam[tgt].sync_period;
			}

			ISP_LOCK(isp);
			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

			if (dval & DPARM_DISC) {
				cts->flags |= CCB_TRANS_DISC_ENB;
			}
			if (dval & DPARM_TQING) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			if (dval & DPARM_WIDE) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;

			if ((dval & DPARM_SYNC) && oval != 0) {
				cts->sync_period = pval;
				cts->sync_offset = oval;
				cts->valid |=
				    CCB_TRANS_SYNC_RATE_VALID |
				    CCB_TRANS_SYNC_OFFSET_VALID;
			}
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGDEBUG0,
			    "%d.%d get %s period 0x%x offset 0x%x flags 0x%x",
			    bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
			    "current" : "user", pval, oval, dval);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_CALC_GEOMETRY:
	{
		/* Standard CAM extended-translation geometry heuristic. */
		struct ccb_calc_geometry *ccg;
		u_int32_t secs_per_cylinder;
		u_int32_t size_mb;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
		if (size_mb > 1024) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified bus */
		bus = cam_sim_bus(sim);
		ISP_LOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
		ISP_UNLOCK(isp);
		if (error)
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else {
			/* Tell CAM which bus's path saw the reset. */
			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
			else if (isp->isp_path != NULL)
				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
#ifdef	ISP_TARGET_MODE
		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
#else
		cpi->target_sprt = 0;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
		cpi->bus_id = cam_sim_bus(sim);
		if (IS_FC(isp)) {
			cpi->hba_misc = PIM_NOBUSRESET;
			/*
			 * Because our loop ID can shift from time to time,
			 * make our initiator ID out of range of our bus.
			 */
			cpi->initiator_id = cpi->max_target + 1;

			/*
			 * Set base transfer capabilities for Fibre Channel.
			 * Technically not correct because we don't know
			 * what media we're running on top of- but we'll
			 * look good if we always say 100MB/s.
			 */
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else {
			sdparam *sdp = isp->isp_param;
			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->hba_misc = 0;
			cpi->initiator_id = sdp->isp_initiator_id;
			cpi->base_transfer_speed = 3300;
		}
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
1921 
1922 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
/*
 * Complete a SCSI I/O CCB back to CAM.
 *
 * Normalizes SCSI/CAM status, freezes the device queue on errors,
 * undoes any resource-shortage simq freeze, cancels the watchdog,
 * and hands the CCB to xpt_done(). Called with the ISP lock held;
 * the lock is dropped (and reacquired) around xpt_done() unless a
 * watchdog invocation is in progress, in which case the watchdog
 * completes the CCB itself.
 */
void
isp_done(struct ccb_scsiio *sccb)
{
	struct ispsoftc *isp = XS_ISP(sccb);

	if (XS_NOERR(sccb))
		XS_SETERR(sccb, CAM_REQ_CMP);

	/*
	 * Map a non-OK SCSI status on an otherwise-complete CCB into
	 * the corresponding CAM status.
	 */
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    (sccb->scsi_status != SCSI_STATUS_OK)) {
		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		} else {
			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		}
	}

	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	/* Any failed command freezes its device queue once. */
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
			sccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(sccb->ccb_h.path, 1);
			if (sccb->scsi_status != SCSI_STATUS_OK)
				isp_prt(isp, ISP_LOGDEBUG2,
				    "freeze devq %d.%d %x %x",
				    sccb->ccb_h.target_id,
				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
				    sccb->scsi_status);
		}
	}

	/*
	 * If we were frozen waiting resources, clear that we were frozen
	 * waiting for resources. If we are no longer frozen, and the devq
	 * isn't frozen, mark the completing CCB to have the XPT layer
	 * release the simq.
	 */
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "isp_done->relsimq");
				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "isp_done->devq frozen");
			}
		} else {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "isp_done -> simqfrozen = %x",
			    isp->isp_osinfo.simqfrozen);
		}
	}
	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print_path(sccb->ccb_h.path);
		printf("cam completion status 0x%x\n", sccb->ccb_h.status);
	}

	XS_CMD_S_DONE(sccb);
	/*
	 * If a watchdog is running for this command, leave completion to
	 * it (it checks XS_CMD_DONE_P); otherwise cancel the watchdog and
	 * complete to CAM here, dropping the ISP lock around xpt_done().
	 */
	if (XS_CMD_WDOG_P(sccb) == 0) {
		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
		if (XS_CMD_GRACE_P(sccb)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(sccb);
		ISP_UNLOCK(isp);
#ifdef	ISP_SMPLOCK
		mtx_enter(&Giant, MTX_DEF);
		xpt_done((union ccb *) sccb);
		mtx_exit(&Giant, MTX_DEF);
#else
		xpt_done((union ccb *) sccb);
#endif
		ISP_LOCK(isp);
	}
}
2004 
2005 int
2006 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2007 {
2008 	int bus, rv = 0;
2009 	switch (cmd) {
2010 	case ISPASYNC_NEW_TGT_PARAMS:
2011 	{
2012 		int flags, tgt;
2013 		sdparam *sdp = isp->isp_param;
2014 		struct ccb_trans_settings neg;
2015 		struct cam_path *tmppath;
2016 
2017 		tgt = *((int *)arg);
2018 		bus = (tgt >> 16) & 0xffff;
2019 		tgt &= 0xffff;
2020 		sdp += bus;
2021 		if (xpt_create_path(&tmppath, NULL,
2022 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2023 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2024 			isp_prt(isp, ISP_LOGWARN,
2025 			    "isp_async cannot make temp path for %d.%d",
2026 			    tgt, bus);
2027 			rv = -1;
2028 			break;
2029 		}
2030 		flags = sdp->isp_devparam[tgt].cur_dflags;
2031 		neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2032 		if (flags & DPARM_DISC) {
2033 			neg.flags |= CCB_TRANS_DISC_ENB;
2034 		}
2035 		if (flags & DPARM_TQING) {
2036 			neg.flags |= CCB_TRANS_TAG_ENB;
2037 		}
2038 		neg.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2039 		neg.bus_width = (flags & DPARM_WIDE)?
2040 		    MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT;
2041 		neg.sync_period = sdp->isp_devparam[tgt].cur_period;
2042 		neg.sync_offset = sdp->isp_devparam[tgt].cur_offset;
2043 		if (flags & DPARM_SYNC) {
2044 			neg.valid |=
2045 			    CCB_TRANS_SYNC_RATE_VALID |
2046 			    CCB_TRANS_SYNC_OFFSET_VALID;
2047 		}
2048 		isp_prt(isp, ISP_LOGDEBUG2,
2049 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2050 		    bus, tgt, neg.sync_period, neg.sync_offset, flags);
2051 		xpt_setup_ccb(&neg.ccb_h, tmppath, 1);
2052 		xpt_async(AC_TRANSFER_NEG, tmppath, &neg);
2053 		xpt_free_path(tmppath);
2054 		break;
2055 	}
2056 	case ISPASYNC_BUS_RESET:
2057 		bus = *((int *)arg);
2058 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2059 		    bus);
2060 		if (bus > 0 && isp->isp_path2) {
2061 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2062 		} else if (isp->isp_path) {
2063 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2064 		}
2065 		break;
2066 	case ISPASYNC_LOOP_DOWN:
2067 		if (isp->isp_path) {
2068 			if (isp->isp_osinfo.simqfrozen == 0) {
2069 				isp_prt(isp, ISP_LOGDEBUG2,
2070 				    "loop down freeze simq");
2071 				xpt_freeze_simq(isp->isp_sim, 1);
2072 			}
2073 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2074 		}
2075 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2076 		break;
2077 	case ISPASYNC_LOOP_UP:
2078 		if (isp->isp_path) {
2079 			int wasfrozen =
2080 			    isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2081 			isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2082 			if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2083 				xpt_release_simq(isp->isp_sim, 1);
2084 				isp_prt(isp, ISP_LOGDEBUG2,
2085 				    "loop up release simq");
2086 			}
2087 		}
2088 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2089 		break;
2090 	case ISPASYNC_LOGGED_INOUT:
2091 	{
2092 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2093 		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2094 		const static char *roles[4] = {
2095 		    "(none)", "Target", "Initiator", "Target/Initiator"
2096 		};
2097 		char *ptr;
2098 		fcparam *fcp = isp->isp_param;
2099 		int tgt = *((int *) arg);
2100 		struct lportdb *lp = &fcp->portdb[tgt];
2101 
2102 		if (lp->valid) {
2103 			ptr = "arrived";
2104 		} else {
2105 			ptr = "disappeared";
2106 		}
2107 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2108 		    roles[lp->roles & 0x3], ptr,
2109 		    (u_int32_t) (lp->port_wwn >> 32),
2110 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2111 		    (u_int32_t) (lp->node_wwn >> 32),
2112 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2113 		break;
2114 	}
2115 	case ISPASYNC_CHANGE_NOTIFY:
2116 		if (arg == (void *) 1) {
2117 			isp_prt(isp, ISP_LOGINFO,
2118 			    "Name Server Database Changed");
2119 		} else {
2120 			isp_prt(isp, ISP_LOGINFO,
2121 			    "Name Server Database Changed");
2122 		}
2123 		break;
2124 #ifdef	ISP2100_FABRIC
2125 	case ISPASYNC_FABRIC_DEV:
2126 	{
2127 		int target, lrange;
2128 		struct lportdb *lp = NULL;
2129 		char *pt;
2130 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
2131 		u_int32_t portid;
2132 		u_int64_t wwpn, wwnn;
2133 		fcparam *fcp = isp->isp_param;
2134 
2135 		portid =
2136 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
2137 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
2138 		    (((u_int32_t) resp->snscb_port_id[2]));
2139 
2140 		wwpn =
2141 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
2142 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
2143 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
2144 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
2145 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
2146 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
2147 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
2148 		    (((u_int64_t)resp->snscb_portname[7]));
2149 
2150 		wwnn =
2151 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
2152 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
2153 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
2154 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
2155 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
2156 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
2157 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
2158 		    (((u_int64_t)resp->snscb_nodename[7]));
2159 		if (portid == 0 || wwpn == 0) {
2160 			break;
2161 		}
2162 
2163 		switch (resp->snscb_port_type) {
2164 		case 1:
2165 			pt = "   N_Port";
2166 			break;
2167 		case 2:
2168 			pt = "  NL_Port";
2169 			break;
2170 		case 3:
2171 			pt = "F/NL_Port";
2172 			break;
2173 		case 0x7f:
2174 			pt = "  Nx_Port";
2175 			break;
2176 		case 0x81:
2177 			pt = "  F_port";
2178 			break;
2179 		case 0x82:
2180 			pt = "  FL_Port";
2181 			break;
2182 		case 0x84:
2183 			pt = "   E_port";
2184 			break;
2185 		default:
2186 			pt = "?";
2187 			break;
2188 		}
2189 		isp_prt(isp, ISP_LOGINFO,
2190 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
2191 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
2192 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
2193 		/*
2194 		 * We're only interested in SCSI_FCP types (for now)
2195 		 */
2196 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
2197 			break;
2198 		}
2199 		if (fcp->isp_topo != TOPO_F_PORT)
2200 			lrange = FC_SNS_ID+1;
2201 		else
2202 			lrange = 0;
2203 		/*
2204 		 * Is it already in our list?
2205 		 */
2206 		for (target = lrange; target < MAX_FC_TARG; target++) {
2207 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2208 				continue;
2209 			}
2210 			lp = &fcp->portdb[target];
2211 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
2212 				lp->fabric_dev = 1;
2213 				break;
2214 			}
2215 		}
2216 		if (target < MAX_FC_TARG) {
2217 			break;
2218 		}
2219 		for (target = lrange; target < MAX_FC_TARG; target++) {
2220 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2221 				continue;
2222 			}
2223 			lp = &fcp->portdb[target];
2224 			if (lp->port_wwn == 0) {
2225 				break;
2226 			}
2227 		}
2228 		if (target == MAX_FC_TARG) {
2229 			isp_prt(isp, ISP_LOGWARN,
2230 			    "no more space for fabric devices");
2231 			break;
2232 		}
2233 		lp->node_wwn = wwnn;
2234 		lp->port_wwn = wwpn;
2235 		lp->portid = portid;
2236 		lp->fabric_dev = 1;
2237 		break;
2238 	}
2239 #endif
2240 #ifdef	ISP_TARGET_MODE
2241 	case ISPASYNC_TARGET_MESSAGE:
2242 	{
2243 		tmd_msg_t *mp = arg;
2244 		isp_prt(isp, ISP_LOGDEBUG2,
2245 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2246 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2247 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2248 		    mp->nt_msg[0]);
2249 		break;
2250 	}
2251 	case ISPASYNC_TARGET_EVENT:
2252 	{
2253 		tmd_event_t *ep = arg;
2254 		isp_prt(isp, ISP_LOGDEBUG2,
2255 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2256 		break;
2257 	}
2258 	case ISPASYNC_TARGET_ACTION:
2259 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2260 		default:
2261 			isp_prt(isp, ISP_LOGWARN,
2262 			   "event 0x%x for unhandled target action",
2263 			    ((isphdr_t *)arg)->rqs_entry_type);
2264 			break;
2265 		case RQSTYPE_ATIO:
2266 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2267 			break;
2268 		case RQSTYPE_ATIO2:
2269 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2270 			break;
2271 		case RQSTYPE_CTIO2:
2272 		case RQSTYPE_CTIO:
2273 			rv = isp_handle_platform_ctio(isp, arg);
2274 			break;
2275 		case RQSTYPE_ENABLE_LUN:
2276 		case RQSTYPE_MODIFY_LUN:
2277 			isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status);
2278 			break;
2279 		}
2280 		break;
2281 #endif
2282 	default:
2283 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2284 		rv = -1;
2285 		break;
2286 	}
2287 	return (rv);
2288 }
2289 
2290 
2291 /*
2292  * Locks are held before coming here.
2293  */
2294 void
2295 isp_uninit(struct ispsoftc *isp)
2296 {
2297 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2298 	DISABLE_INTS(isp);
2299 }
2300 
2301 void
2302 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2303 {
2304 	va_list ap;
2305 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2306 		return;
2307 	}
2308 	printf("%s: ", isp->isp_name);
2309 	va_start(ap, fmt);
2310 	vprintf(fmt, ap);
2311 	va_end(ap);
2312 	printf("\n");
2313 }
2314