xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 4a558355e5f3b4521cef56a6b705fa84be41dfa0)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <machine/stdarg.h>	/* for use by isp_prt below */
30 
31 static void isp_intr_enable(void *);
32 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
33 static void isp_poll(struct cam_sim *);
34 static void isp_relsim(void *);
35 static timeout_t isp_watchdog;
36 static void isp_action(struct cam_sim *, union ccb *);
37 
38 
39 static struct ispsoftc *isplist = NULL;
40 
/*
 * Attach this adapter instance to CAM: allocate a shared device queue,
 * register one SIM per channel (two for dual-bus 12X0 cards), create a
 * wildcard path per bus, register an AC_LOST_DEVICE async callback, and
 * finally link the softc onto the driver-global isplist.
 *
 * On any failure we unwind what was set up so far and return silently
 * (the function has no return value for the caller to inspect).
 *
 * NOTE(review): cam_sim_free(sim, TRUE) presumably also frees the devq;
 * the dual-bus failure paths mix cam_simq_free() and cam_sim_free(TRUE)
 * on the shared devq -- verify against the CAM SIM API contract.
 */
void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 * One queue is shared by both channels; it is sized by the
	 * number of commands the firmware can have outstanding.
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return;
	}

	/*
	 * Defer enabling interrupts until boot has progressed far enough;
	 * isp_intr_enable() runs from the intrhook and releases it.
	 */
	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "could not establish interrupt enable hook");
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		return;
	}

	/*
	 * Ask CAM to call us back (isp_cam_async) when a device on this
	 * bus goes away, so we can reset tagged/wide negotiation state.
	 */
	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	isp->isp_sim = sim;
	isp->isp_path = path;

	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			/* Unwind the first channel's registration too. */
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			return;
		}

		/* Same lost-device async callback for the second bus. */
		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}
	/*
	 * If we have a role (initiator and/or target), go to RUNSTATE and
	 * turn on interrupts now rather than waiting for the intrhook.
	 */
	if (isp->isp_role != ISP_ROLE_NONE) {
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	/* Append this instance to the driver-global list of adapters. */
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}
}
156 
157 static void
158 isp_intr_enable(void *arg)
159 {
160 	struct ispsoftc *isp = arg;
161 	if (isp->isp_role != ISP_ROLE_NONE) {
162 		ENABLE_INTS(isp);
163 		isp->isp_osinfo.intsok = 1;
164 	}
165 	/* Release our hook so that the boot can continue. */
166 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
167 }
168 
169 /*
170  * Put the target mode functions here, because some are inlines
171  */
172 
173 #ifdef	ISP_TARGET_MODE
174 
175 static __inline int is_lun_enabled(struct ispsoftc *, lun_id_t);
176 static __inline int are_any_luns_enabled(struct ispsoftc *);
177 static __inline tstate_t *get_lun_statep(struct ispsoftc *, lun_id_t);
178 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
179 static __inline int isp_psema_sig_rqe(struct ispsoftc *);
180 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
181 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
182 static __inline void isp_vsema_rqe(struct ispsoftc *);
183 static cam_status
184 create_lun_state(struct ispsoftc *, struct cam_path *, tstate_t **);
185 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
186 static void isp_en_lun(struct ispsoftc *, union ccb *);
187 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
188 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
189 static cam_status isp_target_putback_atio(struct ispsoftc *, union ccb *);
190 static timeout_t isp_refire_putback_atio;
191 
192 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
193 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
194 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
195 static void isp_handle_platform_ctio_part2(struct ispsoftc *, union ccb *);
196 
197 static __inline int
198 is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
199 {
200 	tstate_t *tptr;
201 	ISP_LOCK(isp);
202 	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
203 		ISP_UNLOCK(isp);
204 		return (0);
205 	}
206 	do {
207 		if (tptr->lun == (lun_id_t) lun) {
208 			ISP_UNLOCK(isp);
209 			return (1);
210 		}
211 	} while ((tptr = tptr->next) != NULL);
212 	ISP_UNLOCK(isp);
213 	return (0);
214 }
215 
216 static __inline int
217 are_any_luns_enabled(struct ispsoftc *isp)
218 {
219 	int i;
220 	for (i = 0; i < LUN_HASH_SIZE; i++) {
221 		if (isp->isp_osinfo.lun_hash[i]) {
222 			return (1);
223 		}
224 	}
225 	return (0);
226 }
227 
228 static __inline tstate_t *
229 get_lun_statep(struct ispsoftc *isp, lun_id_t lun)
230 {
231 	tstate_t *tptr;
232 
233 	ISP_LOCK(isp);
234 	if (lun == CAM_LUN_WILDCARD) {
235 		tptr = &isp->isp_osinfo.tsdflt;
236 		tptr->hold++;
237 		ISP_UNLOCK(isp);
238 		return (tptr);
239 	} else {
240 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)];
241 	}
242 	if (tptr == NULL) {
243 		ISP_UNLOCK(isp);
244 		return (NULL);
245 	}
246 
247 	do {
248 		if (tptr->lun == lun) {
249 			tptr->hold++;
250 			ISP_UNLOCK(isp);
251 			return (tptr);
252 		}
253 	} while ((tptr = tptr->next) != NULL);
254 	ISP_UNLOCK(isp);
255 	return (tptr);
256 }
257 
258 static __inline void
259 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
260 {
261 	if (tptr->hold)
262 		tptr->hold--;
263 }
264 
265 static __inline int
266 isp_psema_sig_rqe(struct ispsoftc *isp)
267 {
268 	ISP_LOCK(isp);
269 	while (isp->isp_osinfo.tmflags & TM_BUSY) {
270 		isp->isp_osinfo.tmflags |= TM_WANTED;
271 		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
272 			ISP_UNLOCK(isp);
273 			return (-1);
274 		}
275 		isp->isp_osinfo.tmflags |= TM_BUSY;
276 	}
277 	ISP_UNLOCK(isp);
278 	return (0);
279 }
280 
281 static __inline int
282 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
283 {
284 	ISP_LOCK(isp);
285 	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
286 		ISP_UNLOCK(isp);
287 		return (-1);
288 	}
289 	ISP_UNLOCK(isp);
290 	return (0);
291 }
292 
/*
 * Post the completion status of a lun-enable/modify mailbox request and
 * wake any thread blocked in isp_cv_wait_timed_rqe().  The status must
 * be stored before the wakeup so the waiter sees it.
 */
static __inline void
isp_cv_signal_rqe(struct ispsoftc *isp, int status)
{
	isp->isp_osinfo.rstatus = status;
	wakeup(&isp->isp_osinfo.rstatus);
}
299 
/*
 * V operation on the target-mode request "semaphore": wake any waiter
 * that registered interest via TM_WANTED, then release TM_BUSY.  The
 * wakeup happens before TM_BUSY is cleared; the woken thread re-tests
 * the flag under ISP_LOCK in isp_psema_sig_rqe().
 */
static __inline void
isp_vsema_rqe(struct ispsoftc *isp)
{
	ISP_LOCK(isp);
	if (isp->isp_osinfo.tmflags & TM_WANTED) {
		isp->isp_osinfo.tmflags &= ~TM_WANTED;
		wakeup(&isp->isp_osinfo.tmflags);
	}
	isp->isp_osinfo.tmflags &= ~TM_BUSY;
	ISP_UNLOCK(isp);
}
311 
/*
 * Allocate and hash in a new per-lun target-mode state for the lun
 * named by 'path'.  On success *rslt points at the new state, which is
 * created with hold == 1 (the caller owns a reference) and empty
 * ATIO/INOT free lists.  Fails with CAM_LUN_INVALID, CAM_LUN_ALRDY_ENA,
 * CAM_RESRC_UNAVAIL, or whatever xpt_create_path() returns.
 */
static cam_status
create_lun_state(struct ispsoftc *isp, struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}

	/* The state keeps its own copy of the path for later printing. */
	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	/* Born held: destroy_lun_state() will refuse to free it until
	 * the caller drops this reference via rls_lun_statep(). */
	new->hold = 1;

	/* Append to the tail of this lun's hash chain. */
	ISP_LOCK(isp);
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	ISP_UNLOCK(isp);
	*rslt = new;
	return (CAM_REQ_CMP);
}
354 
/*
 * Unlink a per-lun target-mode state from its hash chain and free it.
 * Silently does nothing if the state is still held (hold != 0) or is
 * not found on its chain.  Note: only tptr itself is freed; the owner
 * path created in create_lun_state() is not released here -- TODO
 * confirm whether that is an intentional caller responsibility.
 */
static __inline void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	tstate_t *lw, *pw;

	ISP_LOCK(isp);
	if (tptr->hold) {
		ISP_UNLOCK(isp);
		return;
	}
	pw = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)];
	if (pw == NULL) {
		ISP_UNLOCK(isp);
		return;
	} else if (pw->lun == tptr->lun) {
		/* Head of chain: point the bucket at the successor. */
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)] = pw->next;
	} else {
		/* Walk the chain with a trailing pointer (lw) so we can
		 * splice the matching node (pw) out. */
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			ISP_UNLOCK(isp);
			return;
		}
	}
	free(tptr, M_DEVBUF);
	ISP_UNLOCK(isp);
}
390 
/*
 * Handle an XPT_EN_LUN CCB: enable or disable target-mode for a lun
 * (or, with wildcard target+lun, toggle target mode for the whole bus).
 * Completion status is returned in ccb->ccb_h.status; the heavy lifting
 * is done by firmware ENABLE/MODIFY LUN requests issued through
 * isp_lun_cmd() and completed asynchronously via isp_cv_signal_rqe().
 */
static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char lfmt[] = "Lun now %sabled for target mode";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus, frozen = 0;
	lun_id_t lun;
	target_id_t tgt;


	/* Only channel 0 supports target mode in this driver. */
	bus = XS_CHANNEL(ccb);
	if (bus != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "second channel target mode not supported");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * Do some sanity checking first.
	 */

	if ((lun != CAM_LUN_WILDCARD) &&
	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}
	/* A non-wildcard target must be our own (initiator) id. */
	if (IS_SCSI(isp)) {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != SDPARAM(isp)->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != FCPARAM(isp)->isp_iid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	}

	/* Wildcard target requires wildcard lun as well. */
	if (tgt == CAM_TARGET_WILDCARD) {
		if (lun != CAM_LUN_WILDCARD) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	/*
	 * If Fibre Channel, stop and drain all activity to this bus.
	 * (Disabled: left here as a reminder of what a full quiesce
	 * would look like; 'frozen' stays 0 while this is #if 0'd.)
	 */
#if	0
	if (IS_FC(isp)) {
		ISP_LOCK(isp);
		frozen = 1;
		xpt_freeze_simq(isp->isp_sim, 1);
		isp->isp_osinfo.drain = 1;
		while (isp->isp_osinfo.drain) {
			 (void) msleep(&isp->isp_osinfo.drain,
				    &isp->isp_osinfo.lock, PRIBIO,
				    "ispdrain", 10 * hz);
		}
		ISP_UNLOCK(isp);
	}
#endif

	/*
	 * Check to see if we're enabling on fibre channel and
	 * don't yet have a notion of who the heck we are (no
	 * loop yet).
	 */
	if (IS_FC(isp) && cel->enable &&
	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
		fcparam *fcp = isp->isp_param;
		int rv;

		ISP_LOCK(isp);
		rv = isp_fc_runstate(isp, 2 * 1000000);
		ISP_UNLOCK(isp);
		if (fcp->isp_fwstate != FW_READY ||
		    fcp->isp_loopstate != LOOP_READY) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "could not get a good port database read");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			if (frozen)
				xpt_release_simq(isp->isp_sim, 1);
			return;
		}
	}


	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we enable/disable target mode but don't do any lun enabling.
	 */
	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
		int av;
		tptr = &isp->isp_osinfo.tsdflt;
		if (cel->enable) {
			if (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			/* The default state owns a copy of the wildcard path. */
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			/* av is in/out: 1 requests enable; non-zero result
			 * on return means the toggle failed. */
			av = 1;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				xpt_free_path(tptr->owner);
				ISP_UNLOCK(isp);
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			isp->isp_osinfo.tmflags |= TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
		} else {
			if ((isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
				ccb->ccb_h.status = CAM_LUN_INVALID;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			/* Can't disable bus-wide target mode while any
			 * individual lun is still enabled. */
			if (are_any_luns_enabled(isp)) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			av = 0;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				ISP_UNLOCK(isp);
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			isp->isp_osinfo.tmflags &= ~TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_print_path(ccb->ccb_h.path);
		isp_prt(isp, ISP_LOGINFO, lfmt, (cel->enable) ? "en" : "dis");
		if (frozen)
			xpt_release_simq(isp->isp_sim, 1);
		return;
	}

	/*
	 * We can move along now...
	 */

	if (frozen)
		xpt_release_simq(isp->isp_sim, 1);


	/* Acquire (enable) or look up (disable) the per-lun state. */
	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			return;
		}
	} else {
		tptr = get_lun_statep(isp, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	/* Serialize firmware lun requests; bail if interrupted. */
	if (isp_psema_sig_rqe(isp)) {
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}

	ISP_LOCK(isp);
	if (cel->enable) {
		u_int32_t seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;
		if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
			goto out;
		}
		/* Response arrives asynchronously via isp_cv_signal_rqe(). */
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "wait for ENABLE LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "ENABLE LUN returned 0x%x", rstat);
			goto out;
		}
	} else {
		u_int32_t seq;

		seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;

		/* Negated opcode presumably selects the "disable" form of
		 * the request -- verify against isp_lun_cmd(). */
		if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "wait for MODIFY LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "MODIFY LUN returned 0x%x", rstat);
			goto out;
		}
		rstat = LUN_ERR;
		seq = isp->isp_osinfo.rollinfo++;

		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			     "wait for ENABLE LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "ENABLE LUN returned 0x%x", rstat);
			goto out;
		}
	}
out:
	/* Release the request semaphore and report the overall result. */
	isp_vsema_rqe(isp);
	ISP_UNLOCK(isp);

	if (rstat != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		isp_prt(isp, ISP_LOGWARN,
		    "lun %sable failed", (cel->enable) ? "en" : "dis");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
	} else {
		xpt_print_path(ccb->ccb_h.path);
		isp_prt(isp, ISP_LOGINFO, lfmt, (cel->enable) ? "en" : "dis");
		rls_lun_statep(isp, tptr);
		if (cel->enable == 0) {
			destroy_lun_state(isp, tptr);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
	}
}
681 
/*
 * Abort a queued target-mode CCB (an ATIO or IMMED_NOTIFY sitting on a
 * lun's free list, not yet consumed by the firmware).  Returns
 * CAM_REQ_CMP if the CCB was found and unlinked (it is then marked
 * CAM_REQ_ABORTED), CAM_UA_ABORT for an unsupported func_code, and
 * CAM_PATH_INVALID otherwise.
 */
static cam_status
isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found;
	union ccb *accb = ccb->cab.abort_ccb;

	/* Unless wildcarded, the target id must be our own id. */
	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			return (CAM_PATH_INVALID);
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			return (CAM_PATH_INVALID);
		}
	}
	tptr = get_lun_statep(isp, accb->ccb_h.target_lun);
	if (tptr == NULL) {
		return (CAM_PATH_INVALID);
	}
	/* Select the list the CCB would be resting on. */
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
	} else {
		rls_lun_statep(isp, tptr);
		return (CAM_UA_ABORT);
	}
	/*
	 * Manual SLIST removal: handle the head specially, otherwise walk
	 * with a look-ahead pointer and splice the match out.
	 */
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		while(curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		return (CAM_REQ_CMP);
	}
	return(CAM_PATH_INVALID);
}
738 
/*
 * Build and queue a CTIO (Continue Target I/O) request for a target-mode
 * command: either moving data or sending final status/sense back to the
 * initiator.  Allocates a request-queue entry, fills in the CTIO (CTIO2
 * for Fibre Channel, plain CTIO for parallel SCSI), registers the CCB
 * handle with the core, runs DMA setup, and hands the entry to the f/w.
 * Returns CAM_REQ_INPROG on success, CAM_RESRC_UNAVAIL on queue/handle
 * exhaustion, or the CCB's error status if DMA setup fails.
 */
static cam_status
isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	u_int16_t *hp, save_handle;
	u_int16_t iptr, optr;


	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(qe, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
	 */

	if (IS_FC(isp)) {
		struct ccb_accept_tio *atiop;
		ct2_entry_t *cto = qe;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		/* SCC lun addressing handles >16 luns elsewhere; only set
		 * the byte-sized lun field for small-lun firmware. */
		if (isp->isp_maxluns <= 16) {
			cto->ct_lun = ccb->ccb_h.target_lun;
		}
		/*
		 * Start with a residual based on what the original datalength
		 * was supposed to be. Basically, we ignore what CAM has set
		 * for residuals. The data transfer routines will knock off
		 * the residual for each byte actually moved- and also will
		 * be responsible for setting the underrun flag.
		 */
		/* HACK! HACK! */
		if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
			cto->ct_resid = atiop->ccb_h.spriv_field0;
		}

		/*
		 * We always have to use the tag_id- it has the responder
		 * exchange id in it.
		 */
		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			/* No data: mode 1 (status, optionally with sense). */
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			/* Data movement: mode 0. */
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}
		if (cto->ct_flags & CT2_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG2,
			    "CTIO2[%x] SCSI STATUS 0x%x datalength %u",
			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
		}
		cto->ct_timeout = 2;
		hp = &cto->ct_syshandle;
	} else {
		ct_entry_t *cto = qe;

		/*
		 * We always have to use the tag_id- it has the handle
		 * for this command.
		 */
		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		/* tag_id packs the f/w handle (high bits) and the SCSI tag
		 * value (low byte) -- see isp_handle_platform_atio(). */
		cto->ct_fwhandle = cso->tag_id >> 8;
		cto->ct_tag_val = cso->tag_id & 0xff;
		if (cto->ct_tag_val && cso->tag_action) {
			/*
			 * We don't specify a tag type for regular SCSI,
			 * just the tag value and set a flag.
			 */
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
		}
		if (cto->ct_flags & CT_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG2,
			    "CTIO SCSI STATUS 0x%x resid %d",
			    cso->scsi_status, cso->resid);
		}
		cto->ct_timeout = 2;
		hp = &cto->ct_syshandle;
		/* Parallel SCSI CTIOs can never autosend sense. */
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
	}

	/* Register the CCB so the completion path can find it by handle. */
	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}


	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */

	save_handle = *hp;
	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, iptr);
		return (CAM_REQ_INPROG);

	case CMD_EAGAIN:
		/* Couldn't queue now; drop the handle so it can be reused. */
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		isp_destroy_handle(isp, save_handle);
		return (CAM_RESRC_UNAVAIL);

	default:
		isp_destroy_handle(isp, save_handle);
		return (XS_ERR(ccb));
	}
}
900 
/*
 * Return a consumed ATIO to the firmware so the resource count for this
 * lun is replenished.  Builds an ATIO (or ATIO2 for Fibre Channel)
 * request-queue entry from the accept-tio CCB and queues it.  Returns
 * CAM_REQ_CMP on success or CAM_RESRC_UNAVAIL if the request queue is
 * full (the caller retries via isp_refire_putback_atio()).
 */
static cam_status
isp_target_putback_atio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_accept_tio *atiop;
	u_int16_t iptr, optr;

	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_putback_atio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(qe, QENTRY_LEN);
	atiop = (struct ccb_accept_tio *) ccb;
	if (IS_FC(isp)) {
		at2_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		/* SCC (16-bit) vs. plain (8-bit) lun field, depending on
		 * how many luns the firmware supports. */
		if (isp->isp_maxluns > 16) {
			at->at_scclun = (uint16_t) atiop->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) atiop->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = atiop->tag_id;
		ISP_SWIZ_ATIO2(isp, qe, qe);
	} else {
		at_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = atiop->init_id;
		at->at_tgt = atiop->ccb_h.target_id;
		at->at_lun = atiop->ccb_h.target_lun;
		at->at_status = CT_OK;
		if (atiop->ccb_h.status & CAM_TAG_ACTION_VALID) {
			at->at_tag_type = atiop->tag_action;
		}
		/* Unpack the composite tag_id built by
		 * isp_handle_platform_atio(). */
		at->at_tag_val = atiop->tag_id & 0xff;
		at->at_handle = atiop->tag_id >> 8;
		ISP_SWIZ_ATIO(isp, qe, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, iptr);
	return (CAM_REQ_CMP);
}
946 
947 static void
948 isp_refire_putback_atio(void *arg)
949 {
950 	union ccb *ccb = arg;
951 	int s = splcam();
952 	if (isp_target_putback_atio(XS_ISP(ccb), ccb) != CAM_REQ_CMP) {
953 		(void) timeout(isp_refire_putback_atio, ccb, 10);
954 	} else {
955 		isp_handle_platform_ctio_part2(XS_ISP(ccb), ccb);
956 	}
957 	splx(s);
958 }
959 
960 /*
961  * Handle ATIO stuff that the generic code can't.
962  * This means handling CDBs.
963  */
964 
/*
 * Handle an inbound parallel-SCSI ATIO from the firmware: validate its
 * status, find the per-lun (or wildcard) state, dequeue a waiting
 * accept-tio CCB, copy the CDB (and any firmware-suggested sense) into
 * it, and complete it up to CAM.  When nothing can accept the command,
 * answer the initiator with BUSY or QUEUE FULL directly via isp_endcmd().
 * Always returns 0.
 */
static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus - i.e. the initiator
	 * did not set DiscPriv in the identify message. We don't care
	 * about this so it's ignored.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp,
		    ISP_LOGWARN, "bogus atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* Fall back to the wildcard (default) state if no specific lun. */
	tptr = get_lun_statep(isp, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d",
		    aep->at_lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	/* Wildcard (default) state: fill in the actual target/lun. */
	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	/* Copy any firmware-suggested sense data along with the command. */
	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/* Pack f/w handle (high bits) and tag value (low byte) into one
	 * tag_id so the CTIO path can recover both later. */
	atiop->tag_id = aep->at_tag_val | (aep->at_handle << 8);
	if ((atiop->tag_action = aep->at_tag_type) != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG2,
	    "ATIO[%x] CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, aep->at_iid, aep->at_lun,
	    aep->at_tag_val & 0xff, aep->at_tag_type,
	    (aep->at_flags & AT_NODISC)? "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}
1078 
/*
 * Handle an ATIO (Accept Target I/O) entry from 2x00-series (Fibre
 * Channel) firmware. Find the state for the addressed lun (or the
 * wildcard lun), dequeue a waiting ATIO CCB, copy the command into it
 * and complete it to CAM. If no lun state or no ATIO CCB is available,
 * the command is instead completed back to the firmware with BUSY or
 * QUEUE FULL status. Always returns 0 (the entry is consumed).
 */
static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/*
	 * Firmware with expanded lun support reports the lun in
	 * at_scclun; otherwise the (small) lun is in at_lun.
	 */
	if (isp->isp_maxluns > 16) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
		 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 *
		 */
		u_int32_t ccode = SCSI_STATUS_BUSY;

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, ccode, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d", lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);

	/*
	 * For the default (wildcard) lun state, fill in the actual
	 * nexus addressing for this command.
	 */
	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id =
			((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/* The RX_ID exchange identifier serves as the tag. */
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
        case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
        case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
        case ATIO2_TC_ATTR_ACAQ:		/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}

	/*
	 * Preserve overall command datalength in private field.
	 */
	atiop->ccb_h.spriv_field0 = aep->at_datalen;

	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG2,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}
1219 
/*
 * Handle a completing CTIO/CTIO2 (Continue Target I/O) entry from the
 * firmware. Recover the CCB from the handle stored in the entry,
 * collect completion status, and if this was the final CTIO for the
 * command, push a replacement ATIO back to the firmware and complete
 * the CCB to CAM. Always returns 0.
 */
static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		isp_prt(isp, ISP_LOGTDEBUG2,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d FIN",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
		/* Nonzero rqs_seqno marks the last CTIO of a sequence. */
		notify_cam = ct->ct_header.rqs_seqno;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
		isp_prt(isp, ISP_LOGTDEBUG2,
		    "CTIO tag 0x%x sts 0x%x flg 0x%x FIN",
		    ct->ct_tag_val, ct->ct_status, ct->ct_flags);
		notify_cam = ct->ct_header.rqs_seqno;
	}

	/*
	 * We're here either because data transfers are done (and
	 * it's time to send a final status CTIO) or because the final
	 * status CTIO is done. We don't get called for all intermediate
	 * CTIOs that happen for a large data transfer.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. The exception is that we clear
	 * the notion of handling a non-disconnecting command here.
	 */

	if (sentstatus) {
		/*
		 * Data transfer done. See if all went okay.
		 */
		if (ok) {
			ccb->csio.resid = 0;
		} else {
			ccb->csio.resid = ccb->csio.dxfer_len;
		}
	}

	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG1, "Intermediate CTIO done");
		return (0);
	}
	isp_prt(isp, ISP_LOGTDEBUG1, "Final CTIO done");
	/*
	 * Replace the ATIO we consumed for this command; if the
	 * firmware can't take it right now, retry via timeout.
	 */
	if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
	} else {
		isp_handle_platform_ctio_part2(isp, ccb);
	}
	return (0);
}
1291 
/*
 * Finish CTIO completion: mark the CCB complete, clear the queued
 * flag, and if we had frozen the simq for resource shortage, drop
 * that freeze reason. When no freeze reasons remain and the device
 * queue itself is not frozen, tell the XPT layer to release the simq
 * as part of completing this CCB.
 */
static void
isp_handle_platform_ctio_part2(struct ispsoftc *isp, union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->devqfrozen");
			}
		} else {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
		}
	}
	xpt_done(ccb);
}
1315 #endif
1316 
/*
 * CAM asynchronous event callback. On AC_LOST_DEVICE for parallel
 * SCSI, temporarily force the lost target's negotiation parameters
 * back to safe defaults and schedule a parameter update, so that a
 * replacement device will be renegotiated from scratch; the previous
 * goal flags are restored afterwards. Other codes are just logged.
 */
static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int rvf, tgt;

			tgt = xpt_path_target_id(path);
			rvf = ISP_FW_REVX(isp->isp_fwrev);
			ISP_LOCK(isp);
			sdp += cam_sim_bus(sim);
			isp->isp_update |= (1 << cam_sim_bus(sim));
			nflags = DPARM_SAFE_DFLT;
			/*
			 * Certain firmware revision ranges also want
			 * narrow/async as part of the safe default.
			 */
			if (rvf >= ISP_FW_REV(7, 55, 0) ||
			   (ISP_FW_REV(4, 55, 0) <= rvf &&
			   (rvf < ISP_FW_REV(5, 0, 0)))) {
				nflags |= DPARM_NARROW | DPARM_ASYNC;
			}
			oflags = sdp->isp_devparam[tgt].dev_flags;
			sdp->isp_devparam[tgt].dev_flags = nflags;
			sdp->isp_devparam[tgt].dev_update = 1;
			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
			/* Restore the caller-visible goal flags. */
			sdp->isp_devparam[tgt].dev_flags = oflags;
			ISP_UNLOCK(isp);
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}
1356 
/*
 * CAM polling entry point: run the interrupt service routine by hand.
 */
static void
isp_poll(struct cam_sim *sim)
{
	struct ispsoftc *softc;

	softc = (struct ispsoftc *) cam_sim_softc(sim);
	ISP_LOCK(softc);
	(void) isp_intr(softc);
	ISP_UNLOCK(softc);
}
1365 
1366 static void
1367 isp_relsim(void *arg)
1368 {
1369 	struct ispsoftc *isp = arg;
1370 	ISP_LOCK(isp);
1371 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
1372 		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
1373 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
1374 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1375 			xpt_release_simq(isp->isp_sim, 1);
1376 			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
1377 		}
1378 	}
1379 	ISP_UNLOCK(isp);
1380 }
1381 
/*
 * Per-command watchdog, armed in isp_action when a command is queued.
 * If the command is still outstanding we first poll the chip in case
 * a completion interrupt is simply pending; if the command already
 * had a grace period we abort it and complete it to CAM with
 * CAM_CMD_TIMEOUT; otherwise we grant one more second of grace,
 * re-arm the watchdog, and nudge the chip with a SYNC_ALL marker.
 */
static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		/* Mark the command as being worked on by the watchdog. */
		XS_CMD_S_WDOG(xs);

		r = ISP_READ(isp, BIU_ISR);

		/*
		 * If an interrupt is pending, service it- the command may
		 * have completed without us noticing.
		 */
		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup (%x, %x)", handle, r);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "watchdog timeout (%x, %x)", handle, r);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;

			/*
			 * First expiry: re-arm ourselves for one second,
			 * flag the grace period, and queue a SYNC_ALL
			 * marker request for this command's channel.
			 */
			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	ISP_UNLOCK(isp);
}
1465 
1466 static void
1467 isp_action(struct cam_sim *sim, union ccb *ccb)
1468 {
1469 	int bus, tgt, error;
1470 	struct ispsoftc *isp;
1471 	struct ccb_trans_settings *cts;
1472 
1473 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1474 
1475 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1476 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1477 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1478 	if (isp->isp_state != ISP_RUNSTATE &&
1479 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1480 		ISP_LOCK(isp);
1481 		isp_init(isp);
1482 		if (isp->isp_state != ISP_INITSTATE) {
1483 			ISP_UNLOCK(isp);
1484 			/*
1485 			 * Lie. Say it was a selection timeout.
1486 			 */
1487 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1488 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1489 			xpt_done(ccb);
1490 			return;
1491 		}
1492 		isp->isp_state = ISP_RUNSTATE;
1493 		ISP_UNLOCK(isp);
1494 	}
1495 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1496 
1497 	switch (ccb->ccb_h.func_code) {
1498 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1499 		/*
1500 		 * Do a couple of preliminary checks...
1501 		 */
1502 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1503 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1504 				ccb->ccb_h.status = CAM_REQ_INVALID;
1505 				xpt_done(ccb);
1506 				break;
1507 			}
1508 		}
1509 #ifdef	DIAGNOSTIC
1510 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
1511 			ccb->ccb_h.status = CAM_PATH_INVALID;
1512 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
1513 			ccb->ccb_h.status = CAM_PATH_INVALID;
1514 		}
1515 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
1516 			isp_prt(isp, ISP_LOGERR,
1517 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
1518 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1519 			xpt_done(ccb);
1520 			break;
1521 		}
1522 #endif
1523 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
1524 		ISP_LOCK(isp);
1525 		error = isp_start((XS_T *) ccb);
1526 		ISP_UNLOCK(isp);
1527 		switch (error) {
1528 		case CMD_QUEUED:
1529 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1530 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1531 				u_int64_t ticks = (u_int64_t) hz;
1532 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1533 					ticks = 60 * 1000 * ticks;
1534 				else
1535 					ticks = ccb->ccb_h.timeout * hz;
1536 				ticks = ((ticks + 999) / 1000) + hz + hz;
1537 				if (ticks >= 0x80000000) {
1538 					isp_prt(isp, ISP_LOGERR,
1539 					    "timeout overflow");
1540 					ticks = 0x80000000;
1541 				}
1542 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
1543 				    (caddr_t)ccb, (int)ticks);
1544 			} else {
1545 				callout_handle_init(&ccb->ccb_h.timeout_ch);
1546 			}
1547 			break;
1548 		case CMD_RQLATER:
1549 			if (isp->isp_osinfo.simqfrozen == 0) {
1550 				isp_prt(isp, ISP_LOGDEBUG2,
1551 				    "RQLATER freeze simq");
1552 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED;
1553 				timeout(isp_relsim, isp, 500);
1554 				xpt_freeze_simq(sim, 1);
1555 			}
1556 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1557 			xpt_done(ccb);
1558 			break;
1559 		case CMD_EAGAIN:
1560 			if (isp->isp_osinfo.simqfrozen == 0) {
1561 				xpt_freeze_simq(sim, 1);
1562 				isp_prt(isp, ISP_LOGDEBUG2,
1563 				    "EAGAIN freeze simq");
1564 			}
1565 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1566 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1567 			xpt_done(ccb);
1568 			break;
1569 		case CMD_COMPLETE:
1570 			ISP_LOCK(isp);
1571 			isp_done((struct ccb_scsiio *) ccb);
1572 			ISP_UNLOCK(isp);
1573 			break;
1574 		default:
1575 			isp_prt(isp, ISP_LOGERR,
1576 			    "What's this? 0x%x at %d in file %s",
1577 			    error, __LINE__, __FILE__);
1578 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
1579 			xpt_done(ccb);
1580 		}
1581 		break;
1582 
1583 #ifdef	ISP_TARGET_MODE
1584 	case XPT_EN_LUN:		/* Enable LUN as a target */
1585 		isp_en_lun(isp, ccb);
1586 		xpt_done(ccb);
1587 		break;
1588 
1589 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
1590 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
1591 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
1592 	{
1593 		tstate_t *tptr = get_lun_statep(isp, ccb->ccb_h.target_lun);
1594 		if (tptr == NULL) {
1595 			ccb->ccb_h.status = CAM_LUN_INVALID;
1596 			xpt_done(ccb);
1597 			break;
1598 		}
1599 		ccb->ccb_h.sim_priv.entries[0].field = 0;
1600 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1601 		ISP_LOCK(isp);
1602 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1603 #if	0
1604 			(void) isp_target_putback_atio(isp, ccb);
1605 #endif
1606 			SLIST_INSERT_HEAD(&tptr->atios,
1607 			    &ccb->ccb_h, sim_links.sle);
1608 		} else {
1609 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
1610 			    sim_links.sle);
1611 		}
1612 		ISP_UNLOCK(isp);
1613 		rls_lun_statep(isp, tptr);
1614 		ccb->ccb_h.status = CAM_REQ_INPROG;
1615 		break;
1616 	}
1617 	case XPT_CONT_TARGET_IO:
1618 	{
1619 		ISP_LOCK(isp);
1620 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
1621 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1622 			if (isp->isp_osinfo.simqfrozen == 0) {
1623 				xpt_freeze_simq(sim, 1);
1624 				xpt_print_path(ccb->ccb_h.path);
1625 				isp_prt(isp, ISP_LOGINFO,
1626 				    "XPT_CONT_TARGET_IO freeze simq");
1627 			}
1628 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1629 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1630 			xpt_done(ccb);
1631 		} else {
1632 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1633 		}
1634 		ISP_UNLOCK(isp);
1635 		break;
1636 	}
1637 #endif
1638 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
1639 
1640 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1641 		tgt = ccb->ccb_h.target_id;
1642 		tgt |= (bus << 16);
1643 
1644 		ISP_LOCK(isp);
1645 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
1646 		ISP_UNLOCK(isp);
1647 		if (error) {
1648 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1649 		} else {
1650 			ccb->ccb_h.status = CAM_REQ_CMP;
1651 		}
1652 		xpt_done(ccb);
1653 		break;
1654 	case XPT_ABORT:			/* Abort the specified CCB */
1655 	{
1656 		union ccb *accb = ccb->cab.abort_ccb;
1657 		switch (accb->ccb_h.func_code) {
1658 #ifdef	ISP_TARGET_MODE
1659 		case XPT_ACCEPT_TARGET_IO:
1660 		case XPT_IMMED_NOTIFY:
1661         		ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
1662 			break;
1663 		case XPT_CONT_TARGET_IO:
1664 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
1665 			ccb->ccb_h.status = CAM_UA_ABORT;
1666 			break;
1667 #endif
1668 		case XPT_SCSI_IO:
1669 			ISP_LOCK(isp);
1670 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
1671 			ISP_UNLOCK(isp);
1672 			if (error) {
1673 				ccb->ccb_h.status = CAM_UA_ABORT;
1674 			} else {
1675 				ccb->ccb_h.status = CAM_REQ_CMP;
1676 			}
1677 			break;
1678 		default:
1679 			ccb->ccb_h.status = CAM_REQ_INVALID;
1680 			break;
1681 		}
1682 		xpt_done(ccb);
1683 		break;
1684 	}
1685 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
1686 
1687 		cts = &ccb->cts;
1688 		tgt = cts->ccb_h.target_id;
1689 		ISP_LOCK(isp);
1690 		if (IS_SCSI(isp)) {
1691 			sdparam *sdp = isp->isp_param;
1692 			u_int16_t *dptr;
1693 
1694 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1695 
1696 			sdp += bus;
1697 #if	0
1698 			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS)
1699 				dptr = &sdp->isp_devparam[tgt].cur_dflags;
1700 			else
1701 				dptr = &sdp->isp_devparam[tgt].dev_flags;
1702 #else
1703 			/*
1704 			 * We always update (internally) from dev_flags
1705 			 * so any request to change settings just gets
1706 			 * vectored to that location.
1707 			 */
1708 			dptr = &sdp->isp_devparam[tgt].dev_flags;
1709 #endif
1710 
1711 			/*
1712 			 * Note that these operations affect the
1713 			 * the goal flags (dev_flags)- not
1714 			 * the current state flags. Then we mark
1715 			 * things so that the next operation to
1716 			 * this HBA will cause the update to occur.
1717 			 */
1718 			if (cts->valid & CCB_TRANS_DISC_VALID) {
1719 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
1720 					*dptr |= DPARM_DISC;
1721 				} else {
1722 					*dptr &= ~DPARM_DISC;
1723 				}
1724 			}
1725 			if (cts->valid & CCB_TRANS_TQ_VALID) {
1726 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
1727 					*dptr |= DPARM_TQING;
1728 				} else {
1729 					*dptr &= ~DPARM_TQING;
1730 				}
1731 			}
1732 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
1733 				switch (cts->bus_width) {
1734 				case MSG_EXT_WDTR_BUS_16_BIT:
1735 					*dptr |= DPARM_WIDE;
1736 					break;
1737 				default:
1738 					*dptr &= ~DPARM_WIDE;
1739 				}
1740 			}
1741 			/*
1742 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
1743 			 * of nonzero will cause us to go to the
1744 			 * selected (from NVRAM) maximum value for
1745 			 * this device. At a later point, we'll
1746 			 * allow finer control.
1747 			 */
1748 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
1749 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
1750 			    (cts->sync_offset > 0)) {
1751 				*dptr |= DPARM_SYNC;
1752 			} else {
1753 				*dptr &= ~DPARM_SYNC;
1754 			}
1755 			*dptr |= DPARM_SAFE_DFLT;
1756 			isp_prt(isp, ISP_LOGDEBUG0,
1757 			    "%d.%d set %s period 0x%x offset 0x%x flags 0x%x",
1758 			    bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
1759 			    "current" : "user",
1760 			    sdp->isp_devparam[tgt].sync_period,
1761 			    sdp->isp_devparam[tgt].sync_offset,
1762 			    sdp->isp_devparam[tgt].dev_flags);
1763 			sdp->isp_devparam[tgt].dev_update = 1;
1764 			isp->isp_update |= (1 << bus);
1765 		}
1766 		ISP_UNLOCK(isp);
1767 		ccb->ccb_h.status = CAM_REQ_CMP;
1768 		xpt_done(ccb);
1769 		break;
1770 
1771 	case XPT_GET_TRAN_SETTINGS:
1772 
1773 		cts = &ccb->cts;
1774 		tgt = cts->ccb_h.target_id;
1775 		if (IS_FC(isp)) {
1776 			/*
1777 			 * a lot of normal SCSI things don't make sense.
1778 			 */
1779 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
1780 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1781 			/*
1782 			 * How do you measure the width of a high
1783 			 * speed serial bus? Well, in bytes.
1784 			 *
1785 			 * Offset and period make no sense, though, so we set
1786 			 * (above) a 'base' transfer speed to be gigabit.
1787 			 */
1788 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1789 		} else {
1790 			sdparam *sdp = isp->isp_param;
1791 			u_int16_t dval, pval, oval;
1792 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1793 
1794 			sdp += bus;
1795 			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) {
1796 				ISP_LOCK(isp);
1797 				sdp->isp_devparam[tgt].dev_refresh = 1;
1798 				isp->isp_update |= (1 << bus);
1799 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
1800 				    NULL);
1801 				ISP_UNLOCK(isp);
1802 				dval = sdp->isp_devparam[tgt].cur_dflags;
1803 				oval = sdp->isp_devparam[tgt].cur_offset;
1804 				pval = sdp->isp_devparam[tgt].cur_period;
1805 			} else {
1806 				dval = sdp->isp_devparam[tgt].dev_flags;
1807 				oval = sdp->isp_devparam[tgt].sync_offset;
1808 				pval = sdp->isp_devparam[tgt].sync_period;
1809 			}
1810 
1811 			ISP_LOCK(isp);
1812 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
1813 
1814 			if (dval & DPARM_DISC) {
1815 				cts->flags |= CCB_TRANS_DISC_ENB;
1816 			}
1817 			if (dval & DPARM_TQING) {
1818 				cts->flags |= CCB_TRANS_TAG_ENB;
1819 			}
1820 			if (dval & DPARM_WIDE) {
1821 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1822 			} else {
1823 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1824 			}
1825 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
1826 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1827 
1828 			if ((dval & DPARM_SYNC) && oval != 0) {
1829 				cts->sync_period = pval;
1830 				cts->sync_offset = oval;
1831 				cts->valid |=
1832 				    CCB_TRANS_SYNC_RATE_VALID |
1833 				    CCB_TRANS_SYNC_OFFSET_VALID;
1834 			}
1835 			ISP_UNLOCK(isp);
1836 			isp_prt(isp, ISP_LOGDEBUG0,
1837 			    "%d.%d get %s period 0x%x offset 0x%x flags 0x%x",
1838 			    bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
1839 			    "current" : "user", pval, oval, dval);
1840 		}
1841 		ccb->ccb_h.status = CAM_REQ_CMP;
1842 		xpt_done(ccb);
1843 		break;
1844 
1845 	case XPT_CALC_GEOMETRY:
1846 	{
1847 		struct ccb_calc_geometry *ccg;
1848 		u_int32_t secs_per_cylinder;
1849 		u_int32_t size_mb;
1850 
1851 		ccg = &ccb->ccg;
1852 		if (ccg->block_size == 0) {
1853 			isp_prt(isp, ISP_LOGERR,
1854 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
1855 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
1856 			ccb->ccb_h.status = CAM_REQ_INVALID;
1857 			xpt_done(ccb);
1858 			break;
1859 		}
1860 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
1861 		if (size_mb > 1024) {
1862 			ccg->heads = 255;
1863 			ccg->secs_per_track = 63;
1864 		} else {
1865 			ccg->heads = 64;
1866 			ccg->secs_per_track = 32;
1867 		}
1868 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1869 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1870 		ccb->ccb_h.status = CAM_REQ_CMP;
1871 		xpt_done(ccb);
1872 		break;
1873 	}
1874 	case XPT_RESET_BUS:		/* Reset the specified bus */
1875 		bus = cam_sim_bus(sim);
1876 		ISP_LOCK(isp);
1877 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
1878 		ISP_UNLOCK(isp);
1879 		if (error)
1880 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1881 		else {
1882 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
1883 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
1884 			else if (isp->isp_path != NULL)
1885 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
1886 			ccb->ccb_h.status = CAM_REQ_CMP;
1887 		}
1888 		xpt_done(ccb);
1889 		break;
1890 
1891 	case XPT_TERM_IO:		/* Terminate the I/O process */
1892 		ccb->ccb_h.status = CAM_REQ_INVALID;
1893 		xpt_done(ccb);
1894 		break;
1895 
1896 	case XPT_PATH_INQ:		/* Path routing inquiry */
1897 	{
1898 		struct ccb_pathinq *cpi = &ccb->cpi;
1899 
1900 		cpi->version_num = 1;
1901 #ifdef	ISP_TARGET_MODE
1902 		/* XXX: we don't support 2nd bus target mode yet */
1903 		if (cam_sim_bus(sim) == 0)
1904 			cpi->target_sprt =
1905 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
1906 		else
1907 			cpi->target_sprt = 0;
1908 #else
1909 		cpi->target_sprt = 0;
1910 #endif
1911 		cpi->hba_eng_cnt = 0;
1912 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
1913 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
1914 		cpi->bus_id = cam_sim_bus(sim);
1915 		if (IS_FC(isp)) {
1916 			cpi->hba_misc = PIM_NOBUSRESET;
1917 			/*
1918 			 * Because our loop ID can shift from time to time,
1919 			 * make our initiator ID out of range of our bus.
1920 			 */
1921 			cpi->initiator_id = cpi->max_target + 1;
1922 
1923 			/*
1924 			 * Set base transfer capabilities for Fibre Channel.
1925 			 * Technically not correct because we don't know
1926 			 * what media we're running on top of- but we'll
1927 			 * look good if we always say 100MB/s.
1928 			 */
1929 			cpi->base_transfer_speed = 100000;
1930 			cpi->hba_inquiry = PI_TAG_ABLE;
1931 		} else {
1932 			sdparam *sdp = isp->isp_param;
1933 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
1934 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1935 			cpi->hba_misc = 0;
1936 			cpi->initiator_id = sdp->isp_initiator_id;
1937 			cpi->base_transfer_speed = 3300;
1938 		}
1939 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1940 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
1941 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1942 		cpi->unit_number = cam_sim_unit(sim);
1943 		cpi->ccb_h.status = CAM_REQ_CMP;
1944 		xpt_done(ccb);
1945 		break;
1946 	}
1947 	default:
1948 		ccb->ccb_h.status = CAM_REQ_INVALID;
1949 		xpt_done(ccb);
1950 		break;
1951 	}
1952 }
1953 
1954 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
/*
 * Complete a SCSI command back to CAM.
 *
 * Called from the core code (and from the watchdog) with the driver
 * lock held. Maps core completion state into CAM status bits, freezes
 * the device queue on error, drops any resource-shortage simq freeze,
 * cancels the watchdog, and hands the CCB to xpt_done() (with the
 * driver lock released around that call).
 */
void
isp_done(struct ccb_scsiio *sccb)
{
	struct ispsoftc *isp = XS_ISP(sccb);

	if (XS_NOERR(sccb))
		XS_SETERR(sccb, CAM_REQ_CMP);

	/*
	 * A 'completed' command with a non-good SCSI status becomes
	 * either an autosense failure (CHECK CONDITION with no valid
	 * sense) or a plain SCSI status error.
	 */
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    (sccb->scsi_status != SCSI_STATUS_OK)) {
		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		} else {
			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		}
	}

	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* Freeze the device queue on any failed completion. */
		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
			sccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(sccb->ccb_h.path, 1);
			if (sccb->scsi_status != SCSI_STATUS_OK)
				isp_prt(isp, ISP_LOGDEBUG2,
				    "freeze devq %d.%d %x %x",
				    sccb->ccb_h.target_id,
				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
				    sccb->scsi_status);
		}
	}

	/*
	 * If we were frozen waiting resources, clear that we were frozen
	 * waiting for resources. If we are no longer frozen, and the devq
	 * isn't frozen, mark the completing CCB to have the XPT layer
	 * release the simq.
	 */
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "isp_done->relsimq");
				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "isp_done->devq frozen");
			}
		} else {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "isp_done -> simqfrozen = %x",
			    isp->isp_osinfo.simqfrozen);
		}
	}
	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print_path(sccb->ccb_h.path);
		isp_prt(isp, ISP_LOGINFO,
		    "cam completion status 0x%x", sccb->ccb_h.status);
	}

	XS_CMD_S_DONE(sccb);
	/*
	 * If the watchdog isn't currently working on this command,
	 * cancel it and complete the CCB now; otherwise isp_watchdog
	 * will observe the DONE flag and finish up itself.
	 */
	if (XS_CMD_WDOG_P(sccb) == 0) {
		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
		if (XS_CMD_GRACE_P(sccb)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(sccb);
		/* Drop the driver lock around the call up into CAM. */
		ISP_UNLOCK(isp);
#ifdef	ISP_SMPLOCK
		mtx_lock(&Giant);
		xpt_done((union ccb *) sccb);
		mtx_unlock(&Giant);
#else
		xpt_done((union ccb *) sccb);
#endif
		ISP_LOCK(isp);
	}
}
2037 
2038 int
2039 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2040 {
2041 	int bus, rv = 0;
2042 	switch (cmd) {
2043 	case ISPASYNC_NEW_TGT_PARAMS:
2044 	{
2045 		int flags, tgt;
2046 		sdparam *sdp = isp->isp_param;
2047 		struct ccb_trans_settings neg;
2048 		struct cam_path *tmppath;
2049 
2050 		tgt = *((int *)arg);
2051 		bus = (tgt >> 16) & 0xffff;
2052 		tgt &= 0xffff;
2053 		sdp += bus;
2054 		if (xpt_create_path(&tmppath, NULL,
2055 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2056 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2057 			isp_prt(isp, ISP_LOGWARN,
2058 			    "isp_async cannot make temp path for %d.%d",
2059 			    tgt, bus);
2060 			rv = -1;
2061 			break;
2062 		}
2063 		flags = sdp->isp_devparam[tgt].cur_dflags;
2064 		neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2065 		if (flags & DPARM_DISC) {
2066 			neg.flags |= CCB_TRANS_DISC_ENB;
2067 		}
2068 		if (flags & DPARM_TQING) {
2069 			neg.flags |= CCB_TRANS_TAG_ENB;
2070 		}
2071 		neg.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2072 		neg.bus_width = (flags & DPARM_WIDE)?
2073 		    MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT;
2074 		neg.sync_period = sdp->isp_devparam[tgt].cur_period;
2075 		neg.sync_offset = sdp->isp_devparam[tgt].cur_offset;
2076 		if (flags & DPARM_SYNC) {
2077 			neg.valid |=
2078 			    CCB_TRANS_SYNC_RATE_VALID |
2079 			    CCB_TRANS_SYNC_OFFSET_VALID;
2080 		}
2081 		isp_prt(isp, ISP_LOGDEBUG2,
2082 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2083 		    bus, tgt, neg.sync_period, neg.sync_offset, flags);
2084 		xpt_setup_ccb(&neg.ccb_h, tmppath, 1);
2085 		xpt_async(AC_TRANSFER_NEG, tmppath, &neg);
2086 		xpt_free_path(tmppath);
2087 		break;
2088 	}
2089 	case ISPASYNC_BUS_RESET:
2090 		bus = *((int *)arg);
2091 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2092 		    bus);
2093 		if (bus > 0 && isp->isp_path2) {
2094 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2095 		} else if (isp->isp_path) {
2096 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2097 		}
2098 		break;
2099 	case ISPASYNC_LOOP_DOWN:
2100 		if (isp->isp_path) {
2101 			if (isp->isp_osinfo.simqfrozen == 0) {
2102 				isp_prt(isp, ISP_LOGDEBUG2,
2103 				    "loop down freeze simq");
2104 				xpt_freeze_simq(isp->isp_sim, 1);
2105 			}
2106 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2107 		}
2108 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2109 		break;
2110 	case ISPASYNC_LOOP_UP:
2111 		if (isp->isp_path) {
2112 			int wasfrozen =
2113 			    isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2114 			isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2115 			if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2116 				xpt_release_simq(isp->isp_sim, 1);
2117 				isp_prt(isp, ISP_LOGDEBUG2,
2118 				    "loop up release simq");
2119 			}
2120 		}
2121 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2122 		break;
2123 	case ISPASYNC_PROMENADE:
2124 	{
2125 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2126 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2127 		static const char *roles[4] = {
2128 		    "(none)", "Target", "Initiator", "Target/Initiator"
2129 		};
2130 		fcparam *fcp = isp->isp_param;
2131 		int tgt = *((int *) arg);
2132 		struct lportdb *lp = &fcp->portdb[tgt];
2133 
2134 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2135 		    roles[lp->roles & 0x3],
2136 		    (lp->valid)? "Arrived" : "Departed",
2137 		    (u_int32_t) (lp->port_wwn >> 32),
2138 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2139 		    (u_int32_t) (lp->node_wwn >> 32),
2140 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2141 		break;
2142 	}
2143 	case ISPASYNC_CHANGE_NOTIFY:
2144 		if (arg == (void *) 1) {
2145 			isp_prt(isp, ISP_LOGINFO,
2146 			    "Name Server Database Changed");
2147 		} else {
2148 			isp_prt(isp, ISP_LOGINFO,
2149 			    "Name Server Database Changed");
2150 		}
2151 		break;
2152 	case ISPASYNC_FABRIC_DEV:
2153 	{
2154 		int target, lrange;
2155 		struct lportdb *lp = NULL;
2156 		char *pt;
2157 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
2158 		u_int32_t portid;
2159 		u_int64_t wwpn, wwnn;
2160 		fcparam *fcp = isp->isp_param;
2161 
2162 		portid =
2163 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
2164 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
2165 		    (((u_int32_t) resp->snscb_port_id[2]));
2166 
2167 		wwpn =
2168 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
2169 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
2170 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
2171 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
2172 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
2173 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
2174 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
2175 		    (((u_int64_t)resp->snscb_portname[7]));
2176 
2177 		wwnn =
2178 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
2179 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
2180 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
2181 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
2182 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
2183 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
2184 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
2185 		    (((u_int64_t)resp->snscb_nodename[7]));
2186 		if (portid == 0 || wwpn == 0) {
2187 			break;
2188 		}
2189 
2190 		switch (resp->snscb_port_type) {
2191 		case 1:
2192 			pt = "   N_Port";
2193 			break;
2194 		case 2:
2195 			pt = "  NL_Port";
2196 			break;
2197 		case 3:
2198 			pt = "F/NL_Port";
2199 			break;
2200 		case 0x7f:
2201 			pt = "  Nx_Port";
2202 			break;
2203 		case 0x81:
2204 			pt = "  F_port";
2205 			break;
2206 		case 0x82:
2207 			pt = "  FL_Port";
2208 			break;
2209 		case 0x84:
2210 			pt = "   E_port";
2211 			break;
2212 		default:
2213 			pt = "?";
2214 			break;
2215 		}
2216 		isp_prt(isp, ISP_LOGINFO,
2217 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
2218 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
2219 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
2220 		/*
2221 		 * We're only interested in SCSI_FCP types (for now)
2222 		 */
2223 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
2224 			break;
2225 		}
2226 		if (fcp->isp_topo != TOPO_F_PORT)
2227 			lrange = FC_SNS_ID+1;
2228 		else
2229 			lrange = 0;
2230 		/*
2231 		 * Is it already in our list?
2232 		 */
2233 		for (target = lrange; target < MAX_FC_TARG; target++) {
2234 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2235 				continue;
2236 			}
2237 			lp = &fcp->portdb[target];
2238 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
2239 				lp->fabric_dev = 1;
2240 				break;
2241 			}
2242 		}
2243 		if (target < MAX_FC_TARG) {
2244 			break;
2245 		}
2246 		for (target = lrange; target < MAX_FC_TARG; target++) {
2247 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2248 				continue;
2249 			}
2250 			lp = &fcp->portdb[target];
2251 			if (lp->port_wwn == 0) {
2252 				break;
2253 			}
2254 		}
2255 		if (target == MAX_FC_TARG) {
2256 			isp_prt(isp, ISP_LOGWARN,
2257 			    "no more space for fabric devices");
2258 			break;
2259 		}
2260 		lp->node_wwn = wwnn;
2261 		lp->port_wwn = wwpn;
2262 		lp->portid = portid;
2263 		lp->fabric_dev = 1;
2264 		break;
2265 	}
2266 #ifdef	ISP_TARGET_MODE
2267 	case ISPASYNC_TARGET_MESSAGE:
2268 	{
2269 		tmd_msg_t *mp = arg;
2270 		isp_prt(isp, ISP_LOGDEBUG2,
2271 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2272 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2273 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2274 		    mp->nt_msg[0]);
2275 		break;
2276 	}
2277 	case ISPASYNC_TARGET_EVENT:
2278 	{
2279 		tmd_event_t *ep = arg;
2280 		isp_prt(isp, ISP_LOGDEBUG2,
2281 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2282 		break;
2283 	}
2284 	case ISPASYNC_TARGET_ACTION:
2285 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2286 		default:
2287 			isp_prt(isp, ISP_LOGWARN,
2288 			   "event 0x%x for unhandled target action",
2289 			    ((isphdr_t *)arg)->rqs_entry_type);
2290 			break;
2291 		case RQSTYPE_ATIO:
2292 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2293 			break;
2294 		case RQSTYPE_ATIO2:
2295 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2296 			break;
2297 		case RQSTYPE_CTIO2:
2298 		case RQSTYPE_CTIO:
2299 			rv = isp_handle_platform_ctio(isp, arg);
2300 			break;
2301 		case RQSTYPE_ENABLE_LUN:
2302 		case RQSTYPE_MODIFY_LUN:
2303 			isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status);
2304 			break;
2305 		}
2306 		break;
2307 #endif
2308 	default:
2309 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2310 		rv = -1;
2311 		break;
2312 	}
2313 	return (rv);
2314 }
2315 
2316 
2317 /*
2318  * Locks are held before coming here.
2319  */
2320 void
2321 isp_uninit(struct ispsoftc *isp)
2322 {
2323 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2324 	DISABLE_INTS(isp);
2325 }
2326 
2327 void
2328 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2329 {
2330 	va_list ap;
2331 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2332 		return;
2333 	}
2334 	printf("%s: ", device_get_nameunit(isp->isp_dev));
2335 	va_start(ap, fmt);
2336 	vprintf(fmt, ap);
2337 	va_end(ap);
2338 	printf("\n");
2339 }
2340