xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision db901281608f0c69c05dd9ab366155d3225f0fd2)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 #include <dev/isp/isp_freebsd.h>
30 #include <machine/stdarg.h>	/* for use by isp_prt below */
31 
32 static void isp_intr_enable(void *);
33 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
34 static void isp_poll(struct cam_sim *);
35 static void isp_relsim(void *);
36 static timeout_t isp_watchdog;
37 static void isp_action(struct cam_sim *, union ccb *);
38 
39 
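/*
 * Singly linked list of all attached HBA instances; isp_attach appends
 * new instances at the tail.
 */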
40 static struct ispsoftc *isplist = NULL;
41 
42 void
43 isp_attach(struct ispsoftc *isp)
44 {
45 	int primary, secondary;
46 	struct ccb_setasync csa;
47 	struct cam_devq *devq;
48 	struct cam_sim *sim;
49 	struct cam_path *path;
50 
51 	/*
52 	 * Establish (in case of 12X0) which bus is the primary.
53 	 */
54 
55 	primary = 0;
56 	secondary = 1;
57 
58 	/*
59 	 * Create the device queue for our SIM(s).
60 	 */
61 	devq = cam_simq_alloc(isp->isp_maxcmds);
62 	if (devq == NULL) {
63 		return;
64 	}
65 
66 	/*
67 	 * Construct our SIM entry.
68 	 */
69 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
70 	    isp->isp_unit, 1, isp->isp_maxcmds, devq);
71 	if (sim == NULL) {
72 		cam_simq_free(devq);
73 		return;
74 	}
75 
76 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
77 	isp->isp_osinfo.ehook.ich_arg = isp;
78 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
79 		printf("%s: could not establish interrupt enable hook\n",
80 		    isp->isp_name);
81 		cam_sim_free(sim, TRUE);
82 		return;
83 	}
84 
85 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
86 		cam_sim_free(sim, TRUE);
87 		return;
88 	}
89 
90 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
91 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
92 		xpt_bus_deregister(cam_sim_path(sim));
93 		cam_sim_free(sim, TRUE);
94 		return;
95 	}
96 
97 	xpt_setup_ccb(&csa.ccb_h, path, 5);
98 	csa.ccb_h.func_code = XPT_SASYNC_CB;
99 	csa.event_enable = AC_LOST_DEVICE;
100 	csa.callback = isp_cam_async;
101 	csa.callback_arg = sim;
102 	xpt_action((union ccb *)&csa);
103 	isp->isp_sim = sim;
104 	isp->isp_path = path;
105 
106 	/*
107 	 * If we have a second channel, construct SIM entry for that.
108 	 */
109 	if (IS_DUALBUS(isp)) {
110 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
111 		    isp->isp_unit, 1, isp->isp_maxcmds, devq);
112 		if (sim == NULL) {
113 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
114 			xpt_free_path(isp->isp_path);
115 			cam_simq_free(devq);
116 			return;
117 		}
118 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
119 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
120 			xpt_free_path(isp->isp_path);
121 			cam_sim_free(sim, TRUE);
122 			return;
123 		}
124 
125 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
126 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
127 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
128 			xpt_free_path(isp->isp_path);
129 			xpt_bus_deregister(cam_sim_path(sim));
130 			cam_sim_free(sim, TRUE);
131 			return;
132 		}
133 
134 		xpt_setup_ccb(&csa.ccb_h, path, 5);
135 		csa.ccb_h.func_code = XPT_SASYNC_CB;
136 		csa.event_enable = AC_LOST_DEVICE;
137 		csa.callback = isp_cam_async;
138 		csa.callback_arg = sim;
139 		xpt_action((union ccb *)&csa);
140 		isp->isp_sim2 = sim;
141 		isp->isp_path2 = path;
142 	}
143 	isp->isp_state = ISP_RUNSTATE;
144 	ENABLE_INTS(isp);
145 	if (isplist == NULL) {
146 		isplist = isp;
147 	} else {
148 		struct ispsoftc *tmp = isplist;
149 		while (tmp->isp_osinfo.next) {
150 			tmp = tmp->isp_osinfo.next;
151 		}
152 		tmp->isp_osinfo.next = isp;
153 	}
154 }
155 
156 static void
157 isp_intr_enable(void *arg)
158 {
159 	struct ispsoftc *isp = arg;
160 	ENABLE_INTS(isp);
161 #ifdef	SERVICING_INTERRUPT
162 	isp->isp_osinfo.intsok = 1;
163 #endif
164 	/* Release our hook so that the boot can continue. */
165 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
166 }
167 
168 /*
169  * Put the target mode functions here, because some are inlines
170  */
171 
172 #ifdef	ISP_TARGET_MODE
173 
174 static __inline int is_lun_enabled(struct ispsoftc *, lun_id_t);
175 static __inline int are_any_luns_enabled(struct ispsoftc *);
176 static __inline tstate_t *get_lun_statep(struct ispsoftc *, lun_id_t);
177 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
178 static __inline int isp_psema_sig_rqe(struct ispsoftc *);
179 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
180 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
181 static __inline void isp_vsema_rqe(struct ispsoftc *);
182 static cam_status
183 create_lun_state(struct ispsoftc *, struct cam_path *, tstate_t **);
184 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
185 static void isp_en_lun(struct ispsoftc *, union ccb *);
186 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
187 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
188 static cam_status isp_target_putback_atio(struct ispsoftc *, union ccb *);
189 static timeout_t isp_refire_putback_atio;
190 
191 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
192 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
193 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
194 static void isp_handle_platform_ctio_part2(struct ispsoftc *, union ccb *);
195 
196 static __inline int
197 is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
198 {
199 	tstate_t *tptr;
200 	ISP_LOCK(isp);
201 	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
202 		ISP_UNLOCK(isp);
203 		return (0);
204 	}
205 	do {
206 		if (tptr->lun == (lun_id_t) lun) {
207 			ISP_UNLOCK(isp);
208 			return (1);
209 		}
210 	} while ((tptr = tptr->next) != NULL);
211 	ISP_UNLOCK(isp);
212 	return (0);
213 }
214 
215 static __inline int
216 are_any_luns_enabled(struct ispsoftc *isp)
217 {
218 	int i;
219 	for (i = 0; i < LUN_HASH_SIZE; i++) {
220 		if (isp->isp_osinfo.lun_hash[i]) {
221 			return (1);
222 		}
223 	}
224 	return (0);
225 }
226 
227 static __inline tstate_t *
228 get_lun_statep(struct ispsoftc *isp, lun_id_t lun)
229 {
230 	tstate_t *tptr;
231 
232 	ISP_LOCK(isp);
233 	if (lun == CAM_LUN_WILDCARD) {
234 		tptr = &isp->isp_osinfo.tsdflt;
235 		tptr->hold++;
236 		ISP_UNLOCK(isp);
237 		return (tptr);
238 	} else {
239 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)];
240 	}
241 	if (tptr == NULL) {
242 		ISP_UNLOCK(isp);
243 		return (NULL);
244 	}
245 
246 	do {
247 		if (tptr->lun == lun) {
248 			tptr->hold++;
249 			ISP_UNLOCK(isp);
250 			return (tptr);
251 		}
252 	} while ((tptr = tptr->next) != NULL);
253 	ISP_UNLOCK(isp);
254 	return (tptr);
255 }
256 
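/*
 * Drop the hold reference taken by get_lun_statep; destroy_lun_state
 * will not free a tstate that still has a hold on it.
 */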
257 static __inline void
258 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
259 {
260 	if (tptr->hold)
261 		tptr->hold--;
262 }
263 
264 static __inline int
265 isp_psema_sig_rqe(struct ispsoftc *isp)
266 {
267 	ISP_LOCK(isp);
268 	while (isp->isp_osinfo.tmflags & TM_BUSY) {
269 		isp->isp_osinfo.tmflags |= TM_WANTED;
270 		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
271 			ISP_UNLOCK(isp);
272 			return (-1);
273 		}
274 		isp->isp_osinfo.tmflags |= TM_BUSY;
275 	}
276 	ISP_UNLOCK(isp);
277 	return (0);
278 }
279 
280 static __inline int
281 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
282 {
283 	ISP_LOCK(isp);
284 	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
285 		ISP_UNLOCK(isp);
286 		return (-1);
287 	}
288 	ISP_UNLOCK(isp);
289 	return (0);
290 }
291 
292 static __inline void
293 isp_cv_signal_rqe(struct ispsoftc *isp, int status)
294 {
295 	isp->isp_osinfo.rstatus = status;
296 	wakeup(&isp->isp_osinfo.rstatus);
297 }
298 
299 static __inline void
300 isp_vsema_rqe(struct ispsoftc *isp)
301 {
302 	ISP_LOCK(isp);
303 	if (isp->isp_osinfo.tmflags & TM_WANTED) {
304 		isp->isp_osinfo.tmflags &= ~TM_WANTED;
305 		wakeup(&isp->isp_osinfo.tmflags);
306 	}
307 	isp->isp_osinfo.tmflags &= ~TM_BUSY;
308 	ISP_UNLOCK(isp);
309 }
310 
311 static cam_status
312 create_lun_state(struct ispsoftc *isp, struct cam_path *path, tstate_t **rslt)
313 {
314 	cam_status status;
315 	lun_id_t lun;
316 	tstate_t *tptr, *new;
317 
318 	lun = xpt_path_lun_id(path);
319 	if (lun < 0) {
320 		return (CAM_LUN_INVALID);
321 	}
322 	if (is_lun_enabled(isp, lun)) {
323 		return (CAM_LUN_ALRDY_ENA);
324 	}
325 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT);
326 	if (new == NULL) {
327 		return (CAM_RESRC_UNAVAIL);
328 	}
329 	bzero(new, sizeof (tstate_t));
330 
331 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
332 	    xpt_path_target_id(path), xpt_path_lun_id(path));
333 	if (status != CAM_REQ_CMP) {
334 		free(new, M_DEVBUF);
335 		return (status);
336 	}
337 	new->lun = lun;
338 	SLIST_INIT(&new->atios);
339 	SLIST_INIT(&new->inots);
340 	new->hold = 1;
341 
342 	ISP_LOCK(isp);
343 	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
344 		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)] = new;
345 	} else {
346 		while (tptr->next)
347 			tptr = tptr->next;
348 		tptr->next = new;
349 	}
350 	ISP_UNLOCK(isp);
351 	*rslt = new;
352 	return (CAM_REQ_CMP);
353 }
354 
355 static __inline void
356 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
357 {
358 	tstate_t *lw, *pw;
359 
360 	ISP_LOCK(isp);
361 	if (tptr->hold) {
362 		ISP_UNLOCK(isp);
363 		return;
364 	}
365 	pw = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)];
366 	if (pw == NULL) {
367 		ISP_UNLOCK(isp);
368 		return;
369 	} else if (pw->lun == tptr->lun) {
370 		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)] = pw->next;
371 	} else {
372 		lw = pw;
373 		pw = lw->next;
374 		while (pw) {
375 			if (pw->lun == tptr->lun) {
376 				lw->next = pw->next;
377 				break;
378 			}
379 			lw = pw;
380 			pw = pw->next;
381 		}
382 		if (pw == NULL) {
383 			ISP_UNLOCK(isp);
384 			return;
385 		}
386 	}
387 	free(tptr, M_DEVBUF);
388 	ISP_UNLOCK(isp);
389 }
390 
391 static void
392 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
393 {
394 	const char *lfmt = "Lun now %sabled for target mode\n";
395 	struct ccb_en_lun *cel = &ccb->cel;
396 	tstate_t *tptr;
397 	u_int16_t rstat;
398 	int bus, frozen = 0;
399 	lun_id_t lun;
400 	target_id_t tgt;
401 
402 
403 	bus = XS_CHANNEL(ccb);
404 	tgt = ccb->ccb_h.target_id;
405 	lun = ccb->ccb_h.target_lun;
406 
407 	/*
408 	 * Do some sanity checking first.
409 	 */
410 
411 	if (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns) {
412 		ccb->ccb_h.status = CAM_LUN_INVALID;
413 		return;
414 	}
415 	if (IS_SCSI(isp)) {
416 		if (tgt != CAM_TARGET_WILDCARD &&
417 		    tgt != ((sdparam *) isp->isp_param)->isp_initiator_id) {
418 			ccb->ccb_h.status = CAM_TID_INVALID;
419 			return;
420 		}
421 	} else {
422 		if (tgt != CAM_TARGET_WILDCARD &&
423 		    tgt != ((fcparam *) isp->isp_param)->isp_loopid) {
424 			ccb->ccb_h.status = CAM_TID_INVALID;
425 			return;
426 		}
427 	}
428 
429 	/*
430 	 * If Fibre Channel, stop and drain all activity to this bus.
431 	 */
432 	if (IS_FC(isp)) {
433 		ISP_LOCK(isp);
434 		frozen = 1;
435 		xpt_freeze_simq(isp->isp_sim, 1);
436 		isp->isp_osinfo.drain = 1;
437 		/* ISP_UNLOCK(isp);  XXX NEED CV_WAIT HERE XXX */
438 		while (isp->isp_osinfo.drain) {
439 			tsleep(&isp->isp_osinfo.drain, PRIBIO, "ispdrain", 0);
440 		}
441 		ISP_UNLOCK(isp);
442 	}
443 
444 	/*
445 	 * Check to see if we're enabling on fibre channel and
446 	 * don't yet have a notion of who the heck we are (no
447 	 * loop yet).
448 	 */
449 	if (IS_FC(isp) && cel->enable &&
450 	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
451 		int rv = 2 * 1000000;
452 		fcparam *fcp = isp->isp_param;
453 
454 		ISP_LOCK(isp);
455 		rv = isp_control(isp, ISPCTL_FCLINK_TEST, &rv);
456 		ISP_UNLOCK(isp);
457 		if (rv || fcp->isp_fwstate != FW_READY) {
458 			xpt_print_path(ccb->ccb_h.path);
459 			printf("link status not good yet\n");
460 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
461 			if (frozen)
462 				xpt_release_simq(isp->isp_sim, 1);
463 			return;
464 		}
465 		ISP_LOCK(isp);
466 		rv = isp_control(isp, ISPCTL_PDB_SYNC, NULL);
467 		ISP_UNLOCK(isp);
468 		if (rv || fcp->isp_fwstate != FW_READY) {
469 			xpt_print_path(ccb->ccb_h.path);
470 			printf("could not get a good port database read\n");
471 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
472 			if (frozen)
473 				xpt_release_simq(isp->isp_sim, 1);
474 			return;
475 		}
476 	}
477 
478 
479 	/*
480 	 * Next check to see whether this is a target/lun wildcard action.
481 	 *
482 	 * If so, we enable/disable target mode but don't do any lun enabling.
483 	 */
484 	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
485 		int av;
486 		tptr = &isp->isp_osinfo.tsdflt;
487 		if (cel->enable) {
488 			if (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) {
489 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
490 				if (frozen)
491 					xpt_release_simq(isp->isp_sim, 1);
492 				return;
493 			}
494 			ccb->ccb_h.status =
495 			    xpt_create_path(&tptr->owner, NULL,
496 			    xpt_path_path_id(ccb->ccb_h.path),
497 			    xpt_path_target_id(ccb->ccb_h.path),
498 			    xpt_path_lun_id(ccb->ccb_h.path));
499 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
500 				if (frozen)
501 					xpt_release_simq(isp->isp_sim, 1);
502 				return;
503 			}
504 			SLIST_INIT(&tptr->atios);
505 			SLIST_INIT(&tptr->inots);
506 			av = 1;
507 			ISP_LOCK(isp);
508 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
509 			if (av) {
510 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
511 				xpt_free_path(tptr->owner);
512 				ISP_UNLOCK(isp);
513 				if (frozen)
514 					xpt_release_simq(isp->isp_sim, 1);
515 				return;
516 			}
517 			isp->isp_osinfo.tmflags |= TM_TMODE_ENABLED;
518 			ISP_UNLOCK(isp);
519 		} else {
520 			if ((isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
521 				ccb->ccb_h.status = CAM_LUN_INVALID;
522 				if (frozen)
523 					xpt_release_simq(isp->isp_sim, 1);
524 				return;
525 			}
526 			if (are_any_luns_enabled(isp)) {
527 				ccb->ccb_h.status = CAM_SCSI_BUSY;
528 				if (frozen)
529 					xpt_release_simq(isp->isp_sim, 1);
530 				return;
531 			}
532 			av = 0;
533 			ISP_LOCK(isp);
534 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
535 			if (av) {
536 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
537 				ISP_UNLOCK(isp);
538 				if (frozen)
539 					xpt_release_simq(isp->isp_sim, 1);
540 				return;
541 			}
542 			isp->isp_osinfo.tmflags &= ~TM_TMODE_ENABLED;
543 			ISP_UNLOCK(isp);
544 			ccb->ccb_h.status = CAM_REQ_CMP;
545 		}
546 		xpt_print_path(ccb->ccb_h.path);
547 		printf(lfmt, (cel->enable) ? "en" : "dis");
548 		if (frozen)
549 			xpt_release_simq(isp->isp_sim, 1);
550 		return;
551 	}
552 
553 	/*
554 	 * We can move along now...
555 	 */
556 
557 	if (frozen)
558 		xpt_release_simq(isp->isp_sim, 1);
559 
560 
561 	if (cel->enable) {
562 		ccb->ccb_h.status =
563 		    create_lun_state(isp, ccb->ccb_h.path, &tptr);
564 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
565 			return;
566 		}
567 	} else {
568 		tptr = get_lun_statep(isp, lun);
569 		if (tptr == NULL) {
570 			ccb->ccb_h.status = CAM_LUN_INVALID;
571 			return;
572 		}
573 	}
574 
575 	if (isp_psema_sig_rqe(isp)) {
576 		rls_lun_statep(isp, tptr);
577 		if (cel->enable)
578 			destroy_lun_state(isp, tptr);
579 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
580 		return;
581 	}
582 
583 	ISP_LOCK(isp);
584 	if (cel->enable) {
585 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
586 		rstat = LUN_ERR;
587 		if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
588 			xpt_print_path(ccb->ccb_h.path);
589 			printf("isp_lun_cmd failed\n");
590 			goto out;
591 		}
592 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
593 			xpt_print_path(ccb->ccb_h.path);
594 			printf("wait for ENABLE LUN timed out\n");
595 			goto out;
596 		}
597 		rstat = isp->isp_osinfo.rstatus;
598 		if (rstat != LUN_OK) {
599 			xpt_print_path(ccb->ccb_h.path);
600 			printf("ENABLE LUN returned 0x%x\n", rstat);
601 			goto out;
602 		}
603 	} else {
604 		u_int32_t seq;
605 
606 		seq = isp->isp_osinfo.rollinfo++;
607 		rstat = LUN_ERR;
608 
609 		if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) {
610 			xpt_print_path(ccb->ccb_h.path);
611 			printf("isp_lun_cmd failed\n");
612 			goto out;
613 		}
614 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
615 			xpt_print_path(ccb->ccb_h.path);
616 			printf("wait for MODIFY LUN timed out\n");
617 			goto out;
618 		}
619 		rstat = isp->isp_osinfo.rstatus;
620 		if (rstat != LUN_OK) {
621 			xpt_print_path(ccb->ccb_h.path);
622 			printf("MODIFY LUN returned 0x%x\n", rstat);
623 			goto out;
624 		}
625 		rstat = LUN_ERR;
626 		seq = isp->isp_osinfo.rollinfo++;
627 
628 		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
629 			xpt_print_path(ccb->ccb_h.path);
630 			printf("isp_lun_cmd failed\n");
631 			goto out;
632 		}
633 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
634 			xpt_print_path(ccb->ccb_h.path);
635 			printf("wait for ENABLE LUN timed out\n");
636 			goto out;
637 		}
638 		rstat = isp->isp_osinfo.rstatus;
639 		if (rstat != LUN_OK) {
640 			xpt_print_path(ccb->ccb_h.path);
641 			printf("ENABLE LUN returned 0x%x\n", rstat);
642 			goto out;
643 		}
644 	}
645 out:
646 	isp_vsema_rqe(isp);
647 	ISP_UNLOCK(isp);
648 
649 	if (rstat != LUN_OK) {
650 		xpt_print_path(ccb->ccb_h.path);
651 		printf("lun %sable failed\n", (cel->enable) ? "en" : "dis");
652 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
653 		rls_lun_statep(isp, tptr);
654 		if (cel->enable)
655 			destroy_lun_state(isp, tptr);
656 	} else {
657 		xpt_print_path(ccb->ccb_h.path);
658 		printf(lfmt, (cel->enable) ? "en" : "dis");
659 		rls_lun_statep(isp, tptr);
660 		if (cel->enable == 0) {
661 			destroy_lun_state(isp, tptr);
662 		}
663 		ccb->ccb_h.status = CAM_REQ_CMP;
664 	}
665 }
666 
667 static cam_status
668 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
669 {
670 	tstate_t *tptr;
671 	struct ccb_hdr_slist *lp;
672 	struct ccb_hdr *curelm;
673 	int found;
674 	union ccb *accb = ccb->cab.abort_ccb;
675 
676 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
677 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
678 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
679 			return (CAM_PATH_INVALID);
680 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
681 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
682 			return (CAM_PATH_INVALID);
683 		}
684 	}
685 	tptr = get_lun_statep(isp, accb->ccb_h.target_lun);
686 	if (tptr == NULL) {
687 		return (CAM_PATH_INVALID);
688 	}
689 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
690 		lp = &tptr->atios;
691 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
692 		lp = &tptr->inots;
693 	} else {
694 		rls_lun_statep(isp, tptr);
695 		return (CAM_UA_ABORT);
696 	}
697 	curelm = SLIST_FIRST(lp);
698 	found = 0;
699 	if (curelm == &accb->ccb_h) {
700 		found = 1;
701 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
702 	} else {
703 		while(curelm != NULL) {
704 			struct ccb_hdr *nextelm;
705 
706 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
707 			if (nextelm == &accb->ccb_h) {
708 				found = 1;
709 				SLIST_NEXT(curelm, sim_links.sle) =
710 				    SLIST_NEXT(nextelm, sim_links.sle);
711 				break;
712 			}
713 			curelm = nextelm;
714 		}
715 	}
716 	rls_lun_statep(isp, tptr);
717 	if (found) {
718 		accb->ccb_h.status = CAM_REQ_ABORTED;
719 		return (CAM_REQ_CMP);
720 	}
721 	return(CAM_PATH_INVALID);
722 }
723 
724 static cam_status
725 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
726 {
727 	void *qe;
728 	struct ccb_scsiio *cso = &ccb->csio;
729 	u_int32_t *hp, save_handle;
730 	u_int16_t iptr, optr;
731 
732 
733 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
734 		xpt_print_path(ccb->ccb_h.path);
735 		printf("Request Queue Overflow in isp_target_start_ctio\n");
736 		return (CAM_RESRC_UNAVAIL);
737 	}
738 	bzero(qe, QENTRY_LEN);
739 
740 	/*
741 	 * We're either moving data or completing a command here.
742 	 */
743 
744 	if (IS_FC(isp)) {
745 		struct ccb_accept_tio *atiop;
746 		ct2_entry_t *cto = qe;
747 
748 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
749 		cto->ct_header.rqs_entry_count = 1;
750 		cto->ct_iid = cso->init_id;
751 		if (isp->isp_maxluns <= 16) {
752 			cto->ct_lun = ccb->ccb_h.target_lun;
753 		}
754 		/*
755 		 * Start with a residual based on what the original datalength
756 		 * was supposed to be. Basically, we ignore what CAM has set
757 		 * for residuals. The data transfer routines will knock off
758 		 * the residual for each byte actually moved- and also will
759 		 * be responsible for setting the underrun flag.
760 		 */
761 		/* HACK! HACK! */
762 		if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
763 			cto->ct_resid = atiop->ccb_h.spriv_field0;
764 		}
765 
766 		/*
767 		 * We always have to use the tag_id- it has the RX_ID
768 		 * for this exchange.
769 		 */
770 		cto->ct_rxid = cso->tag_id;
771 		if (cso->dxfer_len == 0) {
772 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
773 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
774 				cto->ct_flags |= CT2_SENDSTATUS;
775 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
776 			}
777 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
778 				int m = min(cso->sense_len, MAXRESPLEN);
779 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
780 				cto->rsp.m1.ct_senselen = m;
781 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
782 			}
783 		} else {
784 			cto->ct_flags |= CT2_FLAG_MODE0;
785 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
786 				cto->ct_flags |= CT2_DATA_IN;
787 			} else {
788 				cto->ct_flags |= CT2_DATA_OUT;
789 			}
790 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
791 				cto->ct_flags |= CT2_SENDSTATUS;
792 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
793 			}
794 			/*
795 			 * If we're sending data and status back together,
796 			 * we can't also send back sense data as well.
797 			 */
798 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
799 		}
800 		if (cto->ct_flags & CT2_SENDSTATUS) {
801 			isp_prt(isp, ISP_LOGTDEBUG2,
802 			    "CTIO2 RX_ID 0x%x SCSI STATUS 0x%x datalength %u",
803 			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
804 		}
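		/* The command handle is carried in the ct_reserved field. */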
805 		hp = &cto->ct_reserved;
806 	} else {
807 		ct_entry_t *cto = qe;
808 
809 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
810 		cto->ct_header.rqs_entry_count = 1;
811 		cto->ct_iid = cso->init_id;
812 		cto->ct_tgt = ccb->ccb_h.target_id;
813 		cto->ct_lun = ccb->ccb_h.target_lun;
814 		if (cso->tag_id && cso->tag_action) {
815 			/*
816 			 * We don't specify a tag type for regular SCSI.
817 			 * Just the tag value and set the flag.
818 			 */
819 			cto->ct_tag_val = cso->tag_id;
820 			cto->ct_flags |= CT_TQAE;
821 		}
822 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
823 			cto->ct_flags |= CT_NODISC;
824 		}
825 		if (cso->dxfer_len == 0) {
826 			cto->ct_flags |= CT_NO_DATA;
827 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
828 			cto->ct_flags |= CT_DATA_IN;
829 		} else {
830 			cto->ct_flags |= CT_DATA_OUT;
831 		}
832 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
833 			cto->ct_flags |= CT_SENDSTATUS;
834 			cto->ct_scsi_status = cso->scsi_status;
835 			cto->ct_resid = cso->resid;
836 		}
837 		if (cto->ct_flags & CT_SENDSTATUS) {
838 			isp_prt(isp, ISP_LOGTDEBUG2,
839 			    "CTIO SCSI STATUS 0x%x resid %d",
840 			    cso->scsi_status, cso->resid);
841 		}
842 		hp = &cto->ct_reserved;
843 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
844 	}
845 
846 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
847 		xpt_print_path(ccb->ccb_h.path);
848 		printf("No XFLIST pointers for isp_target_start_ctio\n");
849 		return (CAM_RESRC_UNAVAIL);
850 	}
851 
852 
853 	/*
854 	 * Call the dma setup routines for this entry (and any subsequent
855 	 * CTIOs) if there's data to move, and then tell the f/w it's got
856 	 * new things to play with. As with isp_start's usage of DMA setup,
857 	 * any swizzling is done in the machine dependent layer. Because
858 	 * of this, we put the request onto the queue area first in native
859 	 * format.
860 	 */
861 
862 	save_handle = *hp;
863 	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
864 	case CMD_QUEUED:
865 		ISP_ADD_REQUEST(isp, iptr);
866 		return (CAM_REQ_INPROG);
867 
868 	case CMD_EAGAIN:
869 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
870 		isp_destroy_handle(isp, save_handle);
871 		return (CAM_RESRC_UNAVAIL);
872 
873 	default:
874 		isp_destroy_handle(isp, save_handle);
875 		return (XS_ERR(ccb));
876 	}
877 }
878 
879 static cam_status
880 isp_target_putback_atio(struct ispsoftc *isp, union ccb *ccb)
881 {
882 	void *qe;
883 	struct ccb_accept_tio *atiop;
884 	u_int16_t iptr, optr;
885 
886 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
887 		xpt_print_path(ccb->ccb_h.path);
888 		printf("Request Queue Overflow in isp_target_putback_atio\n");
889 		return (CAM_RESRC_UNAVAIL);
890 	}
891 	bzero(qe, QENTRY_LEN);
892 	atiop = (struct ccb_accept_tio *) ccb;
893 	if (IS_FC(isp)) {
894 		at2_entry_t *at = qe;
895 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
896 		at->at_header.rqs_entry_count = 1;
897 		if (isp->isp_maxluns > 16) {
898 			at->at_scclun = (uint16_t) atiop->ccb_h.target_lun;
899 		} else {
900 			at->at_lun = (uint8_t) atiop->ccb_h.target_lun;
901 		}
902 		at->at_status = CT_OK;
903 		at->at_rxid = atiop->tag_id;
904 		ISP_SWIZ_ATIO2(isp, qe, qe);
905 	} else {
906 		at_entry_t *at = qe;
907 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
908 		at->at_header.rqs_entry_count = 1;
909 		at->at_iid = atiop->init_id;
910 		at->at_tgt = atiop->ccb_h.target_id;
911 		at->at_lun = atiop->ccb_h.target_lun;
912 		at->at_status = CT_OK;
913 		if (atiop->ccb_h.status & CAM_TAG_ACTION_VALID) {
914 			at->at_tag_type = atiop->tag_action;
915 		}
916 		at->at_tag_val = atiop->tag_id;
917 		ISP_SWIZ_ATIO(isp, qe, qe);
918 	}
919 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
920 	ISP_ADD_REQUEST(isp, iptr);
921 	return (CAM_REQ_CMP);
922 }
923 
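/*
 * Retry handler: keep trying to push the ATIO back to the firmware
 * every 10 ticks until it succeeds, then finish the CTIO completion.
 */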
924 static void
925 isp_refire_putback_atio(void *arg)
926 {
927 	union ccb *ccb = arg;
928 	int s = splcam();
929 	if (isp_target_putback_atio(XS_ISP(ccb), ccb) != CAM_REQ_CMP) {
930 		(void) timeout(isp_refire_putback_atio, ccb, 10);
931 	} else {
932 		isp_handle_platform_ctio_part2(XS_ISP(ccb), ccb);
933 	}
934 	splx(s);
935 }
936 
937 /*
938  * Handle ATIO stuff that the generic code can't.
939  * This means handling CDBs.
940  */
941 
942 static int
943 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
944 {
945 	tstate_t *tptr;
946 	int status;
947 	struct ccb_accept_tio *atiop;
948 
949 	/*
950 	 * The firmware status (except for the QLTM_SVALID bit)
951 	 * indicates why this ATIO was sent to us.
952 	 *
953 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
954 	 *
955 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
956 	 * we're still connected on the SCSI bus - i.e. the initiator
957 	 * did not set DiscPriv in the identify message. We don't care
958 	 * about this so it's ignored.
959 	 */
960 	status = aep->at_status;
961 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
962 		/*
963 		 * Bus Phase Sequence error. We should have sense data
964 		 * suggested by the f/w. I'm not sure quite yet what
965 		 * to do about this for CAM.
966 		 */
967 		printf("%s: PHASE ERROR\n", isp->isp_name);
968 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
969 		return (0);
970 	}
971 	if ((status & ~QLTM_SVALID) != AT_CDB) {
972 		printf("%s: bogus atio (0x%x) leaked to platform\n",
973 		    isp->isp_name, status);
974 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
975 		return (0);
976 	}
977 
978 	tptr = get_lun_statep(isp, aep->at_lun);
979 	if (tptr == NULL) {
980 		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
981 	}
982 
983 	if (tptr == NULL) {
984 		/*
985 		 * Because we can't autofeed sense data back with
986 		 * a command for parallel SCSI, we can't give back
987 		 * a CHECK CONDITION. We'll give back a BUSY status
988 		 * instead. This works out okay because the only
989 		 * time we should, in fact, get this, is in the
990 		 * case that somebody configured us without the
991 		 * blackhole driver, so they get what they deserve.
992 		 */
993 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
994 		return (0);
995 	}
996 
997 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
998 	if (atiop == NULL) {
999 		/*
1000 		 * Because we can't autofeed sense data back with
1001 		 * a command for parallel SCSI, we can't give back
1002 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1003 		 * instead. This works out okay because the only time we
1004 		 * should, in fact, get this, is in the case that we've
1005 		 * run out of ATIOS.
1006 		 */
1007 		xpt_print_path(tptr->owner);
1008 		printf("no ATIOS for lun %d from initiator %d\n",
1009 		    aep->at_lun, aep->at_iid);
1010 		rls_lun_statep(isp, tptr);
1011 		if (aep->at_flags & AT_TQAE)
1012 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1013 		else
1014 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1015 		return (0);
1016 	}
1017 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1018 	if (tptr == &isp->isp_osinfo.tsdflt) {
1019 		atiop->ccb_h.target_id = aep->at_tgt;
1020 		atiop->ccb_h.target_lun = aep->at_lun;
1021 	}
1022 	if (aep->at_flags & AT_NODISC) {
1023 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1024 	} else {
1025 		atiop->ccb_h.flags = 0;
1026 	}
1027 
1028 	if (status & QLTM_SVALID) {
1029 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1030 		atiop->sense_len = amt;
1031 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1032 	} else {
1033 		atiop->sense_len = 0;
1034 	}
1035 
1036 	atiop->init_id = aep->at_iid;
1037 	atiop->cdb_len = aep->at_cdblen;
1038 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1039 	atiop->ccb_h.status = CAM_CDB_RECVD;
1040 	atiop->tag_id = aep->at_tag_val;
1041 	if ((atiop->tag_action = aep->at_tag_type) != 0) {
1042 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1043 	}
1044 	xpt_done((union ccb*)atiop);
1045 	isp_prt(isp, ISP_LOGTDEBUG2,
1046 	    "ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s",
1047 	    aep->at_cdb[0] & 0xff, aep->at_iid, aep->at_lun,
1048 	    aep->at_tag_val & 0xff, aep->at_tag_type,
1049 	    (aep->at_flags & AT_NODISC)? "nondisc" : "disconnecting");
1050 	rls_lun_statep(isp, tptr);
1051 	return (0);
1052 }
1053 
1054 static int
1055 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1056 {
1057 	lun_id_t lun;
1058 	tstate_t *tptr;
1059 	struct ccb_accept_tio *atiop;
1060 
1061 	/*
1062 	 * The firmware status (except for the QLTM_SVALID bit)
1063 	 * indicates why this ATIO was sent to us.
1064 	 *
1065 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1066 	 */
1067 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1068 		printf("%s: bogus atio (0x%x) leaked to platform\n",
1069 		    isp->isp_name, aep->at_status);
1070 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1071 		return (0);
1072 	}
1073 
1074 	if (isp->isp_maxluns > 16) {
1075 		lun = aep->at_scclun;
1076 	} else {
1077 		lun = aep->at_lun;
1078 	}
1079 	tptr = get_lun_statep(isp, lun);
1080 	if (tptr == NULL) {
1081 		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
1082 	}
1083 
1084 	if (tptr == NULL) {
1085 		/*
1086 		 * What we'd like to know is whether or not we have a listener
1087 		 * upstream that really hasn't configured yet. If we do, then
1088 		 * we can give a more sensible reply here. If not, then we can
1089 		 * reject this out of hand.
1090 		 *
1091 		 * Choices for what to send were
1092 		 *
1093 		 *	Not Ready, Unit Not Self-Configured Yet
1094 		 *	(0x2,0x3e,0x00)
1095 		 *
1096 		 * for the former and
1097 		 *
1098 		 *	Illegal Request, Logical Unit Not Supported
1099 		 *	(0x5,0x25,0x00)
1100 		 *
1101 		 * for the latter.
1102 		 *
1103 		 * We used to decide whether there was at least one listener
1104 		 * based upon whether the black hole driver was configured.
1105 		 * However, recent config(8) changes have made this hard to do
1106 		 * at this time.
1107 		 *
1108 		 */
1109 		u_int32_t ccode = SCSI_STATUS_BUSY;
1110 
1111 		/*
1112 		 * Because we can't autofeed sense data back with
1113 		 * a command for parallel SCSI, we can't give back
1114 		 * a CHECK CONDITION. We'll give back a BUSY status
1115 		 * instead. This works out okay because the only
1116 		 * time we should, in fact, get this, is in the
1117 		 * case that somebody configured us without the
1118 		 * blackhole driver, so they get what they deserve.
1119 		 */
1120 		isp_endcmd(isp, aep, ccode, 0);
1121 		return (0);
1122 	}
1123 
1124 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1125 	if (atiop == NULL) {
1126 		/*
1127 		 * Because we can't autofeed sense data back with
1128 		 * a command for parallel SCSI, we can't give back
1129 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1130 		 * instead. This works out okay because the only time we
1131 		 * should, in fact, get this, is in the case that we've
1132 		 * run out of ATIOS.
1133 		 */
1134 		xpt_print_path(tptr->owner);
1135 		printf("no ATIOS for lun %d from initiator %d\n",
1136 		    lun, aep->at_iid);
1137 		rls_lun_statep(isp, tptr);
1138 		if (aep->at_flags & AT_TQAE)
1139 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1140 		else
1141 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1142 		return (0);
1143 	}
1144 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1145 
1146 	if (tptr == &isp->isp_osinfo.tsdflt) {
1147 		atiop->ccb_h.target_id =
1148 			((fcparam *)isp->isp_param)->isp_loopid;
1149 		atiop->ccb_h.target_lun = lun;
1150 	}
1151 	if (aep->at_status & QLTM_SVALID) {
1152 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1153 		atiop->sense_len = amt;
1154 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1155 	} else {
1156 		atiop->sense_len = 0;
1157 	}
1158 
1159 	atiop->init_id = aep->at_iid;
1160 	atiop->cdb_len = ATIO2_CDBLEN;
1161 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1162 	atiop->ccb_h.status = CAM_CDB_RECVD;
1163 	atiop->tag_id = aep->at_rxid;
1164 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1165 	case ATIO2_TC_ATTR_SIMPLEQ:
1166 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1167 		break;
1168 	case ATIO2_TC_ATTR_HEADOFQ:
1169 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1170 		break;
1171 	case ATIO2_TC_ATTR_ORDERED:
1172 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1173 		break;
1174 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1175 	case ATIO2_TC_ATTR_UNTAGGED:
1176 	default:
1177 		atiop->tag_action = 0;
1178 		break;
1179 	}
1180 	if (atiop->tag_action != 0) {
1181 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1182 	}
1183 
1184 	/*
1185 	 * Preserve overall command datalength in private field.
1186 	 */
1187 	atiop->ccb_h.spriv_field0 = aep->at_datalen;
1188 
1189 	xpt_done((union ccb*)atiop);
1190 	isp_prt(isp, ISP_LOGTDEBUG2,
1191 	    "ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1192 	    aep->at_rxid & 0xffff, aep->at_cdb[0] & 0xff, aep->at_iid,
1193 	    lun, aep->at_taskflags, aep->at_datalen);
1194 	rls_lun_statep(isp, tptr);
1195 	return (0);
1196 }
1197 
1198 static int
1199 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1200 {
1201 	union ccb *ccb;
1202 	int sentstatus, ok, notify_cam;
1203 
1204 	/*
1205 	 * CTIO and CTIO2 are close enough....
1206 	 */
1207 
1208 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_reserved);
1209 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1210 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_reserved);
1211 
1212 	if (IS_FC(isp)) {
1213 		ct2_entry_t *ct = arg;
1214 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1215 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1216 		if (ok && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1217 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1218 		}
1219 		isp_prt(isp, ISP_LOGTDEBUG2,
1220 		    "CTIO2 RX_ID 0x%x sts 0x%x flg 0x%x sns %d FIN",
1221 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1222 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
1223 		notify_cam = ct->ct_header.rqs_seqno;
1224 	} else {
1225 		ct_entry_t *ct = arg;
1226 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1227 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1228 		isp_prt(isp, ISP_LOGTDEBUG2,
1229 		    "CTIO tag 0x%x sts 0x%x flg 0x%x FIN",
1230 		    ct->ct_tag_val, ct->ct_status, ct->ct_flags);
1231 		notify_cam = ct->ct_header.rqs_seqno;
1232 	}
1233 
1234 	/*
1235 	 * We're here either because data transfers are done (and
1236 	 * it's time to send a final status CTIO) or because the final
1237 	 * status CTIO is done. We don't get called for all intermediate
1238 	 * CTIOs that happen for a large data transfer.
1239 	 *
1240 	 * In any case, for this platform, the upper layers figure out
1241 	 * what to do next, so all we do here is collect status and
1242 	 * pass information along. The exception is that we clear
1243 	 * the notion of handling a non-disconnecting command here.
1244 	 */
1245 
1246 	if (sentstatus) {
1247 		/*
1248 		 * Data transfer done. See if all went okay.
1249 		 */
1250 		if (ok) {
1251 			ccb->csio.resid = 0;
1252 		} else {
1253 			ccb->csio.resid = ccb->csio.dxfer_len;
1254 		}
1255 	}
1256 
1257 	if (notify_cam == 0) {
1258 		isp_prt(isp, ISP_LOGTDEBUG1, "Intermediate CTIO done");
1259 		return (0);
1260 	}
1261 	isp_prt(isp, ISP_LOGTDEBUG1, "Final CTIO done");
1262 	if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
1263 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1264 	} else {
1265 		isp_handle_platform_ctio_part2(isp, ccb);
1266 	}
1267 	return (0);
1268 }
1269 
1270 static void
1271 isp_handle_platform_ctio_part2(struct ispsoftc *isp, union ccb *ccb)
1272 {
1273 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1274 		ccb->ccb_h.status |= CAM_REQ_CMP;
1275 	}
1276 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1277 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1278 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1279 		if (isp->isp_osinfo.simqfrozen == 0) {
1280 			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1281 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
1282 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1283 			} else {
1284 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->devqfrozen");
1285 			}
1286 		} else {
1287 			isp_prt(isp, ISP_LOGDEBUG2,
1288 			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
1289 		}
1290 	}
1291 	xpt_done(ccb);
1292 }
1293 #endif
1294 
1295 static void
1296 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1297 {
1298 	struct cam_sim *sim;
1299 	struct ispsoftc *isp;
1300 
1301 	sim = (struct cam_sim *)cbarg;
1302 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1303 	switch (code) {
1304 	case AC_LOST_DEVICE:
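		/*
		 * On device loss, temporarily force safe default negotiation
		 * settings (narrow/async as well, for certain firmware
		 * revisions), trigger an update, and then restore the
		 * original goal flags.
		 */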
1305 		if (IS_SCSI(isp)) {
1306 			u_int16_t oflags, nflags;
1307 			sdparam *sdp = isp->isp_param;
1308 			int rvf, tgt;
1309 
1310 			tgt = xpt_path_target_id(path);
1311 			rvf = ISP_FW_REVX(isp->isp_fwrev);
1312 			ISP_LOCK(isp);
1313 			sdp += cam_sim_bus(sim);
1314 			isp->isp_update |= (1 << cam_sim_bus(sim));
1315 			nflags = DPARM_SAFE_DFLT;
1316 			if (rvf >= ISP_FW_REV(7, 55, 0) ||
1317 			   (ISP_FW_REV(4, 55, 0) <= rvf &&
1318 			   (rvf < ISP_FW_REV(5, 0, 0)))) {
1319 				nflags |= DPARM_NARROW | DPARM_ASYNC;
1320 			}
1321 			oflags = sdp->isp_devparam[tgt].dev_flags;
1322 			sdp->isp_devparam[tgt].dev_flags = nflags;
1323 			sdp->isp_devparam[tgt].dev_update = 1;
1324 			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
1325 			sdp->isp_devparam[tgt].dev_flags = oflags;
1326 			ISP_UNLOCK(isp);
1327 		}
1328 		break;
1329 	default:
1330 		printf("%s: isp_attach Async Code 0x%x\n", isp->isp_name, code);
1331 		break;
1332 	}
1333 }
1334 
1335 static void
1336 isp_poll(struct cam_sim *sim)
1337 {
1338 	isp_intr((struct ispsoftc *) cam_sim_softc(sim));
1339 }
1340 
1341 static void
1342 isp_relsim(void *arg)
1343 {
1344 	struct ispsoftc *isp = arg;
1345 	ISP_LOCK(isp);
1346 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
1347 		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
1348 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
1349 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1350 			xpt_release_simq(isp->isp_sim, 1);
1351 			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
1352 		}
1353 	}
1354 	ISP_UNLOCK(isp);
1355 }
1356 
1357 static void
1358 isp_watchdog(void *arg)
1359 {
1360 	XS_T *xs = arg;
1361 	struct ispsoftc *isp = XS_ISP(xs);
1362 	u_int32_t handle;
1363 
1364 	/*
1365 	 * We've decided this command is dead. Make sure we're not trying
1366 	 * to kill a command that's already dead by getting its handle
1367 	 * and seeing whether it's still alive.
1368 	 */
1369 	ISP_LOCK(isp);
1370 	handle = isp_find_handle(isp, xs);
1371 	if (handle) {
1372 		u_int16_t r;
1373 
1374 		if (XS_CMD_DONE_P(xs)) {
1375 			isp_prt(isp, ISP_LOGDEBUG1,
1376 			    "watchdog found done cmd (handle 0x%x)", handle);
1377 			ISP_UNLOCK(isp);
1378 			return;
1379 		}
1380 
1381 		if (XS_CMD_WDOG_P(xs)) {
1382 			isp_prt(isp, ISP_LOGDEBUG2,
1383 			    "recursive watchdog (handle 0x%x)", handle);
1384 			ISP_UNLOCK(isp);
1385 			return;
1386 		}
1387 
1388 		XS_CMD_S_WDOG(xs);
1389 
1390 		r = ISP_READ(isp, BIU_ISR);
1391 
1392 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
1393 			isp_prt(isp, ISP_LOGDEBUG2,
1394 			    "watchdog cleanup (%x, %x)", handle, r);
1395 			xpt_done((union ccb *) xs);
1396 		} else if (XS_CMD_GRACE_P(xs)) {
1397 			/*
1398 			 * Make sure the command is *really* dead before we
1399 			 * release the handle (and DMA resources) for reuse.
1400 			 */
1401 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1402 
1403 			/*
1404 			 * After this point, the command is really dead.
1405 			 */
1406 			if (XS_XFRLEN(xs)) {
1407 				ISP_DMAFREE(isp, xs, handle);
1408 			}
1409 			isp_destroy_handle(isp, handle);
1410 			xpt_print_path(xs->ccb_h.path);
1411 			printf("%s: watchdog timeout (%x, %x)\n",
1412 			    isp->isp_name, handle, r);
1413 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1414 			XS_CMD_C_WDOG(xs);
1415 			isp_done(xs);
1416 		} else {
1417 			u_int16_t iptr, optr;
1418 			ispreq_t *mp;
1419 
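			/*
			 * Grant one more watchdog period of grace: re-arm the
			 * timer, mark the command, and queue a SYNC_ALL marker
			 * for this channel.
			 */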
1420 			XS_CMD_C_WDOG(xs);
1421 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1422 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
1423 				ISP_UNLOCK(isp);
1424 				return;
1425 			}
1426 			XS_CMD_S_GRACE(xs);
1427 			MEMZERO((void *) mp, sizeof (*mp));
1428 			mp->req_header.rqs_entry_count = 1;
1429 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1430 			mp->req_modifier = SYNC_ALL;
1431 			mp->req_target = XS_CHANNEL(xs) << 7;
1432 			ISP_SWIZZLE_REQUEST(isp, mp);
1433 			ISP_ADD_REQUEST(isp, iptr);
1434 		}
1435 	} else {
1436 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1437 	}
1438 	ISP_UNLOCK(isp);
1439 }
1440 
1441 static void
1442 isp_action(struct cam_sim *sim, union ccb *ccb)
1443 {
1444 	int bus, tgt, error;
1445 	struct ispsoftc *isp;
1446 	struct ccb_trans_settings *cts;
1447 
1448 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1449 
1450 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1451 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1452 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1453 	if (isp->isp_state != ISP_RUNSTATE &&
1454 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1455 		ISP_LOCK(isp);
1456 		isp_init(isp);
1457 		if (isp->isp_state != ISP_INITSTATE) {
1458 			ISP_UNLOCK(isp);
1459 			/*
1460 			 * Lie. Say it was a selection timeout.
1461 			 */
1462 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1463 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1464 			xpt_done(ccb);
1465 			return;
1466 		}
1467 		isp->isp_state = ISP_RUNSTATE;
1468 		ISP_UNLOCK(isp);
1469 	}
1470 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1471 
1472 	switch (ccb->ccb_h.func_code) {
1473 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1474 		/*
1475 		 * Do a couple of preliminary checks...
1476 		 */
1477 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1478 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1479 				ccb->ccb_h.status = CAM_REQ_INVALID;
1480 				xpt_done(ccb);
1481 				break;
1482 			}
1483 		}
1484 #ifdef	DIAGNOSTIC
1485 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
1486 			ccb->ccb_h.status = CAM_PATH_INVALID;
1487 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
1488 			ccb->ccb_h.status = CAM_PATH_INVALID;
1489 		}
1490 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
1491 			printf("%s: invalid tgt/lun (%d.%d) in XPT_SCSI_IO\n",
1492 			    isp->isp_name, ccb->ccb_h.target_id,
1493 			    ccb->ccb_h.target_lun);
1494 			xpt_done(ccb);
1495 			break;
1496 		}
1497 #endif
1498 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
1499 		ISP_LOCK(isp);
1500 		error = isp_start((XS_T *) ccb);
1501 		ISP_UNLOCK(isp);
1502 		switch (error) {
1503 		case CMD_QUEUED:
1504 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1505 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
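				/*
				 * CAM timeouts are in milliseconds; round up
				 * to clock ticks and add two seconds of slack
				 * for the watchdog.
				 */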
1506 				int ticks;
1507 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1508 					ticks = 60 * 1000 * hz;
1509 				else
1510 					ticks = ccb->ccb_h.timeout * hz;
1511 				ticks = ((ticks + 999) / 1000) + hz + hz;
1512 				ccb->ccb_h.timeout_ch =
1513 				    timeout(isp_watchdog, (caddr_t)ccb, ticks);
1514 			} else {
1515 				callout_handle_init(&ccb->ccb_h.timeout_ch);
1516 			}
1517 			break;
1518 		case CMD_RQLATER:
1519 			if (isp->isp_osinfo.simqfrozen == 0) {
1520 				isp_prt(isp, ISP_LOGDEBUG2,
1521 				    "RQLATER freeze simq");
1522 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED;
1523 				timeout(isp_relsim, isp, 500);
1524 				xpt_freeze_simq(sim, 1);
1525 			}
1526 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1527 			xpt_done(ccb);
1528 			break;
1529 		case CMD_EAGAIN:
1530 			if (isp->isp_osinfo.simqfrozen == 0) {
1531 				xpt_freeze_simq(sim, 1);
1532 				isp_prt(isp, ISP_LOGDEBUG2,
1533 				    "EAGAIN freeze simq");
1534 			}
1535 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1536 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1537 			xpt_done(ccb);
1538 			break;
1539 		case CMD_COMPLETE:
1540 			isp_done((struct ccb_scsiio *) ccb);
1541 			break;
1542 		default:
1543 			printf("%s: What's this? 0x%x at %d in file %s\n",
1544 			    isp->isp_name, error, __LINE__, __FILE__);
1545 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
1546 			xpt_done(ccb);
1547 		}
1548 		break;
1549 
1550 #ifdef	ISP_TARGET_MODE
1551 	case XPT_EN_LUN:		/* Enable LUN as a target */
1552 		isp_en_lun(isp, ccb);
1553 		xpt_done(ccb);
1554 		break;
1555 
1556 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
1557 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
1558 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
1559 	{
1560 		tstate_t *tptr = get_lun_statep(isp, ccb->ccb_h.target_lun);
1561 		if (tptr == NULL) {
1562 			ccb->ccb_h.status = CAM_LUN_INVALID;
1563 			xpt_done(ccb);
1564 			break;
1565 		}
1566 		ccb->ccb_h.sim_priv.entries[0].field = 0;
1567 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1568 		ISP_LOCK(isp);
1569 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1570 #if	0
1571 			(void) isp_target_putback_atio(isp, ccb);
1572 #endif
1573 			SLIST_INSERT_HEAD(&tptr->atios,
1574 			    &ccb->ccb_h, sim_links.sle);
1575 		} else {
1576 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
1577 			    sim_links.sle);
1578 		}
1579 		ISP_UNLOCK(isp);
1580 		rls_lun_statep(isp, tptr);
1581 		ccb->ccb_h.status = CAM_REQ_INPROG;
1582 		break;
1583 	}
1584 	case XPT_CONT_TARGET_IO:
1585 	{
1586 		ISP_LOCK(isp);
1587 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
1588 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1589 			if (isp->isp_osinfo.simqfrozen == 0) {
1590 				xpt_freeze_simq(sim, 1);
1591 				xpt_print_path(ccb->ccb_h.path);
1592 				printf("XPT_CONT_TARGET_IO freeze simq\n");
1593 			}
1594 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1595 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1596 			xpt_done(ccb);
1597 		} else {
1598 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1599 		}
1600 		ISP_UNLOCK(isp);
1601 		break;
1602 	}
1603 #endif
1604 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
1605 
1606 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1607 		tgt = ccb->ccb_h.target_id;
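		/* The channel (bus) rides in the upper 16 bits of the target argument. */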
1608 		tgt |= (bus << 16);
1609 
1610 		ISP_LOCK(isp);
1611 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
1612 		ISP_UNLOCK(isp);
1613 		if (error) {
1614 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1615 		} else {
1616 			ccb->ccb_h.status = CAM_REQ_CMP;
1617 		}
1618 		xpt_done(ccb);
1619 		break;
1620 	case XPT_ABORT:			/* Abort the specified CCB */
1621 	{
1622 		union ccb *accb = ccb->cab.abort_ccb;
1623 		switch (accb->ccb_h.func_code) {
1624 #ifdef	ISP_TARGET_MODE
1625 		case XPT_ACCEPT_TARGET_IO:
1626 		case XPT_IMMED_NOTIFY:
1627 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
1628 			break;
1629 		case XPT_CONT_TARGET_IO:
1630 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
1631 			ccb->ccb_h.status = CAM_UA_ABORT;
1632 			break;
1633 #endif
1634 		case XPT_SCSI_IO:
1635 			ISP_LOCK(isp);
1636 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
1637 			ISP_UNLOCK(isp);
1638 			if (error) {
1639 				ccb->ccb_h.status = CAM_UA_ABORT;
1640 			} else {
1641 				ccb->ccb_h.status = CAM_REQ_CMP;
1642 			}
1643 			break;
1644 		default:
1645 			ccb->ccb_h.status = CAM_REQ_INVALID;
1646 			break;
1647 		}
1648 		xpt_done(ccb);
1649 		break;
1650 	}
1651 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
1652 
1653 		cts = &ccb->cts;
1654 		tgt = cts->ccb_h.target_id;
1655 		ISP_LOCK(isp);
1656 		if (IS_SCSI(isp)) {
1657 			sdparam *sdp = isp->isp_param;
1658 			u_int16_t *dptr;
1659 
1660 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1661 
1662 			sdp += bus;
1663 #if	0
1664 			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS)
1665 				dptr = &sdp->isp_devparam[tgt].cur_dflags;
1666 			else
1667 				dptr = &sdp->isp_devparam[tgt].dev_flags;
1668 #else
1669 			/*
1670 			 * We always update (internally) from dev_flags
1671 			 * so any request to change settings just gets
1672 			 * vectored to that location.
1673 			 */
1674 			dptr = &sdp->isp_devparam[tgt].dev_flags;
1675 #endif
1676 
1677 			/*
1678 			 * Note that these operations affect the
1679 			 * goal flags (dev_flags)- not
1680 			 * the current state flags. Then we mark
1681 			 * things so that the next operation to
1682 			 * this HBA will cause the update to occur.
1683 			 */
1684 			if (cts->valid & CCB_TRANS_DISC_VALID) {
1685 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
1686 					*dptr |= DPARM_DISC;
1687 				} else {
1688 					*dptr &= ~DPARM_DISC;
1689 				}
1690 			}
1691 			if (cts->valid & CCB_TRANS_TQ_VALID) {
1692 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
1693 					*dptr |= DPARM_TQING;
1694 				} else {
1695 					*dptr &= ~DPARM_TQING;
1696 				}
1697 			}
1698 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
1699 				switch (cts->bus_width) {
1700 				case MSG_EXT_WDTR_BUS_16_BIT:
1701 					*dptr |= DPARM_WIDE;
1702 					break;
1703 				default:
1704 					*dptr &= ~DPARM_WIDE;
1705 				}
1706 			}
1707 			/*
1708 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
1709 			 * of nonzero will cause us to go to the
1710 			 * selected (from NVRAM) maximum value for
1711 			 * this device. At a later point, we'll
1712 			 * allow finer control.
1713 			 */
1714 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
1715 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
1716 			    (cts->sync_offset > 0)) {
1717 				*dptr |= DPARM_SYNC;
1718 			} else {
1719 				*dptr &= ~DPARM_SYNC;
1720 			}
1721 			*dptr |= DPARM_SAFE_DFLT;
1722 			if (bootverbose || isp->isp_dblev >= 3)
1723 				printf("%s: %d.%d set %s period 0x%x offset "
1724 				    "0x%x flags 0x%x\n", isp->isp_name, bus,
1725 				    tgt,
1726 				    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
1727 				    "current" : "user",
1728 				    sdp->isp_devparam[tgt].sync_period,
1729 				    sdp->isp_devparam[tgt].sync_offset,
1730 				    sdp->isp_devparam[tgt].dev_flags);
1731 			sdp->isp_devparam[tgt].dev_update = 1;
1732 			isp->isp_update |= (1 << bus);
1733 		}
1734 		ISP_UNLOCK(isp);
1735 		ccb->ccb_h.status = CAM_REQ_CMP;
1736 		xpt_done(ccb);
1737 		break;
1738 
1739 	case XPT_GET_TRAN_SETTINGS:
1740 
1741 		cts = &ccb->cts;
1742 		tgt = cts->ccb_h.target_id;
1743 		if (IS_FC(isp)) {
1744 			/*
1745 			 * a lot of normal SCSI things don't make sense.
1746 			 */
1747 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
1748 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1749 			/*
1750 			 * How do you measure the width of a high
1751 			 * speed serial bus? Well, in bytes.
1752 			 *
1753 			 * Offset and period make no sense, though, so we set
1754 			 * (above) a 'base' transfer speed to be gigabit.
1755 			 */
1756 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1757 		} else {
1758 			sdparam *sdp = isp->isp_param;
1759 			u_int16_t dval, pval, oval;
1760 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1761 
1762 			sdp += bus;
1763 			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) {
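				/*
				 * Ask the core code to refresh the current
				 * (negotiated) parameters before reporting
				 * them.
				 */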
1764 				ISP_LOCK(isp);
1765 				sdp->isp_devparam[tgt].dev_refresh = 1;
1766 				isp->isp_update |= (1 << bus);
1767 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
1768 				    NULL);
1769 				ISP_UNLOCK(isp);
1770 				dval = sdp->isp_devparam[tgt].cur_dflags;
1771 				oval = sdp->isp_devparam[tgt].cur_offset;
1772 				pval = sdp->isp_devparam[tgt].cur_period;
1773 			} else {
1774 				dval = sdp->isp_devparam[tgt].dev_flags;
1775 				oval = sdp->isp_devparam[tgt].sync_offset;
1776 				pval = sdp->isp_devparam[tgt].sync_period;
1777 			}
1778 
1779 			ISP_LOCK(isp);
1780 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
1781 
1782 			if (dval & DPARM_DISC) {
1783 				cts->flags |= CCB_TRANS_DISC_ENB;
1784 			}
1785 			if (dval & DPARM_TQING) {
1786 				cts->flags |= CCB_TRANS_TAG_ENB;
1787 			}
1788 			if (dval & DPARM_WIDE) {
1789 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1790 			} else {
1791 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1792 			}
1793 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
1794 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1795 
1796 			if ((dval & DPARM_SYNC) && oval != 0) {
1797 				cts->sync_period = pval;
1798 				cts->sync_offset = oval;
1799 				cts->valid |=
1800 				    CCB_TRANS_SYNC_RATE_VALID |
1801 				    CCB_TRANS_SYNC_OFFSET_VALID;
1802 			}
1803 			ISP_UNLOCK(isp);
1804 			if (bootverbose || isp->isp_dblev >= 3)
1805 				printf("%s: %d.%d get %s period 0x%x offset "
1806 				    "0x%x flags 0x%x\n", isp->isp_name, bus,
1807 				    tgt,
1808 			    	    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
1809 				    "current" : "user", pval, oval, dval);
1810 		}
1811 		ccb->ccb_h.status = CAM_REQ_CMP;
1812 		xpt_done(ccb);
1813 		break;
1814 
1815 	case XPT_CALC_GEOMETRY:
1816 	{
1817 		struct ccb_calc_geometry *ccg;
1818 		u_int32_t secs_per_cylinder;
1819 		u_int32_t size_mb;
1820 
1821 		ccg = &ccb->ccg;
1822 		if (ccg->block_size == 0) {
1823 			printf("%s: %d.%d XPT_CALC_GEOMETRY block size 0?\n",
1824 				isp->isp_name, ccg->ccb_h.target_id,
1825 				ccg->ccb_h.target_lun);
1826 			ccb->ccb_h.status = CAM_REQ_INVALID;
1827 			xpt_done(ccb);
1828 			break;
1829 		}
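		/*
		 * Synthesize a drive geometry from the volume size: volumes
		 * over 1GB get 255 heads and 63 sectors per track, smaller
		 * ones 64 and 32.  For example, with 512 byte blocks,
		 * size_mb works out to volume_size / 2048.
		 */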
1830 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
1831 		if (size_mb > 1024) {
1832 			ccg->heads = 255;
1833 			ccg->secs_per_track = 63;
1834 		} else {
1835 			ccg->heads = 64;
1836 			ccg->secs_per_track = 32;
1837 		}
1838 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1839 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1840 		ccb->ccb_h.status = CAM_REQ_CMP;
1841 		xpt_done(ccb);
1842 		break;
1843 	}
1844 	case XPT_RESET_BUS:		/* Reset the specified bus */
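		/*
		 * Ask the core code to reset the bus; on success, post an
		 * AC_BUS_RESET async event on the path for that bus so
		 * peripheral drivers can recover.
		 */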
1845 		bus = cam_sim_bus(sim);
1846 		ISP_LOCK(isp);
1847 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
1848 		ISP_UNLOCK(isp);
1849 		if (error)
1850 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1851 		else {
1852 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
1853 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
1854 			else if (isp->isp_path != NULL)
1855 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
1856 			ccb->ccb_h.status = CAM_REQ_CMP;
1857 		}
1858 		xpt_done(ccb);
1859 		break;
1860 
1861 	case XPT_TERM_IO:		/* Terminate the I/O process */
1862 		ccb->ccb_h.status = CAM_REQ_INVALID;
1863 		xpt_done(ccb);
1864 		break;
1865 
1866 	case XPT_PATH_INQ:		/* Path routing inquiry */
1867 	{
1868 		struct ccb_pathinq *cpi = &ccb->cpi;
1869 
1870 		cpi->version_num = 1;
1871 #ifdef	ISP_TARGET_MODE
1872 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
1873 #else
1874 		cpi->target_sprt = 0;
1875 #endif
1876 		cpi->hba_eng_cnt = 0;
1877 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
1878 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
1879 		cpi->bus_id = cam_sim_bus(sim);
1880 		if (IS_FC(isp)) {
1881 			cpi->hba_misc = PIM_NOBUSRESET;
1882 			/*
1883 			 * Because our loop ID can shift from time to time,
1884 			 * make our initiator ID out of range of our bus.
1885 			 */
1886 			cpi->initiator_id = cpi->max_target + 1;
1887 
1888 			/*
1889 			 * Set base transfer capabilities for Fibre Channel.
1890 			 * Technically not correct because we don't know
1891 			 * what media we're running on top of, but we'll
1892 			 * look good if we always say 100MB/s.
1893 			 */
1894 			cpi->base_transfer_speed = 100000;
1895 			cpi->hba_inquiry = PI_TAG_ABLE;
1896 		} else {
1897 			sdparam *sdp = isp->isp_param;
1898 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
1899 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1900 			cpi->hba_misc = 0;
1901 			cpi->initiator_id = sdp->isp_initiator_id;
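			/*
			 * Base transfer speed is in KB/s; 3300 is roughly the
			 * nominal asynchronous narrow SCSI rate.
			 */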
1902 			cpi->base_transfer_speed = 3300;
1903 		}
1904 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1905 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
1906 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1907 		cpi->unit_number = cam_sim_unit(sim);
1908 		cpi->ccb_h.status = CAM_REQ_CMP;
1909 		xpt_done(ccb);
1910 		break;
1911 	}
1912 	default:
1913 		ccb->ccb_h.status = CAM_REQ_INVALID;
1914 		xpt_done(ccb);
1915 		break;
1916 	}
1917 }
1918 
1919 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
1920 void
1921 isp_done(struct ccb_scsiio *sccb)
1922 {
1923 	struct ispsoftc *isp = XS_ISP(sccb);
1924 
1925 	if (XS_NOERR(sccb))
1926 		XS_SETERR(sccb, CAM_REQ_CMP);
1927 
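	/*
	 * If the transport completed the command but the SCSI status is not
	 * OK, convert the CAM status: a CHECK CONDITION without valid
	 * autosense data becomes CAM_AUTOSENSE_FAIL, anything else becomes
	 * CAM_SCSI_STATUS_ERROR.
	 */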
1928 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
1929 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
1930 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
1931 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
1932 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
1933 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
1934 		} else {
1935 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1936 		}
1937 	}
1938 
1939 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
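	/*
	 * For any command that did not complete cleanly, freeze the device
	 * queue (once) so that CAM can sort out error recovery before more
	 * commands are started to this device.
	 */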
1940 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1941 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1942 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
1943 			xpt_freeze_devq(sccb->ccb_h.path, 1);
1944 			if (sccb->scsi_status != SCSI_STATUS_OK)
1945 				isp_prt(isp, ISP_LOGDEBUG2,
1946 				    "freeze devq %d.%d %x %x",
1947 				    sccb->ccb_h.target_id,
1948 				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
1949 				    sccb->scsi_status);
1950 		}
1951 	}
1952 
1953 	/*
1954 	 * If this command was frozen waiting for resources, clear that
1955 	 * condition.  If no other freeze reason remains and the device
1956 	 * queue isn't frozen either, mark the completing CCB so that the
1957 	 * XPT layer releases the simq.
1958 	 */
1959 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1960 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1961 		if (isp->isp_osinfo.simqfrozen == 0) {
1962 			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1963 				isp_prt(isp, ISP_LOGDEBUG2,
1964 				    "isp_done->relsimq");
1965 				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1966 			} else {
1967 				isp_prt(isp, ISP_LOGDEBUG2,
1968 				    "isp_done->devq frozen");
1969 			}
1970 		} else {
1971 			isp_prt(isp, ISP_LOGDEBUG2,
1972 			    "isp_done -> simqfrozen = %x",
1973 			    isp->isp_osinfo.simqfrozen);
1974 		}
1975 	}
1976 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
1977 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1978 		xpt_print_path(sccb->ccb_h.path);
1979 		printf("cam completion status 0x%x\n", sccb->ccb_h.status);
1980 	}
1981 
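	/*
	 * Only hand the CCB back to CAM if the watchdog isn't currently
	 * running against it; cancel any pending watchdog timeout first.
	 */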
1982 	XS_CMD_S_DONE(sccb);
1983 	if (XS_CMD_WDOG_P(sccb) == 0) {
1984 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
1985 		if (XS_CMD_GRACE_P(sccb)) {
1986 			isp_prt(isp, ISP_LOGDEBUG2,
1987 			    "finished command on borrowed time");
1988 		}
1989 		XS_CMD_S_CLEAR(sccb);
1990 		xpt_done((union ccb *) sccb);
1991 	}
1992 }
1993 
1994 int
1995 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
1996 {
1997 	int bus, rv = 0;
1998 	switch (cmd) {
1999 	case ISPASYNC_NEW_TGT_PARAMS:
2000 	{
2001 		int flags, tgt;
2002 		sdparam *sdp = isp->isp_param;
2003 		struct ccb_trans_settings neg;
2004 		struct cam_path *tmppath;
2005 
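		/*
		 * The argument packs the bus number into the upper 16 bits
		 * and the target into the lower 16 bits.
		 */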
2006 		tgt = *((int *)arg);
2007 		bus = (tgt >> 16) & 0xffff;
2008 		tgt &= 0xffff;
2009 		sdp += bus;
2010 		if (xpt_create_path(&tmppath, NULL,
2011 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2012 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2013 			xpt_print_path(isp->isp_path);
2014 			printf("isp_async cannot make temp path for "
2015 			    "target %d bus %d\n", tgt, bus);
2016 			rv = -1;
2017 			break;
2018 		}
2019 		flags = sdp->isp_devparam[tgt].cur_dflags;
2020 		neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
		neg.flags = 0;	/* neg is uninitialized stack memory; clear before OR-ing in flags */
2021 		if (flags & DPARM_DISC) {
2022 			neg.flags |= CCB_TRANS_DISC_ENB;
2023 		}
2024 		if (flags & DPARM_TQING) {
2025 			neg.flags |= CCB_TRANS_TAG_ENB;
2026 		}
2027 		neg.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2028 		neg.bus_width = (flags & DPARM_WIDE)?
2029 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2030 		neg.sync_period = sdp->isp_devparam[tgt].cur_period;
2031 		neg.sync_offset = sdp->isp_devparam[tgt].cur_offset;
2032 		if (flags & DPARM_SYNC) {
2033 			neg.valid |=
2034 			    CCB_TRANS_SYNC_RATE_VALID |
2035 			    CCB_TRANS_SYNC_OFFSET_VALID;
2036 		}
2037 		isp_prt(isp, ISP_LOGDEBUG2,
2038 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2039 		    bus, tgt, neg.sync_period, neg.sync_offset, flags);
2040 		xpt_setup_ccb(&neg.ccb_h, tmppath, 1);
2041 		xpt_async(AC_TRANSFER_NEG, tmppath, &neg);
2042 		xpt_free_path(tmppath);
2043 		break;
2044 	}
2045 	case ISPASYNC_BUS_RESET:
2046 		bus = *((int *)arg);
2047 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2048 		    bus);
2049 		if (bus > 0 && isp->isp_path2) {
2050 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2051 		} else if (isp->isp_path) {
2052 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2053 		}
2054 		break;
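	/*
	 * While the Fibre Channel loop is down, keep the SIM queue frozen so
	 * no new commands are started; release it again once the loop comes
	 * back up and no other freeze reason remains.
	 */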
2055 	case ISPASYNC_LOOP_DOWN:
2056 		if (isp->isp_path) {
2057 			if (isp->isp_osinfo.simqfrozen == 0) {
2058 				isp_prt(isp, ISP_LOGDEBUG2,
2059 				    "loop down freeze simq");
2060 				xpt_freeze_simq(isp->isp_sim, 1);
2061 			}
2062 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2063 		}
2064 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2065 		break;
2066 	case ISPASYNC_LOOP_UP:
2067 		if (isp->isp_path) {
2068 			int wasfrozen =
2069 			    isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2070 			isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2071 			if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2072 				xpt_release_simq(isp->isp_sim, 1);
2073 				isp_prt(isp, ISP_LOGDEBUG2,
2074 				    "loop up release simq");
2075 			}
2076 		}
2077 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2078 		break;
2079 	case ISPASYNC_PDB_CHANGED:
2080 	{
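		/*
		 * A port database entry changed: log whether the device at
		 * this loop ID arrived or disappeared, along with its role
		 * and world wide names.
		 */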
2081 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2082 		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2083 		static const char *roles[4] = {
2084 		    "(none)", "Target", "Initiator", "Target/Initiator"
2085 		};
2086 		char *ptr;
2087 		fcparam *fcp = isp->isp_param;
2088 		int tgt = *((int *) arg);
2089 		struct lportdb *lp = &fcp->portdb[tgt];
2090 
2091 		if (lp->valid) {
2092 			ptr = "arrived";
2093 		} else {
2094 			ptr = "disappeared";
2095 		}
2096 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2097 		    roles[lp->roles & 0x3], ptr,
2098 		    (u_int32_t) (lp->port_wwn >> 32),
2099 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2100 		    (u_int32_t) (lp->node_wwn >> 32),
2101 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2102 		break;
2103 	}
2104 	case ISPASYNC_CHANGE_NOTIFY:
2105 		isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
2106 		break;
2107 #ifdef	ISP2100_FABRIC
2108 	case ISPASYNC_FABRIC_DEV:
2109 	{
2110 		int target;
2111 		struct lportdb *lp;
2112 		char *pt;
2113 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
2114 		u_int32_t portid;
2115 		u_int64_t wwpn, wwnn;
2116 		fcparam *fcp = isp->isp_param;
2117 
2118 		rv = -1;
2119 
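		/*
		 * Assemble the 24-bit port ID and the 64-bit port and node
		 * world wide names from the byte arrays in the SNS response.
		 */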
2120 		portid =
2121 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
2122 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
2123 		    (((u_int32_t) resp->snscb_port_id[2]));
2124 
2125 		wwpn =
2126 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
2127 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
2128 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
2129 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
2130 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
2131 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
2132 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
2133 		    (((u_int64_t)resp->snscb_portname[7]));
2134 
2135 		wwnn =
2136 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
2137 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
2138 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
2139 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
2140 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
2141 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
2142 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
2143 		    (((u_int64_t)resp->snscb_nodename[7]));
2144 		if (portid == 0 || wwpn == 0) {
2145 			rv = 0;
2146 			break;
2147 		}
2148 
2149 		switch (resp->snscb_port_type) {
2150 		case 1:
2151 			pt = "   N_Port";
2152 			break;
2153 		case 2:
2154 			pt = "  NL_Port";
2155 			break;
2156 		case 3:
2157 			pt = "F/NL_Port";
2158 			break;
2159 		case 0x7f:
2160 			pt = "  Nx_Port";
2161 			break;
2162 		case 0x81:
2163 			pt = "   F_Port";
2164 			break;
2165 		case 0x82:
2166 			pt = "  FL_Port";
2167 			break;
2168 		case 0x84:
2169 			pt = "   E_Port";
2170 			break;
2171 		default:
2172 			pt = "?";
2173 			break;
2174 		}
2175 		isp_prt(isp, ISP_LOGINFO,
2176 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
2177 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
2178 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
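		/*
		 * If this WWN pair is already in the local port database,
		 * there is nothing more to do.
		 */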
2179 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
2180 			lp = &fcp->portdb[target];
2181 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn)
2182 				break;
2183 		}
2184 		if (target < MAX_FC_TARG) {
2185 			rv = 0;
2186 			break;
2187 		}
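		/*
		 * Otherwise claim the first free slot above the SNS ID for
		 * the newly discovered fabric device.
		 */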
2188 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
2189 			lp = &fcp->portdb[target];
2190 			if (lp->port_wwn == 0)
2191 				break;
2192 		}
2193 		if (target == MAX_FC_TARG) {
2194 			printf("%s: no more space for fabric devices\n",
2195 			    isp->isp_name);
2196 			break;
2197 		}
2198 		lp->node_wwn = wwnn;
2199 		lp->port_wwn = wwpn;
2200 		lp->portid = portid;
2201 		rv = 0;
2202 		break;
2203 	}
2204 #endif
2205 #ifdef	ISP_TARGET_MODE
2206 	case ISPASYNC_TARGET_MESSAGE:
2207 	{
2208 		tmd_msg_t *mp = arg;
2209 		isp_prt(isp, ISP_LOGDEBUG2,
2210 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2211 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2212 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2213 		    mp->nt_msg[0]);
2214 		break;
2215 	}
2216 	case ISPASYNC_TARGET_EVENT:
2217 	{
2218 		tmd_event_t *ep = arg;
2219 		isp_prt(isp, ISP_LOGDEBUG2,
2220 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2221 		break;
2222 	}
2223 	case ISPASYNC_TARGET_ACTION:
2224 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2225 		default:
2226 			printf("%s: event 0x%x for unhandled target action\n",
2227 			    isp->isp_name, ((isphdr_t *)arg)->rqs_entry_type);
2228 			break;
2229 		case RQSTYPE_ATIO:
2230 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2231 			break;
2232 		case RQSTYPE_ATIO2:
2233 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2234 			break;
2235 		case RQSTYPE_CTIO2:
2236 		case RQSTYPE_CTIO:
2237 			rv = isp_handle_platform_ctio(isp, arg);
2238 			break;
2239 		case RQSTYPE_ENABLE_LUN:
2240 		case RQSTYPE_MODIFY_LUN:
2241 			isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status);
2242 			break;
2243 		}
2244 		break;
2245 #endif
2246 	default:
2247 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2248 		rv = -1;
2249 		break;
2250 	}
2251 	return (rv);
2252 }
2253 
2254 
2255 /*
2256  * Locks are held before coming here.
2257  */
2258 void
2259 isp_uninit(struct ispsoftc *isp)
2260 {
2261 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2262 	DISABLE_INTS(isp);
2263 }
2264 
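/*
 * Platform logging hook: messages are printed only when the level is
 * ISP_LOGALL or intersects this instance's debug level (isp_dblev).
 */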
2265 void
2266 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2267 {
2268 	va_list ap;
2269 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2270 		return;
2271 	}
2272 	printf("%s: ", isp->isp_name);
2273 	va_start(ap, fmt);
2274 	vprintf(fmt, ap);
2275 	va_end(ap);
2276 	printf("\n");
2277 }
2278