1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  *---------------------------------------
6  * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
7  * NASA/Ames Research Center
8  * All rights reserved.
9  *---------------------------------------
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice immediately at the beginning of the file, without modification,
16  *    this list of conditions, and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
27  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <dev/isp/isp_freebsd.h>
36 #include <machine/stdarg.h>	/* for use by isp_prt below */
37 
38 static void isp_intr_enable(void *);
39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
40 static void isp_poll(struct cam_sim *);
41 static void isp_relsim(void *);
42 static timeout_t isp_watchdog;
43 static void isp_action(struct cam_sim *, union ccb *);
44 
45 
46 static struct ispsoftc *isplist = NULL;
47 
48 void
49 isp_attach(struct ispsoftc *isp)
50 {
51 	int primary, secondary;
52 	struct ccb_setasync csa;
53 	struct cam_devq *devq;
54 	struct cam_sim *sim;
55 	struct cam_path *path;
56 
57 	/*
58 	 * Establish (in case of 12X0) which bus is the primary.
59 	 */
60 
61 	primary = 0;
62 	secondary = 1;
63 
64 	/*
65 	 * Create the device queue for our SIM(s).
66 	 */
67 	devq = cam_simq_alloc(isp->isp_maxcmds);
68 	if (devq == NULL) {
69 		return;
70 	}
71 
72 	/*
73 	 * Construct our SIM entry.
74 	 */
75 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
76 	    isp->isp_unit, 1, isp->isp_maxcmds, devq);
77 	if (sim == NULL) {
78 		cam_simq_free(devq);
79 		return;
80 	}
81 
82 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
83 	isp->isp_osinfo.ehook.ich_arg = isp;
84 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
85 		printf("%s: could not establish interrupt enable hook\n",
86 		    isp->isp_name);
87 		cam_sim_free(sim, TRUE);
88 		return;
89 	}
90 
91 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
92 		cam_sim_free(sim, TRUE);
93 		return;
94 	}
95 
96 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
97 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
98 		xpt_bus_deregister(cam_sim_path(sim));
99 		cam_sim_free(sim, TRUE);
100 		return;
101 	}
102 
103 	xpt_setup_ccb(&csa.ccb_h, path, 5);
104 	csa.ccb_h.func_code = XPT_SASYNC_CB;
105 	csa.event_enable = AC_LOST_DEVICE;
106 	csa.callback = isp_cam_async;
107 	csa.callback_arg = sim;
108 	xpt_action((union ccb *)&csa);
109 	isp->isp_sim = sim;
110 	isp->isp_path = path;
111 
112 	/*
113 	 * If we have a second channel, construct SIM entry for that.
114 	 */
115 	if (IS_DUALBUS(isp)) {
116 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
117 		    isp->isp_unit, 1, isp->isp_maxcmds, devq);
118 		if (sim == NULL) {
119 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
120 			xpt_free_path(isp->isp_path);
121 			cam_simq_free(devq);
122 			return;
123 		}
124 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
125 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
126 			xpt_free_path(isp->isp_path);
127 			cam_sim_free(sim, TRUE);
128 			return;
129 		}
130 
131 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
132 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
133 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
134 			xpt_free_path(isp->isp_path);
135 			xpt_bus_deregister(cam_sim_path(sim));
136 			cam_sim_free(sim, TRUE);
137 			return;
138 		}
139 
140 		xpt_setup_ccb(&csa.ccb_h, path, 5);
141 		csa.ccb_h.func_code = XPT_SASYNC_CB;
142 		csa.event_enable = AC_LOST_DEVICE;
143 		csa.callback = isp_cam_async;
144 		csa.callback_arg = sim;
145 		xpt_action((union ccb *)&csa);
146 		isp->isp_sim2 = sim;
147 		isp->isp_path2 = path;
148 	}
149 	isp->isp_state = ISP_RUNSTATE;
150 	ENABLE_INTS(isp);
151 	if (isplist == NULL) {
152 		isplist = isp;
153 	} else {
154 		struct ispsoftc *tmp = isplist;
155 		while (tmp->isp_osinfo.next) {
156 			tmp = tmp->isp_osinfo.next;
157 		}
158 		tmp->isp_osinfo.next = isp;
159 	}
160 }
161 
162 static void
163 isp_intr_enable(void *arg)
164 {
165 	struct ispsoftc *isp = arg;
166 	ENABLE_INTS(isp);
167 	isp->isp_osinfo.intsok = 1;
168 	/* Release our hook so that the boot can continue. */
169 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
170 }
171 
172 /*
173  * Put the target mode functions here, because some are inlines
174  */
175 
176 #ifdef	ISP_TARGET_MODE
177 
178 static __inline int is_lun_enabled(struct ispsoftc *, lun_id_t);
179 static __inline int are_any_luns_enabled(struct ispsoftc *);
180 static __inline tstate_t *get_lun_statep(struct ispsoftc *, lun_id_t);
181 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
182 static __inline int isp_psema_sig_rqe(struct ispsoftc *);
183 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
184 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
185 static __inline void isp_vsema_rqe(struct ispsoftc *);
186 static cam_status
187 create_lun_state(struct ispsoftc *, struct cam_path *, tstate_t **);
188 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
189 static void isp_en_lun(struct ispsoftc *, union ccb *);
190 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
191 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
192 static cam_status isp_target_putback_atio(struct ispsoftc *, union ccb *);
193 static timeout_t isp_refire_putback_atio;
194 
195 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
196 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
197 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
198 static void isp_handle_platform_ctio_part2(struct ispsoftc *, union ccb *);
199 
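/*
 * A rough map of the target mode bookkeeping below: per-LUN state
 * (tstate_t) lives in isp_osinfo.lun_hash, an array of singly linked
 * chains indexed by LUN_HASH_FUNC(lun). The wildcard lun uses the
 * separate isp_osinfo.tsdflt slot. Each tstate_t carries the CAM path
 * that enabled it, SLISTs of pending ATIO and INOT CCBs, and a 'hold'
 * reference count: get_lun_statep() takes a reference, rls_lun_statep()
 * drops it, and destroy_lun_state() only unlinks and frees a state
 * whose hold count has dropped to zero.
 */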
200 static __inline int
201 is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
202 {
203 	tstate_t *tptr;
204 	ISP_LOCK(isp);
205 	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
206 		ISP_UNLOCK(isp);
207 		return (0);
208 	}
209 	do {
210 		if (tptr->lun == (lun_id_t) lun) {
211 			ISP_UNLOCK(isp);
212 			return (1);
213 		}
214 	} while ((tptr = tptr->next) != NULL);
215 	ISP_UNLOCK(isp);
216 	return (0);
217 }
218 
219 static __inline int
220 are_any_luns_enabled(struct ispsoftc *isp)
221 {
222 	int i;
223 	for (i = 0; i < LUN_HASH_SIZE; i++) {
224 		if (isp->isp_osinfo.lun_hash[i]) {
225 			return (1);
226 		}
227 	}
228 	return (0);
229 }
230 
231 static __inline tstate_t *
232 get_lun_statep(struct ispsoftc *isp, lun_id_t lun)
233 {
234 	tstate_t *tptr;
235 
236 	ISP_LOCK(isp);
237 	if (lun == CAM_LUN_WILDCARD) {
238 		tptr = &isp->isp_osinfo.tsdflt;
239 		tptr->hold++;
240 		ISP_UNLOCK(isp);
241 		return (tptr);
242 	} else {
243 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)];
244 	}
245 	if (tptr == NULL) {
246 		ISP_UNLOCK(isp);
247 		return (NULL);
248 	}
249 
250 	do {
251 		if (tptr->lun == lun) {
252 			tptr->hold++;
253 			ISP_UNLOCK(isp);
254 			return (tptr);
255 		}
256 	} while ((tptr = tptr->next) != NULL);
257 	ISP_UNLOCK(isp);
258 	return (tptr);
259 }
260 
261 static __inline void
262 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
263 {
264 	if (tptr->hold)
265 		tptr->hold--;
266 }
267 
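/*
 * The next few inlines are a small sleep/wakeup protocol, roughly a
 * binary semaphore plus a condition variable, used to serialize the
 * lun enable/modify requests issued from isp_en_lun: isp_psema_sig_rqe
 * acquires TM_BUSY (sleeping interruptibly if another request holds it),
 * isp_cv_wait_timed_rqe sleeps until the firmware response is posted to
 * isp_osinfo.rstatus by isp_cv_signal_rqe, and isp_vsema_rqe releases
 * TM_BUSY and wakes any waiter that set TM_WANTED.
 */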
268 static __inline int
269 isp_psema_sig_rqe(struct ispsoftc *isp)
270 {
271 	ISP_LOCK(isp);
272 	while (isp->isp_osinfo.tmflags & TM_BUSY) {
273 		isp->isp_osinfo.tmflags |= TM_WANTED;
274 		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
275 			ISP_UNLOCK(isp);
276 			return (-1);
277 		}
278 	}
279 	isp->isp_osinfo.tmflags |= TM_BUSY;
280 	ISP_UNLOCK(isp);
281 	return (0);
282 }
283 
284 static __inline int
285 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
286 {
287 	ISP_LOCK(isp);
288 	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
289 		ISP_UNLOCK(isp);
290 		return (-1);
291 	}
292 	ISP_UNLOCK(isp);
293 	return (0);
294 }
295 
296 static __inline void
297 isp_cv_signal_rqe(struct ispsoftc *isp, int status)
298 {
299 	isp->isp_osinfo.rstatus = status;
300 	wakeup(&isp->isp_osinfo.rstatus);
301 }
302 
303 static __inline void
304 isp_vsema_rqe(struct ispsoftc *isp)
305 {
306 	ISP_LOCK(isp);
307 	if (isp->isp_osinfo.tmflags & TM_WANTED) {
308 		isp->isp_osinfo.tmflags &= ~TM_WANTED;
309 		wakeup(&isp->isp_osinfo.tmflags);
310 	}
311 	isp->isp_osinfo.tmflags &= ~TM_BUSY;
312 	ISP_UNLOCK(isp);
313 }
314 
315 static cam_status
316 create_lun_state(struct ispsoftc *isp, struct cam_path *path, tstate_t **rslt)
317 {
318 	cam_status status;
319 	lun_id_t lun;
320 	tstate_t *tptr, *new;
321 
322 	lun = xpt_path_lun_id(path);
323 	if (lun < 0) {
324 		return (CAM_LUN_INVALID);
325 	}
326 	if (is_lun_enabled(isp, lun)) {
327 		return (CAM_LUN_ALRDY_ENA);
328 	}
329 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT);
330 	if (new == NULL) {
331 		return (CAM_RESRC_UNAVAIL);
332 	}
333 	bzero(new, sizeof (tstate_t));
334 
335 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
336 	    xpt_path_target_id(path), xpt_path_lun_id(path));
337 	if (status != CAM_REQ_CMP) {
338 		free(new, M_DEVBUF);
339 		return (status);
340 	}
341 	new->lun = lun;
342 	SLIST_INIT(&new->atios);
343 	SLIST_INIT(&new->inots);
344 	new->hold = 1;
345 
346 	ISP_LOCK(isp);
347 	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
348 		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)] = new;
349 	} else {
350 		while (tptr->next)
351 			tptr = tptr->next;
352 		tptr->next = new;
353 	}
354 	ISP_UNLOCK(isp);
355 	*rslt = new;
356 	return (CAM_REQ_CMP);
357 }
358 
359 static __inline void
360 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
361 {
362 	tstate_t *lw, *pw;
363 
364 	ISP_LOCK(isp);
365 	if (tptr->hold) {
366 		ISP_UNLOCK(isp);
367 		return;
368 	}
369 	pw = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)];
370 	if (pw == NULL) {
371 		ISP_UNLOCK(isp);
372 		return;
373 	} else if (pw->lun == tptr->lun) {
374 		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)] = pw->next;
375 	} else {
376 		lw = pw;
377 		pw = lw->next;
378 		while (pw) {
379 			if (pw->lun == tptr->lun) {
380 				lw->next = pw->next;
381 				break;
382 			}
383 			lw = pw;
384 			pw = pw->next;
385 		}
386 		if (pw == NULL) {
387 			ISP_UNLOCK(isp);
388 			return;
389 		}
390 	}
391 	free(tptr, M_DEVBUF);
392 	ISP_UNLOCK(isp);
393 }
394 
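/*
 * isp_en_lun handles XPT_EN_LUN, in outline: for a wildcard target and
 * lun it just toggles target mode in the firmware (ISPCTL_TOGGLE_TMODE);
 * for a specific lun it creates (or finds) the tstate_t, takes the
 * request semaphore above, issues the ENABLE LUN request (or the
 * corresponding MODIFY LUN and ENABLE LUN requests for disable) via
 * isp_lun_cmd, and sleeps up to 30 seconds for each firmware completion
 * before reporting the result in the CCB status.
 */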
395 static void
396 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
397 {
398 	const char *lfmt = "Lun now %sabled for target mode\n";
399 	struct ccb_en_lun *cel = &ccb->cel;
400 	tstate_t *tptr;
401 	u_int16_t rstat;
402 	int bus;
403 	lun_id_t lun;
404 	target_id_t tgt;
405 
406 
407 	bus = XS_CHANNEL(ccb);
408 	tgt = ccb->ccb_h.target_id;
409 	lun = ccb->ccb_h.target_lun;
410 
411 	/*
412 	 * First, check to see if we're enabling on fibre channel
413 	 * and don't yet have a notion of who the heck we are (no
414 	 * loop yet).
415 	 */
416 	if (IS_FC(isp) && cel->enable &&
417 	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
418 		int rv = 2 * 1000000;
419 		fcparam *fcp = isp->isp_param;
420 
421 		ISP_LOCK(isp);
422 		rv = isp_control(isp, ISPCTL_FCLINK_TEST, &rv);
423 		ISP_UNLOCK(isp);
424 		if (rv || fcp->isp_fwstate != FW_READY) {
425 			xpt_print_path(ccb->ccb_h.path);
426 			printf("link status not good yet\n");
427 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
428 			return;
429 		}
430 		ISP_LOCK(isp);
431 		rv = isp_control(isp, ISPCTL_PDB_SYNC, NULL);
432 		ISP_UNLOCK(isp);
433 		if (rv || fcp->isp_fwstate != FW_READY) {
434 			xpt_print_path(ccb->ccb_h.path);
435 			printf("could not get a good port database read\n");
436 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
437 			return;
438 		}
439 	}
440 
441 
442 	/*
443 	 * Next check to see whether this is a target/lun wildcard action.
444 	 *
445 	 * If so, we enable/disable target mode but don't do any lun enabling.
446 	 */
447 	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
448 		int av;
449 		tptr = &isp->isp_osinfo.tsdflt;
450 		if (cel->enable) {
451 			if (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) {
452 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
453 				return;
454 			}
455 			ccb->ccb_h.status =
456 			    xpt_create_path(&tptr->owner, NULL,
457 			    xpt_path_path_id(ccb->ccb_h.path),
458 			    xpt_path_target_id(ccb->ccb_h.path),
459 			    xpt_path_lun_id(ccb->ccb_h.path));
460 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
461 				return;
462 			}
463 			SLIST_INIT(&tptr->atios);
464 			SLIST_INIT(&tptr->inots);
465 			av = 1;
466 			ISP_LOCK(isp);
467 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
468 			if (av) {
469 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
470 				xpt_free_path(tptr->owner);
471 				ISP_UNLOCK(isp);
472 				return;
473 			}
474 			isp->isp_osinfo.tmflags |= TM_TMODE_ENABLED;
475 			ISP_UNLOCK(isp);
476 		} else {
477 			if ((isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
478 				ccb->ccb_h.status = CAM_LUN_INVALID;
479 				return;
480 			}
481 			if (are_any_luns_enabled(isp)) {
482 				ccb->ccb_h.status = CAM_SCSI_BUSY;
483 				return;
484 			}
485 			av = 0;
486 			ISP_LOCK(isp);
487 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
488 			if (av) {
489 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
490 				ISP_UNLOCK(isp);
491 				return;
492 			}
493 			isp->isp_osinfo.tmflags &= ~TM_TMODE_ENABLED;
494 			ISP_UNLOCK(isp);
495 			ccb->ccb_h.status = CAM_REQ_CMP;
496 		}
497 		xpt_print_path(ccb->ccb_h.path);
498 		printf(lfmt, (cel->enable) ? "en" : "dis");
499 		return;
500 	}
501 
502 	/*
503 	 * Do some sanity checking first.
504 	 */
505 
506 	if (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns) {
507 		ccb->ccb_h.status = CAM_LUN_INVALID;
508 		return;
509 	}
510 	if (IS_SCSI(isp)) {
511 		if (tgt != CAM_TARGET_WILDCARD &&
512 		    tgt != ((sdparam *) isp->isp_param)->isp_initiator_id) {
513 			ccb->ccb_h.status = CAM_TID_INVALID;
514 			return;
515 		}
516 	} else {
517 		if (tgt != CAM_TARGET_WILDCARD &&
518 		    tgt != ((fcparam *) isp->isp_param)->isp_loopid) {
519 			ccb->ccb_h.status = CAM_TID_INVALID;
520 			return;
521 		}
522 	}
523 
524 
525 	if (cel->enable) {
526 		ccb->ccb_h.status =
527 		    create_lun_state(isp, ccb->ccb_h.path, &tptr);
528 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
529 			return;
530 		}
531 	} else {
532 		tptr = get_lun_statep(isp, lun);
533 		if (tptr == NULL) {
534 			ccb->ccb_h.status = CAM_LUN_INVALID;
535 			return;
536 		}
537 	}
538 
539 	if (isp_psema_sig_rqe(isp)) {
540 		rls_lun_statep(isp, tptr);
541 		if (cel->enable)
542 			destroy_lun_state(isp, tptr);
543 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
544 		return;
545 	}
546 
547 	ISP_LOCK(isp);
548 	if (cel->enable) {
549 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
550 		rstat = LUN_ERR;
551 		if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
552 			xpt_print_path(ccb->ccb_h.path);
553 			printf("isp_lun_cmd failed\n");
554 			goto out;
555 		}
556 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
557 			xpt_print_path(ccb->ccb_h.path);
558 			printf("wait for ENABLE LUN timed out\n");
559 			goto out;
560 		}
561 		rstat = isp->isp_osinfo.rstatus;
562 		if (rstat != LUN_OK) {
563 			xpt_print_path(ccb->ccb_h.path);
564 			printf("ENABLE LUN returned 0x%x\n", rstat);
565 			goto out;
566 		}
567 	} else {
568 		u_int32_t seq;
569 
570 		seq = isp->isp_osinfo.rollinfo++;
571 		rstat = LUN_ERR;
572 
573 		if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) {
574 			xpt_print_path(ccb->ccb_h.path);
575 			printf("isp_lun_cmd failed\n");
576 			goto out;
577 		}
578 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
579 			xpt_print_path(ccb->ccb_h.path);
580 			printf("wait for MODIFY LUN timed out\n");
581 			goto out;
582 		}
583 		rstat = isp->isp_osinfo.rstatus;
584 		if (rstat != LUN_OK) {
585 			xpt_print_path(ccb->ccb_h.path);
586 			printf("MODIFY LUN returned 0x%x\n", rstat);
587 			goto out;
588 		}
589 		rstat = LUN_ERR;
590 		seq = isp->isp_osinfo.rollinfo++;
591 
592 		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
593 			xpt_print_path(ccb->ccb_h.path);
594 			printf("isp_lun_cmd failed\n");
595 			goto out;
596 		}
597 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
598 			xpt_print_path(ccb->ccb_h.path);
599 			printf("wait for ENABLE LUN timed out\n");
600 			goto out;
601 		}
602 		rstat = isp->isp_osinfo.rstatus;
603 		if (rstat != LUN_OK) {
604 			xpt_print_path(ccb->ccb_h.path);
605 			printf("ENABLE LUN returned 0x%x\n", rstat);
606 			goto out;
607 		}
608 	}
609 out:
610 	isp_vsema_rqe(isp);
611 	ISP_UNLOCK(isp);
612 
613 	if (rstat != LUN_OK) {
614 		xpt_print_path(ccb->ccb_h.path);
615 		printf("lun %sable failed\n", (cel->enable) ? "en" : "dis");
616 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
617 		rls_lun_statep(isp, tptr);
618 		if (cel->enable)
619 			destroy_lun_state(isp, tptr);
620 	} else {
621 		xpt_print_path(ccb->ccb_h.path);
622 		printf(lfmt, (cel->enable) ? "en" : "dis");
623 		rls_lun_statep(isp, tptr);
624 		if (cel->enable == 0) {
625 			destroy_lun_state(isp, tptr);
626 		}
627 		ccb->ccb_h.status = CAM_REQ_CMP;
628 	}
629 }
630 
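/*
 * isp_abort_tgt_ccb aborts a not-yet-consumed ATIO or INOT CCB: it
 * checks that the target id names this adapter, finds the lun state,
 * unlinks the CCB from the pending atios or inots list, and marks it
 * CAM_REQ_ABORTED. In-progress CTIOs cannot be aborted here.
 */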
631 static cam_status
632 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
633 {
634 	tstate_t *tptr;
635 	struct ccb_hdr_slist *lp;
636 	struct ccb_hdr *curelm;
637 	int found;
638 	union ccb *accb = ccb->cab.abort_ccb;
639 
640 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
641 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
642 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
643 			return (CAM_PATH_INVALID);
644 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
645 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
646 			return (CAM_PATH_INVALID);
647 		}
648 	}
649 	tptr = get_lun_statep(isp, accb->ccb_h.target_lun);
650 	if (tptr == NULL) {
651 		return (CAM_PATH_INVALID);
652 	}
653 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
654 		lp = &tptr->atios;
655 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
656 		lp = &tptr->inots;
657 	} else {
658 		rls_lun_statep(isp, tptr);
659 		return (CAM_UA_ABORT);
660 	}
661 	curelm = SLIST_FIRST(lp);
662 	found = 0;
663 	if (curelm == &accb->ccb_h) {
664 		found = 1;
665 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
666 	} else {
667 		while(curelm != NULL) {
668 			struct ccb_hdr *nextelm;
669 
670 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
671 			if (nextelm == &accb->ccb_h) {
672 				found = 1;
673 				SLIST_NEXT(curelm, sim_links.sle) =
674 				    SLIST_NEXT(nextelm, sim_links.sle);
675 				break;
676 			}
677 			curelm = nextelm;
678 		}
679 	}
680 	rls_lun_statep(isp, tptr);
681 	if (found) {
682 		accb->ccb_h.status = CAM_REQ_ABORTED;
683 		return (CAM_REQ_CMP);
684 	}
685 	return(CAM_PATH_INVALID);
686 }
687 
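/*
 * isp_target_start_ctio builds the continue-target-I/O queue entry for
 * an XPT_CONT_TARGET_IO CCB. For Fibre Channel this is a CTIO2 keyed by
 * the RX_ID carried over from the ATIO in cso->tag_id: mode 1 (status,
 * and optionally sense, with no data) when dxfer_len is zero, mode 0
 * (data, optionally with status) otherwise. For parallel SCSI it is a
 * plain CTIO carrying target, lun and tag. In both cases the CCB is
 * registered as the handle via isp_save_xs before DMA setup queues the
 * entry to the firmware.
 */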
688 static cam_status
689 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
690 {
691 	void *qe;
692 	struct ccb_scsiio *cso = &ccb->csio;
693 	u_int32_t *hp, save_handle;
694 	u_int16_t iptr, optr;
695 
696 
697 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
698 		xpt_print_path(ccb->ccb_h.path);
699 		printf("Request Queue Overflow in isp_target_start_ctio\n");
700 		return (CAM_RESRC_UNAVAIL);
701 	}
702 	bzero(qe, QENTRY_LEN);
703 
704 	/*
705 	 * We're either moving data or completing a command here.
706 	 */
707 
708 	if (IS_FC(isp)) {
709 		struct ccb_accept_tio *atiop;
710 		ct2_entry_t *cto = qe;
711 
712 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
713 		cto->ct_header.rqs_entry_count = 1;
714 		cto->ct_iid = cso->init_id;
715 		if (isp->isp_maxluns <= 16) {
716 			cto->ct_lun = ccb->ccb_h.target_lun;
717 		}
718 		/*
719 		 * Start with a residual based on what the original datalength
720 		 * was supposed to be. Basically, we ignore what CAM has set
721 		 * for residuals. The data transfer routines will knock off
722 		 * the residual for each byte actually moved- and also will
723 		 * be responsible for setting the underrun flag.
724 		 */
725 		/* HACK! HACK! */
726 		if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
727 			cto->ct_resid = atiop->ccb_h.spriv_field0;
728 		}
729 
730 		/*
731 		 * We always have to use the tag_id- it has the RX_ID
732 		 * for this exchange.
733 		 */
734 		cto->ct_rxid = cso->tag_id;
735 		if (cso->dxfer_len == 0) {
736 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
737 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
738 				cto->ct_flags |= CT2_SENDSTATUS;
739 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
740 			}
741 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
742 				int m = min(cso->sense_len, MAXRESPLEN);
743 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
744 				cto->rsp.m1.ct_senselen = m;
745 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
746 			}
747 		} else {
748 			cto->ct_flags |= CT2_FLAG_MODE0;
749 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
750 				cto->ct_flags |= CT2_DATA_IN;
751 			} else {
752 				cto->ct_flags |= CT2_DATA_OUT;
753 			}
754 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
755 				cto->ct_flags |= CT2_SENDSTATUS;
756 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
757 			}
758 			/*
759 			 * If we're sending data and status back together,
760 			 * we can't also send back sense data as well.
761 			 */
762 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
763 		}
764 		if (cto->ct_flags & CT2_SENDSTATUS) {
765 			isp_prt(isp, ISP_LOGTDEBUG2,
766 			    "CTIO2 RX_ID 0x%x SCSI STATUS 0x%x datalength %u",
767 			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
768 		}
769 		hp = &cto->ct_reserved;
770 	} else {
771 		ct_entry_t *cto = qe;
772 
773 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
774 		cto->ct_header.rqs_entry_count = 1;
775 		cto->ct_iid = cso->init_id;
776 		cto->ct_tgt = ccb->ccb_h.target_id;
777 		cto->ct_lun = ccb->ccb_h.target_lun;
778 		if (cso->tag_id && cso->tag_action) {
779 			/*
780 			 * We don't specify a tag type for regular SCSI.
781 			 * Just the tag value and set the flag.
782 			 */
783 			cto->ct_tag_val = cso->tag_id;
784 			cto->ct_flags |= CT_TQAE;
785 		}
786 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
787 			cto->ct_flags |= CT_NODISC;
788 		}
789 		if (cso->dxfer_len == 0) {
790 			cto->ct_flags |= CT_NO_DATA;
791 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
792 			cto->ct_flags |= CT_DATA_IN;
793 		} else {
794 			cto->ct_flags |= CT_DATA_OUT;
795 		}
796 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
797 			cto->ct_flags |= CT_SENDSTATUS;
798 			cto->ct_scsi_status = cso->scsi_status;
799 			cto->ct_resid = cso->resid;
800 		}
801 		if (cto->ct_flags & CT_SENDSTATUS) {
802 			isp_prt(isp, ISP_LOGTDEBUG2,
803 			    "CTIO SCSI STATUS 0x%x resid %d",
804 			    cso->scsi_status, cso->resid);
805 		}
806 		hp = &cto->ct_reserved;
807 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
808 	}
809 
810 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
811 		xpt_print_path(ccb->ccb_h.path);
812 		printf("No XFLIST pointers for isp_target_start_ctio\n");
813 		return (CAM_RESRC_UNAVAIL);
814 	}
815 
816 
817 	/*
818 	 * Call the dma setup routines for this entry (and any subsequent
819 	 * CTIOs) if there's data to move, and then tell the f/w it's got
820 	 * new things to play with. As with isp_start's usage of DMA setup,
821 	 * any swizzling is done in the machine dependent layer. Because
822 	 * of this, we put the request onto the queue area first in native
823 	 * format.
824 	 */
825 
826 	save_handle = *hp;
827 	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
828 	case CMD_QUEUED:
829 		ISP_ADD_REQUEST(isp, iptr);
830 		return (CAM_REQ_INPROG);
831 
832 	case CMD_EAGAIN:
833 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
834 		isp_destroy_handle(isp, save_handle);
835 		return (CAM_RESRC_UNAVAIL);
836 
837 	default:
838 		isp_destroy_handle(isp, save_handle);
839 		return (XS_ERR(ccb));
840 	}
841 }
842 
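/*
 * isp_target_putback_atio hands an ATIO resource back to the firmware
 * once the command it described has been dealt with: it rebuilds an
 * ATIO (or ATIO2 for Fibre Channel) queue entry from the CCB, marks it
 * CT_OK, and pushes it onto the request queue so the firmware has the
 * resource available again.
 */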
843 static cam_status
844 isp_target_putback_atio(struct ispsoftc *isp, union ccb *ccb)
845 {
846 	void *qe;
847 	struct ccb_accept_tio *atiop;
848 	u_int16_t iptr, optr;
849 
850 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
851 		xpt_print_path(ccb->ccb_h.path);
852 		printf("Request Queue Overflow in isp_target_putback_atio\n");
853 		return (CAM_RESRC_UNAVAIL);
854 	}
855 	bzero(qe, QENTRY_LEN);
856 	atiop = (struct ccb_accept_tio *) ccb;
857 	if (IS_FC(isp)) {
858 		at2_entry_t *at = qe;
859 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
860 		at->at_header.rqs_entry_count = 1;
861 		if (isp->isp_maxluns > 16) {
862 			at->at_scclun = (uint16_t) atiop->ccb_h.target_lun;
863 		} else {
864 			at->at_lun = (uint8_t) atiop->ccb_h.target_lun;
865 		}
866 		at->at_status = CT_OK;
867 		at->at_rxid = atiop->tag_id;
868 		ISP_SWIZ_ATIO2(isp, qe, qe);
869 	} else {
870 		at_entry_t *at = qe;
871 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
872 		at->at_header.rqs_entry_count = 1;
873 		at->at_iid = atiop->init_id;
874 		at->at_tgt = atiop->ccb_h.target_id;
875 		at->at_lun = atiop->ccb_h.target_lun;
876 		at->at_status = CT_OK;
877 		if (atiop->ccb_h.status & CAM_TAG_ACTION_VALID) {
878 			at->at_tag_type = atiop->tag_action;
879 		}
880 		at->at_tag_val = atiop->tag_id;
881 		ISP_SWIZ_ATIO(isp, qe, qe);
882 	}
883 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
884 	ISP_ADD_REQUEST(isp, iptr);
885 	return (CAM_REQ_CMP);
886 }
887 
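/*
 * If the request queue was full when we tried to return an ATIO,
 * isp_refire_putback_atio retries from a timeout(9) callout every ten
 * ticks until it succeeds, then finishes the CTIO completion.
 */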
888 static void
889 isp_refire_putback_atio(void *arg)
890 {
891 	union ccb *ccb = arg;
892 	int s = splcam();
893 	if (isp_target_putback_atio(XS_ISP(ccb), ccb) != CAM_REQ_CMP) {
894 		(void) timeout(isp_refire_putback_atio, ccb, 10);
895 	} else {
896 		isp_handle_platform_ctio_part2(XS_ISP(ccb), ccb);
897 	}
898 	splx(s);
899 }
900 
901 /*
902  * Handle ATIO stuff that the generic code can't.
903  * This means handling CDBs.
904  */
905 
906 static int
907 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
908 {
909 	tstate_t *tptr;
910 	int status;
911 	struct ccb_accept_tio *atiop;
912 
913 	/*
914 	 * The firmware status (except for the QLTM_SVALID bit)
915 	 * indicates why this ATIO was sent to us.
916 	 *
917 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
918 	 *
919 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
920 	 * we're still connected on the SCSI bus - i.e. the initiator
921 	 * did not set DiscPriv in the identify message. We don't care
922 	 * about this so it's ignored.
923 	 */
924 	status = aep->at_status;
925 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
926 		/*
927 		 * Bus Phase Sequence error. We should have sense data
928 		 * suggested by the f/w. I'm not sure quite yet what
929 		 * to do about this for CAM.
930 		 */
931 		printf("%s: PHASE ERROR\n", isp->isp_name);
932 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
933 		return (0);
934 	}
935 	if ((status & ~QLTM_SVALID) != AT_CDB) {
936 		printf("%s: bogus atio (0x%x) leaked to platform\n",
937 		    isp->isp_name, status);
938 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
939 		return (0);
940 	}
941 
942 	tptr = get_lun_statep(isp, aep->at_lun);
943 	if (tptr == NULL) {
944 		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
945 	}
946 
947 	if (tptr == NULL) {
948 		/*
949 		 * Because we can't autofeed sense data back with
950 		 * a command for parallel SCSI, we can't give back
951 		 * a CHECK CONDITION. We'll give back a BUSY status
952 		 * instead. This works out okay because the only
953 		 * time we should, in fact, get this, is in the
954 		 * case that somebody configured us without the
955 		 * blackhole driver, so they get what they deserve.
956 		 */
957 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
958 		return (0);
959 	}
960 
961 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
962 	if (atiop == NULL) {
963 		/*
964 		 * Because we can't autofeed sense data back with
965 		 * a command for parallel SCSI, we can't give back
966 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
967 		 * instead. This works out okay because the only time we
968 		 * should, in fact, get this, is in the case that we've
969 		 * run out of ATIOS.
970 		 */
971 		xpt_print_path(tptr->owner);
972 		printf("no ATIOS for lun %d from initiator %d\n",
973 		    aep->at_lun, aep->at_iid);
974 		rls_lun_statep(isp, tptr);
975 		if (aep->at_flags & AT_TQAE)
976 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
977 		else
978 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
979 		return (0);
980 	}
981 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
982 	if (tptr == &isp->isp_osinfo.tsdflt) {
983 		atiop->ccb_h.target_id = aep->at_tgt;
984 		atiop->ccb_h.target_lun = aep->at_lun;
985 	}
986 	if (aep->at_flags & AT_NODISC) {
987 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
988 	} else {
989 		atiop->ccb_h.flags = 0;
990 	}
991 
992 	if (status & QLTM_SVALID) {
993 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
994 		atiop->sense_len = amt;
995 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
996 	} else {
997 		atiop->sense_len = 0;
998 	}
999 
1000 	atiop->init_id = aep->at_iid;
1001 	atiop->cdb_len = aep->at_cdblen;
1002 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1003 	atiop->ccb_h.status = CAM_CDB_RECVD;
1004 	atiop->tag_id = aep->at_tag_val;
1005 	if ((atiop->tag_action = aep->at_tag_type) != 0) {
1006 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1007 	}
1008 	xpt_done((union ccb*)atiop);
1009 	isp_prt(isp, ISP_LOGTDEBUG2,
1010 	    "ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s",
1011 	    aep->at_cdb[0] & 0xff, aep->at_iid, aep->at_lun,
1012 	    aep->at_tag_val & 0xff, aep->at_tag_type,
1013 	    (aep->at_flags & AT_NODISC)? "nondisc" : "disconnecting");
1014 	rls_lun_statep(isp, tptr);
1015 	return (0);
1016 }
1017 
1018 static int
1019 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1020 {
1021 	lun_id_t lun;
1022 	tstate_t *tptr;
1023 	struct ccb_accept_tio *atiop;
1024 
1025 	/*
1026 	 * The firmware status (except for the QLTM_SVALID bit)
1027 	 * indicates why this ATIO was sent to us.
1028 	 *
1029 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1030 	 */
1031 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1032 		printf("%s: bogus atio (0x%x) leaked to platform\n",
1033 		    isp->isp_name, aep->at_status);
1034 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1035 		return (0);
1036 	}
1037 
1038 	if (isp->isp_maxluns > 16) {
1039 		lun = aep->at_scclun;
1040 	} else {
1041 		lun = aep->at_lun;
1042 	}
1043 	tptr = get_lun_statep(isp, lun);
1044 	if (tptr == NULL) {
1045 		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
1046 	}
1047 
1048 	if (tptr == NULL) {
1049 		/*
1050 		 * What we'd like to know is whether or not we have a listener
1051 		 * upstream that really hasn't configured yet. If we do, then
1052 		 * we can give a more sensible reply here. If not, then we can
1053 		 * reject this out of hand.
1054 		 *
1055 		 * Choices for what to send were
1056 		 *
1057 		 *	Not Ready, Unit Not Self-Configured Yet
1058 		 *	(0x2,0x3e,0x00)
1059 		 *
1060 		 * for the former and
1061 		 *
1062 		 *	Illegal Request, Logical Unit Not Supported
1063 		 *	(0x5,0x25,0x00)
1064 		 *
1065 		 * for the latter.
1066 		 *
1067 		 * We used to decide whether there was at least one listener
1068 		 * based upon whether the black hole driver was configured.
1069 		 * However, recent config(8) changes have made this hard to do
1070 		 * at this time.
1071 		 *
1072 		 */
1073 		u_int32_t ccode = SCSI_STATUS_BUSY;
1074 
1075 		/*
1076 		 * Because we can't autofeed sense data back with
1077 		 * a command for parallel SCSI, we can't give back
1078 		 * a CHECK CONDITION. We'll give back a BUSY status
1079 		 * instead. This works out okay because the only
1080 		 * time we should, in fact, get this, is in the
1081 		 * case that somebody configured us without the
1082 		 * blackhole driver, so they get what they deserve.
1083 		 */
1084 		isp_endcmd(isp, aep, ccode, 0);
1085 		return (0);
1086 	}
1087 
1088 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1089 	if (atiop == NULL) {
1090 		/*
1091 		 * Because we can't autofeed sense data back with
1092 		 * a command for parallel SCSI, we can't give back
1093 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1094 		 * instead. This works out okay because the only time we
1095 		 * should, in fact, get this, is in the case that we've
1096 		 * run out of ATIOS.
1097 		 */
1098 		xpt_print_path(tptr->owner);
1099 		printf("no ATIOS for lun %d from initiator %d\n",
1100 		    lun, aep->at_iid);
1101 		rls_lun_statep(isp, tptr);
1102 		if (aep->at_flags & AT_TQAE)
1103 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1104 		else
1105 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1106 		return (0);
1107 	}
1108 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1109 
1110 	if (tptr == &isp->isp_osinfo.tsdflt) {
1111 		atiop->ccb_h.target_id =
1112 			((fcparam *)isp->isp_param)->isp_loopid;
1113 		atiop->ccb_h.target_lun = lun;
1114 	}
1115 	if (aep->at_status & QLTM_SVALID) {
1116 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1117 		atiop->sense_len = amt;
1118 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1119 	} else {
1120 		atiop->sense_len = 0;
1121 	}
1122 
1123 	atiop->init_id = aep->at_iid;
1124 	atiop->cdb_len = ATIO2_CDBLEN;
1125 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1126 	atiop->ccb_h.status = CAM_CDB_RECVD;
1127 	atiop->tag_id = aep->at_rxid;
1128 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1129 	case ATIO2_TC_ATTR_SIMPLEQ:
1130 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1131 		break;
1132 	case ATIO2_TC_ATTR_HEADOFQ:
1133 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1134 		break;
1135 	case ATIO2_TC_ATTR_ORDERED:
1136 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1137 		break;
1138 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1139 	case ATIO2_TC_ATTR_UNTAGGED:
1140 	default:
1141 		atiop->tag_action = 0;
1142 		break;
1143 	}
1144 	if (atiop->tag_action != 0) {
1145 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1146 	}
1147 
1148 	/*
1149 	 * Preserve overall command datalength in private field.
1150 	 */
1151 	atiop->ccb_h.spriv_field0 = aep->at_datalen;
1152 
1153 	xpt_done((union ccb*)atiop);
1154 	isp_prt(isp, ISP_LOGTDEBUG2,
1155 	    "ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1156 	    aep->at_rxid & 0xffff, aep->at_cdb[0] & 0xff, aep->at_iid,
1157 	    lun, aep->at_taskflags, aep->at_datalen);
1158 	rls_lun_statep(isp, tptr);
1159 	return (0);
1160 }
1161 
1162 static int
1163 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1164 {
1165 	union ccb *ccb;
1166 	int sentstatus, ok, notify_cam;
1167 
1168 	/*
1169 	 * CTIO and CTIO2 are close enough....
1170 	 */
1171 
1172 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_reserved);
1173 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1174 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_reserved);
1175 
1176 	if (IS_FC(isp)) {
1177 		ct2_entry_t *ct = arg;
1178 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1179 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1180 		if (ok && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1181 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1182 		}
1183 		isp_prt(isp, ISP_LOGTDEBUG2,
1184 		    "CTIO2 RX_ID 0x%x sts 0x%x flg 0x%x sns %d FIN",
1185 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1186 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
1187 		notify_cam = ct->ct_header.rqs_seqno;
1188 	} else {
1189 		ct_entry_t *ct = arg;
1190 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1191 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1192 		isp_prt(isp, ISP_LOGTDEBUG2,
1193 		    "CTIO tag 0x%x sts 0x%x flg 0x%x FIN",
1194 		    ct->ct_tag_val, ct->ct_status, ct->ct_flags);
1195 		notify_cam = ct->ct_header.rqs_seqno;
1196 	}
1197 
1198 	/*
1199 	 * We're here either because data transfers are done (and
1200 	 * it's time to send a final status CTIO) or because the final
1201 	 * status CTIO is done. We don't get called for all intermediate
1202 	 * CTIOs that happen for a large data transfer.
1203 	 *
1204 	 * In any case, for this platform, the upper layers figure out
1205 	 * what to do next, so all we do here is collect status and
1206 	 * pass information along. The exception is that we clear
1207 	 * the notion of handling a non-disconnecting command here.
1208 	 */
1209 
1210 	if (sentstatus) {
1211 		/*
1212 		 * Data transfer done. See if all went okay.
1213 		 */
1214 		if (ok) {
1215 			ccb->csio.resid = 0;
1216 		} else {
1217 			ccb->csio.resid = ccb->csio.dxfer_len;
1218 		}
1219 	}
1220 
1221 	if (notify_cam == 0) {
1222 		isp_prt(isp, ISP_LOGTDEBUG1, "Intermediate CTIO done");
1223 		return (0);
1224 	}
1225 	isp_prt(isp, ISP_LOGTDEBUG1, "Final CTIO done");
1226 	if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
1227 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1228 	} else {
1229 		isp_handle_platform_ctio_part2(isp, ccb);
1230 	}
1231 	return (0);
1232 }
1233 
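/*
 * Part two of final CTIO completion: mark the CCB complete if nothing
 * else already set a status, clear CAM_SIM_QUEUED, and if this adapter
 * had frozen the simq for lack of resources, clear that reason and,
 * when no freeze reasons remain, set CAM_RELEASE_SIMQ so the queue is
 * released as the CCB is handed back.
 */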
1234 static void
1235 isp_handle_platform_ctio_part2(struct ispsoftc *isp, union ccb *ccb)
1236 {
1237 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1238 		ccb->ccb_h.status |= CAM_REQ_CMP;
1239 	}
1240 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1241 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1242 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1243 		if (isp->isp_osinfo.simqfrozen == 0) {
1244 			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1245 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
1246 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1247 			} else {
1248 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->devqfrozen");
1249 			}
1250 		} else {
1251 			isp_prt(isp, ISP_LOGDEBUG2,
1252 			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
1253 		}
1254 	}
1255 	xpt_done(ccb);
1256 }
1257 #endif
1258 
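/*
 * CAM async callback. On AC_LOST_DEVICE for parallel SCSI we drop the
 * negotiated parameters for that target back to safe defaults (narrow
 * and async as well on certain firmware revisions), push the change to
 * the chip, and then restore the goal flags so the next command can
 * renegotiate.
 */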
1259 static void
1260 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1261 {
1262 	struct cam_sim *sim;
1263 	struct ispsoftc *isp;
1264 
1265 	sim = (struct cam_sim *)cbarg;
1266 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1267 	switch (code) {
1268 	case AC_LOST_DEVICE:
1269 		if (IS_SCSI(isp)) {
1270 			u_int16_t oflags, nflags;
1271 			sdparam *sdp = isp->isp_param;
1272 			int rvf, tgt;
1273 
1274 			tgt = xpt_path_target_id(path);
1275 			rvf = ISP_FW_REVX(isp->isp_fwrev);
1276 			ISP_LOCK(isp);
1277 			sdp += cam_sim_bus(sim);
1278 			isp->isp_update |= (1 << cam_sim_bus(sim));
1279 			nflags = DPARM_SAFE_DFLT;
1280 			if (rvf >= ISP_FW_REV(7, 55, 0) ||
1281 			   (ISP_FW_REV(4, 55, 0) <= rvf &&
1282 			   (rvf < ISP_FW_REV(5, 0, 0)))) {
1283 				nflags |= DPARM_NARROW | DPARM_ASYNC;
1284 			}
1285 			oflags = sdp->isp_devparam[tgt].dev_flags;
1286 			sdp->isp_devparam[tgt].dev_flags = nflags;
1287 			sdp->isp_devparam[tgt].dev_update = 1;
1288 			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
1289 			sdp->isp_devparam[tgt].dev_flags = oflags;
1290 			ISP_UNLOCK(isp);
1291 		}
1292 		break;
1293 	default:
1294 		printf("%s: isp_attach Async Code 0x%x\n", isp->isp_name, code);
1295 		break;
1296 	}
1297 }
1298 
1299 static void
1300 isp_poll(struct cam_sim *sim)
1301 {
1302 	isp_intr((struct ispsoftc *) cam_sim_softc(sim));
1303 }
1304 
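/*
 * Timed simq release, scheduled from the CMD_RQLATER case in isp_action:
 * once the 500-tick hold expires, drop the SIMQFRZ_TIMED reason and, if
 * nothing else holds the queue frozen, release the simq.
 */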
1305 static void
1306 isp_relsim(void *arg)
1307 {
1308 	struct ispsoftc *isp = arg;
1309 	ISP_LOCK(isp);
1310 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
1311 		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
1312 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
1313 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1314 			xpt_release_simq(isp->isp_sim, 1);
1315 			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
1316 		}
1317 	}
1318 	ISP_UNLOCK(isp);
1319 }
1320 
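/*
 * Per-command watchdog, in outline: on the first firing we mark the
 * command with the WDOG flag, poll the chip once in case the completion
 * is merely sitting in the response queue, and if the command still is
 * not done we set the GRACE flag, queue a SYNC_ALL marker for its
 * channel, and rearm for one more second. If the watchdog fires again
 * with GRACE still set, the command is declared dead: we abort it in
 * the firmware, free its DMA resources and handle, and complete it
 * with CAM_CMD_TIMEOUT.
 */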
1321 static void
1322 isp_watchdog(void *arg)
1323 {
1324 	XS_T *xs = arg;
1325 	struct ispsoftc *isp = XS_ISP(xs);
1326 	u_int32_t handle;
1327 
1328 	/*
1329 	 * We've decided this command is dead. Make sure we're not trying
1330 	 * to kill a command that's already dead by getting its handle
1331 	 * and seeing whether it's still alive.
1332 	 */
1333 	ISP_LOCK(isp);
1334 	handle = isp_find_handle(isp, xs);
1335 	if (handle) {
1336 		u_int16_t r;
1337 
1338 		if (XS_CMD_DONE_P(xs)) {
1339 			isp_prt(isp, ISP_LOGDEBUG1,
1340 			    "watchdog found done cmd (handle 0x%x)", handle);
1341 			ISP_UNLOCK(isp);
1342 			return;
1343 		}
1344 
1345 		if (XS_CMD_WDOG_P(xs)) {
1346 			isp_prt(isp, ISP_LOGDEBUG2,
1347 			    "recursive watchdog (handle 0x%x)", handle);
1348 			ISP_UNLOCK(isp);
1349 			return;
1350 		}
1351 
1352 		XS_CMD_S_WDOG(xs);
1353 
1354 		r = ISP_READ(isp, BIU_ISR);
1355 
1356 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
1357 			isp_prt(isp, ISP_LOGDEBUG2,
1358 			    "watchdog cleanup (%x, %x)", handle, r);
1359 			xpt_done((union ccb *) xs);
1360 		} else if (XS_CMD_GRACE_P(xs)) {
1361 			/*
1362 			 * Make sure the command is *really* dead before we
1363 			 * release the handle (and DMA resources) for reuse.
1364 			 */
1365 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1366 
1367 			/*
1368 			 * After this point, the command is really dead.
1369 			 */
1370 			if (XS_XFRLEN(xs)) {
1371 				ISP_DMAFREE(isp, xs, handle);
1372 			}
1373 			isp_destroy_handle(isp, handle);
1374 			xpt_print_path(xs->ccb_h.path);
1375 			printf("%s: watchdog timeout (%x, %x)\n",
1376 			    isp->isp_name, handle, r);
1377 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1378 			XS_CMD_C_WDOG(xs);
1379 			isp_done(xs);
1380 		} else {
1381 			u_int16_t iptr, optr;
1382 			ispreq_t *mp;
1383 
1384 			XS_CMD_C_WDOG(xs);
1385 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1386 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
1387 				ISP_UNLOCK(isp);
1388 				return;
1389 			}
1390 			XS_CMD_S_GRACE(xs);
1391 			MEMZERO((void *) mp, sizeof (*mp));
1392 			mp->req_header.rqs_entry_count = 1;
1393 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1394 			mp->req_modifier = SYNC_ALL;
1395 			mp->req_target = XS_CHANNEL(xs) << 7;
1396 			ISP_SWIZZLE_REQUEST(isp, mp);
1397 			ISP_ADD_REQUEST(isp, iptr);
1398 		}
1399 	} else {
1400 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1401 	}
1402 	ISP_UNLOCK(isp);
1403 }
1404 
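/*
 * isp_action is the CAM SIM action entry point. One quirk worth noting:
 * if the chip is not in ISP_RUNSTATE when a SCSI I/O arrives, we try to
 * (re)initialize it on the spot and, failing that, complete the CCB as
 * a selection timeout rather than wedging the queue.
 */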
1405 static void
1406 isp_action(struct cam_sim *sim, union ccb *ccb)
1407 {
1408 	int bus, tgt, error;
1409 	struct ispsoftc *isp;
1410 	struct ccb_trans_settings *cts;
1411 
1412 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1413 
1414 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1415 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1416 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1417 	if (isp->isp_state != ISP_RUNSTATE &&
1418 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1419 		ISP_LOCK(isp);
1420 		DISABLE_INTS(isp);
1421 		isp_init(isp);
1422 		if (isp->isp_state != ISP_INITSTATE) {
1423 			ISP_UNLOCK(isp);
1424 			/*
1425 			 * Lie. Say it was a selection timeout.
1426 			 */
1427 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1428 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1429 			xpt_done(ccb);
1430 			return;
1431 		}
1432 		isp->isp_state = ISP_RUNSTATE;
1433 		ENABLE_INTS(isp);
1434 		ISP_UNLOCK(isp);
1435 	}
1436 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1437 
1438 	switch (ccb->ccb_h.func_code) {
1439 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1440 		/*
1441 		 * Do a couple of preliminary checks...
1442 		 */
1443 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1444 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1445 				ccb->ccb_h.status = CAM_REQ_INVALID;
1446 				xpt_done(ccb);
1447 				break;
1448 			}
1449 		}
1450 #ifdef	DIAGNOSTIC
1451 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
1452 			ccb->ccb_h.status = CAM_PATH_INVALID;
1453 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
1454 			ccb->ccb_h.status = CAM_PATH_INVALID;
1455 		}
1456 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
1457 			printf("%s: invalid tgt/lun (%d.%d) in XPT_SCSI_IO\n",
1458 			    isp->isp_name, ccb->ccb_h.target_id,
1459 			    ccb->ccb_h.target_lun);
1460 			xpt_done(ccb);
1461 			break;
1462 		}
1463 #endif
1464 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
1465 		ISP_LOCK(isp);
1466 		error = isp_start((XS_T *) ccb);
1467 		ISP_UNLOCK(isp);
1468 		switch (error) {
1469 		case CMD_QUEUED:
1470 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
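			/*
			 * The CAM timeout is in milliseconds; convert it to
			 * ticks (rounding up) and pad with two seconds of
			 * slack for the watchdog. For example, a 10000ms
			 * timeout with hz = 100 becomes 1000 + 200 = 1200
			 * ticks; CAM_TIME_DEFAULT is treated as 60 seconds.
			 */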
1471 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1472 				int ticks;
1473 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1474 					ticks = 60 * 1000 * hz;
1475 				else
1476 					ticks = ccb->ccb_h.timeout * hz;
1477 				ticks = ((ticks + 999) / 1000) + hz + hz;
1478 				ccb->ccb_h.timeout_ch =
1479 				    timeout(isp_watchdog, (caddr_t)ccb, ticks);
1480 			} else {
1481 				callout_handle_init(&ccb->ccb_h.timeout_ch);
1482 			}
1483 			break;
1484 		case CMD_RQLATER:
1485 			if (isp->isp_osinfo.simqfrozen == 0) {
1486 				isp_prt(isp, ISP_LOGDEBUG2,
1487 				    "RQLATER freeze simq");
1488 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED;
1489 				timeout(isp_relsim, isp, 500);
1490 				xpt_freeze_simq(sim, 1);
1491 			}
1492 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1493 			xpt_done(ccb);
1494 			break;
1495 		case CMD_EAGAIN:
1496 			if (isp->isp_osinfo.simqfrozen == 0) {
1497 				xpt_freeze_simq(sim, 1);
1498 				isp_prt(isp, ISP_LOGDEBUG2,
1499 				    "EAGAIN freeze simq");
1500 			}
1501 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1502 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1503 			xpt_done(ccb);
1504 			break;
1505 		case CMD_COMPLETE:
1506 			isp_done((struct ccb_scsiio *) ccb);
1507 			break;
1508 		default:
1509 			printf("%s: What's this? 0x%x at %d in file %s\n",
1510 			    isp->isp_name, error, __LINE__, __FILE__);
1511 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
1512 			xpt_done(ccb);
1513 		}
1514 		break;
1515 
1516 #ifdef	ISP_TARGET_MODE
1517 	case XPT_EN_LUN:		/* Enable LUN as a target */
1518 		isp_en_lun(isp, ccb);
1519 		xpt_done(ccb);
1520 		break;
1521 
1522 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
1523 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
1524 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
1525 	{
1526 		tstate_t *tptr = get_lun_statep(isp, ccb->ccb_h.target_lun);
1527 		if (tptr == NULL) {
1528 			ccb->ccb_h.status = CAM_LUN_INVALID;
1529 			xpt_done(ccb);
1530 			break;
1531 		}
1532 		ccb->ccb_h.sim_priv.entries[0].field = 0;
1533 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1534 		ISP_LOCK(isp);
1535 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1536 #if	0
1537 			(void) isp_target_putback_atio(isp, ccb);
1538 #endif
1539 			SLIST_INSERT_HEAD(&tptr->atios,
1540 			    &ccb->ccb_h, sim_links.sle);
1541 		} else {
1542 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
1543 			    sim_links.sle);
1544 		}
1545 		ISP_UNLOCK(isp);
1546 		rls_lun_statep(isp, tptr);
1547 		ccb->ccb_h.status = CAM_REQ_INPROG;
1548 		break;
1549 	}
1550 	case XPT_CONT_TARGET_IO:
1551 	{
1552 		ISP_LOCK(isp);
1553 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
1554 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1555 			if (isp->isp_osinfo.simqfrozen == 0) {
1556 				xpt_freeze_simq(sim, 1);
1557 				xpt_print_path(ccb->ccb_h.path);
1558 				printf("XPT_CONT_TARGET_IO freeze simq\n");
1559 			}
1560 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1561 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1562 			xpt_done(ccb);
1563 		} else {
1564 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1565 		}
1566 		ISP_UNLOCK(isp);
1567 		break;
1568 	}
1569 #endif
1570 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
1571 
1572 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1573 		tgt = ccb->ccb_h.target_id;
1574 		tgt |= (bus << 16);
1575 
1576 		ISP_LOCK(isp);
1577 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
1578 		ISP_UNLOCK(isp);
1579 		if (error) {
1580 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1581 		} else {
1582 			ccb->ccb_h.status = CAM_REQ_CMP;
1583 		}
1584 		xpt_done(ccb);
1585 		break;
1586 	case XPT_ABORT:			/* Abort the specified CCB */
1587 	{
1588 		union ccb *accb = ccb->cab.abort_ccb;
1589 		switch (accb->ccb_h.func_code) {
1590 #ifdef	ISP_TARGET_MODE
1591 		case XPT_ACCEPT_TARGET_IO:
1592 		case XPT_IMMED_NOTIFY:
1593         		ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
1594 			break;
1595 		case XPT_CONT_TARGET_IO:
1596 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
1597 			ccb->ccb_h.status = CAM_UA_ABORT;
1598 			break;
1599 #endif
1600 		case XPT_SCSI_IO:
1601 			ISP_LOCK(isp);
1602 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
1603 			ISP_UNLOCK(isp);
1604 			if (error) {
1605 				ccb->ccb_h.status = CAM_UA_ABORT;
1606 			} else {
1607 				ccb->ccb_h.status = CAM_REQ_CMP;
1608 			}
1609 			break;
1610 		default:
1611 			ccb->ccb_h.status = CAM_REQ_INVALID;
1612 			break;
1613 		}
1614 		xpt_done(ccb);
1615 		break;
1616 	}
1617 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
1618 
1619 		cts = &ccb->cts;
1620 		tgt = cts->ccb_h.target_id;
1621 		ISP_LOCK(isp);
1622 		if (IS_SCSI(isp)) {
1623 			sdparam *sdp = isp->isp_param;
1624 			u_int16_t *dptr;
1625 
1626 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1627 
1628 			sdp += bus;
1629 #if	0
1630 			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS)
1631 				dptr = &sdp->isp_devparam[tgt].cur_dflags;
1632 			else
1633 				dptr = &sdp->isp_devparam[tgt].dev_flags;
1634 #else
1635 			/*
1636 			 * We always update (internally) from dev_flags
1637 			 * so any request to change settings just gets
1638 			 * vectored to that location.
1639 			 */
1640 			dptr = &sdp->isp_devparam[tgt].dev_flags;
1641 #endif
1642 
1643 			/*
1644 			 * Note that these operations affect the
1645 			 * goal flags (dev_flags)- not
1646 			 * the current state flags. Then we mark
1647 			 * things so that the next operation to
1648 			 * this HBA will cause the update to occur.
1649 			 */
1650 			if (cts->valid & CCB_TRANS_DISC_VALID) {
1651 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
1652 					*dptr |= DPARM_DISC;
1653 				} else {
1654 					*dptr &= ~DPARM_DISC;
1655 				}
1656 			}
1657 			if (cts->valid & CCB_TRANS_TQ_VALID) {
1658 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
1659 					*dptr |= DPARM_TQING;
1660 				} else {
1661 					*dptr &= ~DPARM_TQING;
1662 				}
1663 			}
1664 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
1665 				switch (cts->bus_width) {
1666 				case MSG_EXT_WDTR_BUS_16_BIT:
1667 					*dptr |= DPARM_WIDE;
1668 					break;
1669 				default:
1670 					*dptr &= ~DPARM_WIDE;
1671 				}
1672 			}
1673 			/*
1674 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
1675 			 * of nonzero will cause us to go to the
1676 			 * selected (from NVRAM) maximum value for
1677 			 * this device. At a later point, we'll
1678 			 * allow finer control.
1679 			 */
1680 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
1681 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
1682 			    (cts->sync_offset > 0)) {
1683 				*dptr |= DPARM_SYNC;
1684 			} else {
1685 				*dptr &= ~DPARM_SYNC;
1686 			}
1687 			*dptr |= DPARM_SAFE_DFLT;
1688 			if (bootverbose || isp->isp_dblev >= 3)
1689 				printf("%s: %d.%d set %s period 0x%x offset "
1690 				    "0x%x flags 0x%x\n", isp->isp_name, bus,
1691 				    tgt,
1692 				    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
1693 				    "current" : "user",
1694 				    sdp->isp_devparam[tgt].sync_period,
1695 				    sdp->isp_devparam[tgt].sync_offset,
1696 				    sdp->isp_devparam[tgt].dev_flags);
1697 			sdp->isp_devparam[tgt].dev_update = 1;
1698 			isp->isp_update |= (1 << bus);
1699 		}
1700 		ISP_UNLOCK(isp);
1701 		ccb->ccb_h.status = CAM_REQ_CMP;
1702 		xpt_done(ccb);
1703 		break;
1704 
1705 	case XPT_GET_TRAN_SETTINGS:
1706 
1707 		cts = &ccb->cts;
1708 		tgt = cts->ccb_h.target_id;
1709 		if (IS_FC(isp)) {
1710 			/*
1711 			 * a lot of normal SCSI things don't make sense.
1712 			 */
1713 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
1714 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1715 			/*
1716 			 * How do you measure the width of a high
1717 			 * speed serial bus? Well, in bytes.
1718 			 *
1719 			 * Offset and period make no sense, though; the 'base'
1720 			 * transfer speed reported in XPT_PATH_INQ is gigabit.
1721 			 */
1722 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1723 		} else {
1724 			sdparam *sdp = isp->isp_param;
1725 			u_int16_t dval, pval, oval;
1726 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1727 
1728 			sdp += bus;
1729 			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) {
1730 				ISP_LOCK(isp);
1731 				sdp->isp_devparam[tgt].dev_refresh = 1;
1732 				isp->isp_update |= (1 << bus);
1733 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
1734 				    NULL);
1735 				ISP_UNLOCK(isp);
1736 				dval = sdp->isp_devparam[tgt].cur_dflags;
1737 				oval = sdp->isp_devparam[tgt].cur_offset;
1738 				pval = sdp->isp_devparam[tgt].cur_period;
1739 			} else {
1740 				dval = sdp->isp_devparam[tgt].dev_flags;
1741 				oval = sdp->isp_devparam[tgt].sync_offset;
1742 				pval = sdp->isp_devparam[tgt].sync_period;
1743 			}
1744 
1745 			ISP_LOCK(isp);
1746 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
1747 
1748 			if (dval & DPARM_DISC) {
1749 				cts->flags |= CCB_TRANS_DISC_ENB;
1750 			}
1751 			if (dval & DPARM_TQING) {
1752 				cts->flags |= CCB_TRANS_TAG_ENB;
1753 			}
1754 			if (dval & DPARM_WIDE) {
1755 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1756 			} else {
1757 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1758 			}
1759 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
1760 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1761 
1762 			if ((dval & DPARM_SYNC) && oval != 0) {
1763 				cts->sync_period = pval;
1764 				cts->sync_offset = oval;
1765 				cts->valid |=
1766 				    CCB_TRANS_SYNC_RATE_VALID |
1767 				    CCB_TRANS_SYNC_OFFSET_VALID;
1768 			}
1769 			ISP_UNLOCK(isp);
1770 			if (bootverbose || isp->isp_dblev >= 3)
1771 				printf("%s: %d.%d get %s period 0x%x offset "
1772 				    "0x%x flags 0x%x\n", isp->isp_name, bus,
1773 				    tgt,
1774 			    	    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
1775 				    "current" : "user", pval, oval, dval);
1776 		}
1777 		ccb->ccb_h.status = CAM_REQ_CMP;
1778 		xpt_done(ccb);
1779 		break;
1780 
1781 	case XPT_CALC_GEOMETRY:
1782 	{
1783 		struct ccb_calc_geometry *ccg;
1784 		u_int32_t secs_per_cylinder;
1785 		u_int32_t size_mb;
1786 
1787 		ccg = &ccb->ccg;
1788 		if (ccg->block_size == 0) {
1789 			printf("%s: %d.%d XPT_CALC_GEOMETRY block size 0?\n",
1790 				isp->isp_name, ccg->ccb_h.target_id,
1791 				ccg->ccb_h.target_lun);
1792 			ccb->ccb_h.status = CAM_REQ_INVALID;
1793 			xpt_done(ccb);
1794 			break;
1795 		}
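		/*
		 * volume_size is in blocks, so dividing by the number of
		 * blocks per megabyte ((1024 * 1024) / block_size, i.e. 2048
		 * for 512-byte blocks) gives the capacity in MB. Disks over
		 * 1GB get the usual 255 head / 63 sector translation; e.g. a
		 * volume of 16777216 512-byte blocks is 8192MB and works out
		 * to 255 heads, 63 sectors and 1044 cylinders.
		 */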
1796 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
1797 		if (size_mb > 1024) {
1798 			ccg->heads = 255;
1799 			ccg->secs_per_track = 63;
1800 		} else {
1801 			ccg->heads = 64;
1802 			ccg->secs_per_track = 32;
1803 		}
1804 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1805 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1806 		ccb->ccb_h.status = CAM_REQ_CMP;
1807 		xpt_done(ccb);
1808 		break;
1809 	}
1810 	case XPT_RESET_BUS:		/* Reset the specified bus */
1811 		bus = cam_sim_bus(sim);
1812 		ISP_LOCK(isp);
1813 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
1814 		ISP_UNLOCK(isp);
1815 		if (error)
1816 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1817 		else {
1818 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
1819 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
1820 			else if (isp->isp_path != NULL)
1821 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
1822 			ccb->ccb_h.status = CAM_REQ_CMP;
1823 		}
1824 		xpt_done(ccb);
1825 		break;
1826 
1827 	case XPT_TERM_IO:		/* Terminate the I/O process */
1828 		ccb->ccb_h.status = CAM_REQ_INVALID;
1829 		xpt_done(ccb);
1830 		break;
1831 
1832 	case XPT_PATH_INQ:		/* Path routing inquiry */
1833 	{
1834 		struct ccb_pathinq *cpi = &ccb->cpi;
1835 
1836 		cpi->version_num = 1;
1837 #ifdef	ISP_TARGET_MODE
1838 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
1839 #else
1840 		cpi->target_sprt = 0;
1841 #endif
1842 		cpi->hba_eng_cnt = 0;
1843 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
1844 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
1845 		cpi->bus_id = cam_sim_bus(sim);
1846 		if (IS_FC(isp)) {
1847 			cpi->hba_misc = PIM_NOBUSRESET;
1848 			/*
1849 			 * Because our loop ID can shift from time to time,
1850 			 * make our initiator ID out of range of our bus.
1851 			 */
1852 			cpi->initiator_id = cpi->max_target + 1;
1853 
1854 			/*
1855 			 * Set base transfer capabilities for Fibre Channel.
1856 			 * Technically not correct because we don't know
1857 			 * what media we're running on top of, but we'll
1858 			 * look good if we always say 100MB/s.
1859 			 */
1860 			cpi->base_transfer_speed = 100000;
1861 			cpi->hba_inquiry = PI_TAG_ABLE;
1862 		} else {
1863 			sdparam *sdp = isp->isp_param;
1864 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
1865 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1866 			cpi->hba_misc = 0;
1867 			cpi->initiator_id = sdp->isp_initiator_id;
1868 			cpi->base_transfer_speed = 3300;
1869 		}
1870 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1871 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
1872 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1873 		cpi->unit_number = cam_sim_unit(sim);
1874 		cpi->ccb_h.status = CAM_REQ_CMP;
1875 		xpt_done(ccb);
1876 		break;
1877 	}
1878 	default:
1879 		ccb->ccb_h.status = CAM_REQ_INVALID;
1880 		xpt_done(ccb);
1881 		break;
1882 	}
1883 }
1884 
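/*
 * ISPDDB is the set of CAM debug flags which, when enabled on a path,
 * makes isp_done() print the CAM completion status of any CCB that did
 * not complete successfully.
 */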
1885 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
1886 void
1887 isp_done(struct ccb_scsiio *sccb)
1888 {
1889 	struct ispsoftc *isp = XS_ISP(sccb);
1890 
1891 	if (XS_NOERR(sccb))
1892 		XS_SETERR(sccb, CAM_REQ_CMP);
1893 
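	/*
	 * If the command made it to the wire but returned a non-good SCSI
	 * status, refine the CAM status: a CHECK CONDITION without valid
	 * autosense data becomes CAM_AUTOSENSE_FAIL, anything else becomes
	 * CAM_SCSI_STATUS_ERROR.
	 */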
1894 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
1895 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
1896 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
1897 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
1898 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
1899 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
1900 		} else {
1901 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1902 		}
1903 	}
1904 
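	/*
	 * For any command that did not complete successfully, freeze the
	 * device queue (once) so that error handling can take place before
	 * further I/O is started on this device.
	 */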
1905 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1906 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1907 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1908 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
1909 			xpt_freeze_devq(sccb->ccb_h.path, 1);
1910 			if (sccb->scsi_status != SCSI_STATUS_OK)
1911 				isp_prt(isp, ISP_LOGDEBUG2,
1912 				    "freeze devq %d.%d %x %x",
1913 				    sccb->ccb_h.target_id,
1914 				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
1915 				    sccb->scsi_status);
1916 		}
1917 	}
1918 
1919 	/*
1920 	 * If the simq was frozen waiting for resources, clear that
1921 	 * condition.  If nothing else is keeping the simq frozen and the
1922 	 * devq isn't frozen either, mark the completing CCB so that the
1923 	 * XPT layer releases the simq.
1924 	 */
1925 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1926 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1927 		if (isp->isp_osinfo.simqfrozen == 0) {
1928 			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1929 				isp_prt(isp, ISP_LOGDEBUG2,
1930 				    "isp_done->relsimq");
1931 				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1932 			} else {
1933 				isp_prt(isp, ISP_LOGDEBUG2,
1934 				    "isp_done->devq frozen");
1935 			}
1936 		} else {
1937 			isp_prt(isp, ISP_LOGDEBUG2,
1938 			    "isp_done -> simqfrozen = %x",
1939 			    isp->isp_osinfo.simqfrozen);
1940 		}
1941 	}
1942 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
1943 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1944 		xpt_print_path(sccb->ccb_h.path);
1945 		printf("cam completion status 0x%x\n", sccb->ccb_h.status);
1946 	}
1947 
1948 	XS_CMD_S_DONE(sccb);
1949 	if (XS_CMD_WDOG_P(sccb) == 0) {
1950 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
1951 		if (XS_CMD_GRACE_P(sccb)) {
1952 			isp_prt(isp, ISP_LOGDEBUG2,
1953 			    "finished command on borrowed time");
1954 		}
1955 		XS_CMD_S_CLEAR(sccb);
1956 		xpt_done((union ccb *) sccb);
1957 	}
1958 }
1959 
1960 int
1961 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
1962 {
1963 	int bus, rv = 0;
1964 	switch (cmd) {
1965 	case ISPASYNC_NEW_TGT_PARAMS:
1966 	{
1967 		int flags, tgt;
1968 		sdparam *sdp = isp->isp_param;
1969 		struct ccb_trans_settings neg;
1970 		struct cam_path *tmppath;
1971 
1972 		tgt = *((int *)arg);
1973 		bus = (tgt >> 16) & 0xffff;
1974 		tgt &= 0xffff;
1975 		sdp += bus;
1976 		if (xpt_create_path(&tmppath, NULL,
1977 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
1978 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1979 			xpt_print_path(isp->isp_path);
1980 			printf("isp_async cannot make temp path for "
1981 			    "target %d bus %d\n", tgt, bus);
1982 			rv = -1;
1983 			break;
1984 		}
1985 		flags = sdp->isp_devparam[tgt].cur_dflags;
1986 		neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
		neg.flags = 0;	/* don't inherit stack garbage */
1987 		if (flags & DPARM_DISC) {
1988 			neg.flags |= CCB_TRANS_DISC_ENB;
1989 		}
1990 		if (flags & DPARM_TQING) {
1991 			neg.flags |= CCB_TRANS_TAG_ENB;
1992 		}
1993 		neg.valid |= CCB_TRANS_BUS_WIDTH_VALID;
1994 		neg.bus_width = (flags & DPARM_WIDE)?
1995 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
1996 		neg.sync_period = sdp->isp_devparam[tgt].cur_period;
1997 		neg.sync_offset = sdp->isp_devparam[tgt].cur_offset;
1998 		if (flags & DPARM_SYNC) {
1999 			neg.valid |=
2000 			    CCB_TRANS_SYNC_RATE_VALID |
2001 			    CCB_TRANS_SYNC_OFFSET_VALID;
2002 		}
2003 		isp_prt(isp, ISP_LOGDEBUG2,
2004 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2005 		    bus, tgt, neg.sync_period, neg.sync_offset, flags);
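		/*
		 * Announce the new settings to interested peripherals via
		 * an async transfer negotiation event on the temporary path.
		 */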
2006 		xpt_setup_ccb(&neg.ccb_h, tmppath, 1);
2007 		xpt_async(AC_TRANSFER_NEG, tmppath, &neg);
2008 		xpt_free_path(tmppath);
2009 		break;
2010 	}
2011 	case ISPASYNC_BUS_RESET:
2012 		bus = *((int *)arg);
2013 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2014 		    bus);
2015 		if (bus > 0 && isp->isp_path2) {
2016 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2017 		} else if (isp->isp_path) {
2018 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2019 		}
2020 		break;
2021 	case ISPASYNC_LOOP_DOWN:
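		/*
		 * Freeze the simq while the loop is down so that new
		 * commands are held in CAM instead of failing; the
		 * ISPASYNC_LOOP_UP case below releases it again.
		 */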
2022 		if (isp->isp_path) {
2023 			if (isp->isp_osinfo.simqfrozen == 0) {
2024 				isp_prt(isp, ISP_LOGDEBUG2,
2025 				    "loop down freeze simq");
2026 				xpt_freeze_simq(isp->isp_sim, 1);
2027 			}
2028 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2029 		}
2030 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2031 		break;
2032 	case ISPASYNC_LOOP_UP:
2033 		if (isp->isp_path) {
2034 			int wasfrozen =
2035 			    isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2036 			isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2037 			if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2038 				xpt_release_simq(isp->isp_sim, 1);
2039 				isp_prt(isp, ISP_LOGDEBUG2,
2040 				    "loop up release simq");
2041 			}
2042 		}
2043 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2044 		break;
2045 	case ISPASYNC_PDB_CHANGED:
2046 	{
2047 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2048 		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2049 		static const char *roles[4] = {
2050 		    "(none)", "Target", "Initiator", "Target/Initiator"
2051 		};
2052 		char *ptr;
2053 		fcparam *fcp = isp->isp_param;
2054 		int tgt = *((int *) arg);
2055 		struct lportdb *lp = &fcp->portdb[tgt];
2056 
2057 		if (lp->valid) {
2058 			ptr = "arrived";
2059 		} else {
2060 			ptr = "disappeared";
2061 		}
2062 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2063 		    roles[lp->roles & 0x3], ptr,
2064 		    (u_int32_t) (lp->port_wwn >> 32),
2065 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2066 		    (u_int32_t) (lp->node_wwn >> 32),
2067 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2068 		break;
2069 	}
2070 	case ISPASYNC_CHANGE_NOTIFY:
2071 		isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
2072 		break;
2073 #ifdef	ISP2100_FABRIC
2074 	case ISPASYNC_FABRIC_DEV:
2075 	{
2076 		int target;
2077 		struct lportdb *lp;
2078 		char *pt;
2079 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
2080 		u_int32_t portid;
2081 		u_int64_t wwpn, wwnn;
2082 		fcparam *fcp = isp->isp_param;
2083 
2084 		rv = -1;
2085 
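		/*
		 * The name server response carries the 24-bit port ID and
		 * the 64-bit port and node names as byte arrays (most
		 * significant byte first), so assemble them by hand.
		 */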
2086 		portid =
2087 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
2088 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
2089 		    (((u_int32_t) resp->snscb_port_id[2]));
2090 
2091 		wwpn =
2092 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
2093 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
2094 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
2095 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
2096 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
2097 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
2098 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
2099 		    (((u_int64_t)resp->snscb_portname[7]));
2100 
2101 		wwnn =
2102 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
2103 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
2104 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
2105 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
2106 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
2107 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
2108 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
2109 		    (((u_int64_t)resp->snscb_nodename[7]));
2110 		if (portid == 0 || wwpn == 0) {
2111 			rv = 0;
2112 			break;
2113 		}
2114 
2115 		switch (resp->snscb_port_type) {
2116 		case 1:
2117 			pt = "   N_Port";
2118 			break;
2119 		case 2:
2120 			pt = "  NL_Port";
2121 			break;
2122 		case 3:
2123 			pt = "F/NL_Port";
2124 			break;
2125 		case 0x7f:
2126 			pt = "  Nx_Port";
2127 			break;
2128 		case 0x81:
2129 			pt = "   F_Port";
2130 			break;
2131 		case 0x82:
2132 			pt = "  FL_Port";
2133 			break;
2134 		case 0x84:
2135 			pt = "   E_Port";
2136 			break;
2137 		default:
2138 			pt = "?";
2139 			break;
2140 		}
2141 		isp_prt(isp, ISP_LOGINFO,
2142 		    "%s @ 0x%x, Node 0x%08x%08x Port 0x%08x%08x",
2143 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
2144 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
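		/*
		 * If we already know this device by its WWNs, there is
		 * nothing more to do; otherwise claim the first free slot
		 * in the local port database for it.
		 */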
2145 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
2146 			lp = &fcp->portdb[target];
2147 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn)
2148 				break;
2149 		}
2150 		if (target < MAX_FC_TARG) {
2151 			rv = 0;
2152 			break;
2153 		}
2154 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
2155 			lp = &fcp->portdb[target];
2156 			if (lp->port_wwn == 0)
2157 				break;
2158 		}
2159 		if (target == MAX_FC_TARG) {
2160 			printf("%s: no more space for fabric devices\n",
2161 			    isp->isp_name);
2162 			break;
2163 		}
2164 		lp->node_wwn = wwnn;
2165 		lp->port_wwn = wwpn;
2166 		lp->portid = portid;
2167 		rv = 0;
2168 		break;
2169 	}
2170 #endif
2171 #ifdef	ISP_TARGET_MODE
2172 	case ISPASYNC_TARGET_MESSAGE:
2173 	{
2174 		tmd_msg_t *mp = arg;
2175 		isp_prt(isp, ISP_LOGDEBUG2,
2176 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2177 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2178 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2179 		    mp->nt_msg[0]);
2180 		break;
2181 	}
2182 	case ISPASYNC_TARGET_EVENT:
2183 	{
2184 		tmd_event_t *ep = arg;
2185 		isp_prt(isp, ISP_LOGDEBUG2,
2186 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2187 		break;
2188 	}
2189 	case ISPASYNC_TARGET_ACTION:
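		/*
		 * Dispatch target mode response queue entries to their
		 * platform handlers according to entry type.
		 */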
2190 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2191 		default:
2192 			printf("%s: event 0x%x for unhandled target action\n",
2193 			    isp->isp_name, ((isphdr_t *)arg)->rqs_entry_type);
2194 			break;
2195 		case RQSTYPE_ATIO:
2196 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2197 			break;
2198 		case RQSTYPE_ATIO2:
2199 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2200 			break;
2201 		case RQSTYPE_CTIO2:
2202 		case RQSTYPE_CTIO:
2203 			rv = isp_handle_platform_ctio(isp, arg);
2204 			break;
2205 		case RQSTYPE_ENABLE_LUN:
2206 		case RQSTYPE_MODIFY_LUN:
2207 			isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status);
2208 			break;
2209 		}
2210 		break;
2211 #endif
2212 	default:
2213 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2214 		rv = -1;
2215 		break;
2216 	}
2217 	return (rv);
2218 }
2219 
2220 
2221 /*
2222  * Locks are held before coming here.
2223  */
2224 void
2225 isp_uninit(struct ispsoftc *isp)
2226 {
2227 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2228 	DISABLE_INTS(isp);
2229 }
2230 
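/*
 * Platform printf-style diagnostic routine.  A message is emitted only if
 * the given level is ISP_LOGALL or shares bits with isp->isp_dblev.
 * Example usage (illustrative only; 'fwstate' is a hypothetical variable):
 *
 *	isp_prt(isp, ISP_LOGINFO, "firmware state %d", fwstate);
 */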
2231 void
2232 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2233 {
2234 	va_list ap;
2235 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2236 		return;
2237 	}
2238 	printf("%s: ", isp->isp_name);
2239 	va_start(ap, fmt);
2240 	vprintf(fmt, ap);
2241 	va_end(ap);
2242 	printf("\n");
2243 }
2244