xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision b9f78d2b4a8b3f55b6e04cfcc94105dd896d6f5c)
1 /*
2  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
3  *
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice immediately at the beginning of the file, without modification,
11  *    this list of conditions, and the following disclaimer.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <dev/isp/isp_freebsd.h>
32 #include <sys/unistd.h>
33 #include <sys/kthread.h>
34 #include <machine/stdarg.h>	/* for use by isp_prt below */
35 #include <sys/conf.h>
36 #include <sys/module.h>
37 #include <sys/ioccom.h>
38 #include <dev/isp/isp_ioctl.h>
39 
40 
41 MODULE_VERSION(isp, 1);
42 int isp_announced = 0;
43 ispfwfunc *isp_get_firmware_p = NULL;
44 
45 static d_ioctl_t ispioctl;
46 static void isp_intr_enable(void *);
47 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
48 static void isp_poll(struct cam_sim *);
49 static timeout_t isp_watchdog;
50 static void isp_kthread(void *);
51 static void isp_action(struct cam_sim *, union ccb *);
52 
53 
#define ISP_CDEV_MAJOR	248
/*
 * Character device switch for the isp control node (/dev/isp<unit>).
 * Only ioctl is meaningful; open/close are no-ops (nullopen/nullclose).
 */
static struct cdevsw isp_cdevsw = {
	.d_open =	nullopen,
	.d_close =	nullclose,
	.d_ioctl =	ispioctl,
	.d_name =	"isp",
	.d_maj =	ISP_CDEV_MAJOR,
	.d_flags =	D_TAPE,
};

/* Head of the singly-linked list of attached instances (isp_osinfo.next). */
static struct ispsoftc *isplist = NULL;
65 
/*
 * Attach the CAM side of an isp instance: allocate the shared device
 * queue, construct and register the SIM(s) (two for dual-bus cards),
 * arrange for interrupts to be enabled via a config intrhook, spin up
 * a support kthread for Fibre Channel cards, create the control device
 * node, and finally link the softc onto the global isplist.
 *
 * Attach is void; on any failure the partially-constructed state is
 * torn down and we simply return.  The ISPLOCK_2_CAMLOCK and
 * CAMLOCK_2_ISPLOCK pairs bracket every call into CAM because we are
 * entered holding the isp lock, not Giant/CAM's lock.
 */
void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	ISPLOCK_2_CAMLOCK(isp);
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}
	CAMLOCK_2_ISPLOCK(isp);

	/*
	 * Defer enabling interrupts until interrupts are guaranteed to
	 * work; isp_intr_enable runs from the intrhook and releases it.
	 */
	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	ISPLOCK_2_CAMLOCK(isp);
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		isp_prt(isp, ISP_LOGERR,
		    "could not establish interrupt enable hook");
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	/* Register for AC_LOST_DEVICE async callbacks on this bus. */
	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	CAMLOCK_2_ISPLOCK(isp);
	isp->isp_sim = sim;
	isp->isp_path = path;
	/*
	 * Create a kernel thread for fibre channel instances. We
	 * don't have dual channel FC cards.
	 */
	if (IS_FC(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
		/* XXX: LOCK VIOLATION */
		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
		    RFHIGHPID, 0, "%s: fc_thrd",
		    device_get_nameunit(isp->isp_dev))) {
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "could not create kthread");
			return;
		}
		CAMLOCK_2_ISPLOCK(isp);
	}


	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			/*
			 * NOTE(review): unlike the other error paths in
			 * this branch, no CAMLOCK_2_ISPLOCK before return
			 * -- verify lock balance with the caller.
			 */
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		/* Same AC_LOST_DEVICE registration for the second bus. */
		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		CAMLOCK_2_ISPLOCK(isp);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}

#ifdef	ISP_TARGET_MODE
	cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
	cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
	cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
	cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
#endif
	/*
	 * Create device nodes
	 */
	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));

	if (isp->isp_role != ISP_ROLE_NONE) {
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	/* Append ourselves to the tail of the global instance list. */
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}

}
232 
233 static INLINE void
234 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
235 {
236 	if (isp->isp_osinfo.simqfrozen == 0) {
237 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
238 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
239 		ISPLOCK_2_CAMLOCK(isp);
240 		xpt_freeze_simq(isp->isp_sim, 1);
241 		CAMLOCK_2_ISPLOCK(isp);
242 	} else {
243 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
244 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
245 	}
246 }
247 
248 static int
249 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
250 {
251 	struct ispsoftc *isp;
252 	int retval = ENOTTY;
253 
254 	isp = isplist;
255 	while (isp) {
256 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
257 			break;
258 		}
259 		isp = isp->isp_osinfo.next;
260 	}
261 	if (isp == NULL)
262 		return (ENXIO);
263 
264 	switch (cmd) {
265 #ifdef	ISP_FW_CRASH_DUMP
266 	case ISP_GET_FW_CRASH_DUMP:
267 	{
268 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
269 		size_t sz;
270 
271 		retval = 0;
272 		if (IS_2200(isp))
273 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
274 		else
275 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
276 		ISP_LOCK(isp);
277 		if (ptr && *ptr) {
278 			void *uaddr = *((void **) addr);
279 			if (copyout(ptr, uaddr, sz)) {
280 				retval = EFAULT;
281 			} else {
282 				*ptr = 0;
283 			}
284 		} else {
285 			retval = ENXIO;
286 		}
287 		ISP_UNLOCK(isp);
288 		break;
289 	}
290 
291 	case ISP_FORCE_CRASH_DUMP:
292 		ISP_LOCK(isp);
293 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
294 		isp_fw_dump(isp);
295 		isp_reinit(isp);
296 		ISP_UNLOCK(isp);
297 		retval = 0;
298 		break;
299 #endif
300 	case ISP_SDBLEV:
301 	{
302 		int olddblev = isp->isp_dblev;
303 		isp->isp_dblev = *(int *)addr;
304 		*(int *)addr = olddblev;
305 		retval = 0;
306 		break;
307 	}
308 	case ISP_RESETHBA:
309 		ISP_LOCK(isp);
310 		isp_reinit(isp);
311 		ISP_UNLOCK(isp);
312 		retval = 0;
313 		break;
314 	case ISP_RESCAN:
315 		if (IS_FC(isp)) {
316 			ISP_LOCK(isp);
317 			if (isp_fc_runstate(isp, 5 * 1000000)) {
318 				retval = EIO;
319 			} else {
320 				retval = 0;
321 			}
322 			ISP_UNLOCK(isp);
323 		}
324 		break;
325 	case ISP_FC_LIP:
326 		if (IS_FC(isp)) {
327 			ISP_LOCK(isp);
328 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
329 				retval = EIO;
330 			} else {
331 				retval = 0;
332 			}
333 			ISP_UNLOCK(isp);
334 		}
335 		break;
336 	case ISP_FC_GETDINFO:
337 	{
338 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
339 		struct lportdb *lp;
340 
341 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
342 			retval = EINVAL;
343 			break;
344 		}
345 		ISP_LOCK(isp);
346 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
347 		if (lp->valid) {
348 			ifc->loopid = lp->loopid;
349 			ifc->portid = lp->portid;
350 			ifc->node_wwn = lp->node_wwn;
351 			ifc->port_wwn = lp->port_wwn;
352 			retval = 0;
353 		} else {
354 			retval = ENODEV;
355 		}
356 		ISP_UNLOCK(isp);
357 		break;
358 	}
359 	case ISP_GET_STATS:
360 	{
361 		isp_stats_t *sp = (isp_stats_t *) addr;
362 
363 		MEMZERO(sp, sizeof (*sp));
364 		sp->isp_stat_version = ISP_STATS_VERSION;
365 		sp->isp_type = isp->isp_type;
366 		sp->isp_revision = isp->isp_revision;
367 		ISP_LOCK(isp);
368 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
369 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
370 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
371 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
372 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
373 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
374 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
375 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
376 		ISP_UNLOCK(isp);
377 		retval = 0;
378 		break;
379 	}
380 	case ISP_CLR_STATS:
381 		ISP_LOCK(isp);
382 		isp->isp_intcnt = 0;
383 		isp->isp_intbogus = 0;
384 		isp->isp_intmboxc = 0;
385 		isp->isp_intoasync = 0;
386 		isp->isp_rsltccmplt = 0;
387 		isp->isp_fphccmplt = 0;
388 		isp->isp_rscchiwater = 0;
389 		isp->isp_fpcchiwater = 0;
390 		ISP_UNLOCK(isp);
391 		retval = 0;
392 		break;
393 	case ISP_FC_GETHINFO:
394 	{
395 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
396 		MEMZERO(hba, sizeof (*hba));
397 		ISP_LOCK(isp);
398 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
399 		hba->fc_scsi_supported = 1;
400 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
401 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
402 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
403 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
404 		ISP_UNLOCK(isp);
405 		retval = 0;
406 		break;
407 	}
408 	case ISP_GET_FC_PARAM:
409 	{
410 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
411 
412 		if (!IS_FC(isp)) {
413 			retval = EINVAL;
414 			break;
415 		}
416 		f->parameter = 0;
417 		if (strcmp(f->param_name, "framelength") == 0) {
418 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
419 			retval = 0;
420 			break;
421 		}
422 		if (strcmp(f->param_name, "exec_throttle") == 0) {
423 			f->parameter = FCPARAM(isp)->isp_execthrottle;
424 			retval = 0;
425 			break;
426 		}
427 		if (strcmp(f->param_name, "fullduplex") == 0) {
428 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
429 				f->parameter = 1;
430 			retval = 0;
431 			break;
432 		}
433 		if (strcmp(f->param_name, "loopid") == 0) {
434 			f->parameter = FCPARAM(isp)->isp_loopid;
435 			retval = 0;
436 			break;
437 		}
438 		retval = EINVAL;
439 		break;
440 	}
441 	case ISP_SET_FC_PARAM:
442 	{
443 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
444 		u_int32_t param = f->parameter;
445 
446 		if (!IS_FC(isp)) {
447 			retval = EINVAL;
448 			break;
449 		}
450 		f->parameter = 0;
451 		if (strcmp(f->param_name, "framelength") == 0) {
452 			if (param != 512 && param != 1024 && param != 1024) {
453 				retval = EINVAL;
454 				break;
455 			}
456 			FCPARAM(isp)->isp_maxfrmlen = param;
457 			retval = 0;
458 			break;
459 		}
460 		if (strcmp(f->param_name, "exec_throttle") == 0) {
461 			if (param < 16 || param > 255) {
462 				retval = EINVAL;
463 				break;
464 			}
465 			FCPARAM(isp)->isp_execthrottle = param;
466 			retval = 0;
467 			break;
468 		}
469 		if (strcmp(f->param_name, "fullduplex") == 0) {
470 			if (param != 0 && param != 1) {
471 				retval = EINVAL;
472 				break;
473 			}
474 			if (param) {
475 				FCPARAM(isp)->isp_fwoptions |=
476 				    ICBOPT_FULL_DUPLEX;
477 			} else {
478 				FCPARAM(isp)->isp_fwoptions &=
479 				    ~ICBOPT_FULL_DUPLEX;
480 			}
481 			retval = 0;
482 			break;
483 		}
484 		if (strcmp(f->param_name, "loopid") == 0) {
485 			if (param < 0 || param > 125) {
486 				retval = EINVAL;
487 				break;
488 			}
489 			FCPARAM(isp)->isp_loopid = param;
490 			retval = 0;
491 			break;
492 		}
493 		retval = EINVAL;
494 		break;
495 	}
496 	default:
497 		break;
498 	}
499 	return (retval);
500 }
501 
502 static void
503 isp_intr_enable(void *arg)
504 {
505 	struct ispsoftc *isp = arg;
506 	if (isp->isp_role != ISP_ROLE_NONE) {
507 		ENABLE_INTS(isp);
508 		isp->isp_osinfo.intsok = 1;
509 	}
510 	/* Release our hook so that the boot can continue. */
511 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
512 }
513 
514 /*
515  * Put the target mode functions here, because some are inlines
516  */
517 
518 #ifdef	ISP_TARGET_MODE
519 
520 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
521 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
522 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
523 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
524 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
525 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
526 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
527 static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
528 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
529 static cam_status
530 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
531 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
532 static void isp_en_lun(struct ispsoftc *, union ccb *);
533 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
534 static timeout_t isp_refire_putback_atio;
535 static void isp_complete_ctio(union ccb *);
536 static void isp_target_putback_atio(union ccb *);
537 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
538 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
539 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
540 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
541 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
542 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
543 
544 static INLINE int
545 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
546 {
547 	tstate_t *tptr;
548 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
549 	if (tptr == NULL) {
550 		return (0);
551 	}
552 	do {
553 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
554 			return (1);
555 		}
556 	} while ((tptr = tptr->next) != NULL);
557 	return (0);
558 }
559 
560 static INLINE int
561 are_any_luns_enabled(struct ispsoftc *isp, int port)
562 {
563 	int lo, hi;
564 	if (IS_DUALBUS(isp)) {
565 		lo = (port * (LUN_HASH_SIZE >> 1));
566 		hi = lo + (LUN_HASH_SIZE >> 1);
567 	} else {
568 		lo = 0;
569 		hi = LUN_HASH_SIZE;
570 	}
571 	for (lo = 0; lo < hi; lo++) {
572 		if (isp->isp_osinfo.lun_hash[lo]) {
573 			return (1);
574 		}
575 	}
576 	return (0);
577 }
578 
579 static INLINE tstate_t *
580 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
581 {
582 	tstate_t *tptr = NULL;
583 
584 	if (lun == CAM_LUN_WILDCARD) {
585 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
586 			tptr = &isp->isp_osinfo.tsdflt[bus];
587 			tptr->hold++;
588 			return (tptr);
589 		}
590 	} else {
591 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
592 		if (tptr == NULL) {
593 			return (NULL);
594 		}
595 	}
596 
597 	do {
598 		if (tptr->lun == lun && tptr->bus == bus) {
599 			tptr->hold++;
600 			return (tptr);
601 		}
602 	} while ((tptr = tptr->next) != NULL);
603 	return (tptr);
604 }
605 
606 static INLINE void
607 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
608 {
609 	if (tptr->hold)
610 		tptr->hold--;
611 }
612 
613 static INLINE int
614 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
615 {
616 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
617 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
618 #ifdef	ISP_SMPLOCK
619 		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
620 			return (-1);
621 		}
622 #else
623 		if (tsleep(&isp->isp_osinfo.tgtcv0[bus], PZERO, "cv_isp", 0)) {
624 			return (-1);
625 		}
626 #endif
627 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
628 	}
629 	return (0);
630 }
631 
632 static INLINE int
633 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
634 {
635 #ifdef	ISP_SMPLOCK
636 	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
637 		return (-1);
638 	}
639 #else
640 	if (tsleep(&isp->isp_osinfo.tgtcv1[bus], PZERO, "cv_isp1", 0)) {
641 		return (-1);
642 	}
643 #endif
644 	return (0);
645 }
646 
/*
 * Record the firmware's completion status for the pending
 * request-queue-entry operation on this bus and wake the waiter
 * blocked in isp_cv_wait_timed_rqe().
 */
static INLINE void
isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
{
	isp->isp_osinfo.rstatus[bus] = status;
#ifdef	ISP_SMPLOCK
	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
#else
	/* Non-SMP path: the waiter uses tsleep() on this address. */
	wakeup(&isp->isp_osinfo.tgtcv1[bus]);
#endif
}
657 
658 static INLINE void
659 isp_vsema_rqe(struct ispsoftc *isp, int bus)
660 {
661 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
662 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
663 #ifdef	ISP_SMPLOCK
664 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
665 #else
666 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
667 #endif
668 	}
669 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
670 }
671 
672 static INLINE atio_private_data_t *
673 isp_get_atpd(struct ispsoftc *isp, int tag)
674 {
675 	atio_private_data_t *atp;
676 	for (atp = isp->isp_osinfo.atpdp;
677 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
678 		if (atp->tag == tag)
679 			return (atp);
680 	}
681 	return (NULL);
682 }
683 
684 static cam_status
685 create_lun_state(struct ispsoftc *isp, int bus,
686     struct cam_path *path, tstate_t **rslt)
687 {
688 	cam_status status;
689 	lun_id_t lun;
690 	int hfx;
691 	tstate_t *tptr, *new;
692 
693 	lun = xpt_path_lun_id(path);
694 	if (lun < 0) {
695 		return (CAM_LUN_INVALID);
696 	}
697 	if (is_lun_enabled(isp, bus, lun)) {
698 		return (CAM_LUN_ALRDY_ENA);
699 	}
700 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
701 	if (new == NULL) {
702 		return (CAM_RESRC_UNAVAIL);
703 	}
704 
705 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
706 	    xpt_path_target_id(path), xpt_path_lun_id(path));
707 	if (status != CAM_REQ_CMP) {
708 		free(new, M_DEVBUF);
709 		return (status);
710 	}
711 	new->bus = bus;
712 	new->lun = lun;
713 	SLIST_INIT(&new->atios);
714 	SLIST_INIT(&new->inots);
715 	new->hold = 1;
716 
717 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
718 	tptr = isp->isp_osinfo.lun_hash[hfx];
719 	if (tptr == NULL) {
720 		isp->isp_osinfo.lun_hash[hfx] = new;
721 	} else {
722 		while (tptr->next)
723 			tptr = tptr->next;
724 		tptr->next = new;
725 	}
726 	*rslt = new;
727 	return (CAM_REQ_CMP);
728 }
729 
/*
 * Unlink a lun state from its hash chain and free it.  Silently does
 * nothing if the state still has holds outstanding or cannot be found
 * on the chain (matching is by (bus, lun), not by pointer identity).
 * NOTE(review): the forward declaration above says "static void" while
 * this definition adds INLINE -- harmless, but inconsistent.
 */
static INLINE void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	int hfx;
	tstate_t *lw, *pw;

	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
	if (tptr->hold) {
		/* Still referenced; caller must release holds first. */
		return;
	}
	pw = isp->isp_osinfo.lun_hash[hfx];
	if (pw == NULL) {
		return;
	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
		/* Match at the head: advance the bucket pointer. */
		isp->isp_osinfo.lun_hash[hfx] = pw->next;
	} else {
		/* Walk the chain with lw trailing pw by one node. */
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			/* Not on the chain; do not free. */
			return;
		}
	}
	free(tptr, M_DEVBUF);
}
762 
/*
 * Enable or disable a lun for target mode (XPT_EN_LUN).
 *
 * We enter with our locks held.  The outcome is reported via
 * ccb->ccb_h.status; nothing is returned directly.  The sequence is:
 * sanity-check target/lun, handle the wildcard (accept-everything)
 * case, toggle bus-level target mode in the firmware when needed, set
 * up or look up per-lun state, then drive the firmware ENABLE/MODIFY/
 * DISABLE LUN commands and wait (timed) for their completions.
 */
static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus, cmd, av, wildcard;
	lun_id_t lun;
	target_id_t tgt;


	bus = XS_CHANNEL(ccb) & 0x1;
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * Do some sanity checking first.
	 */

	if ((lun != CAM_LUN_WILDCARD) &&
	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}

	if (IS_SCSI(isp)) {
		sdparam *sdp = isp->isp_param;
		sdp += bus;
		/* Target id must be ours (or wildcard). */
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != sdp->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != FCPARAM(isp)->isp_iid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
		/*
		 * This is as a good a place as any to check f/w capabilities.
		 */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "firmware does not support target mode");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		}
		/*
		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
		 * XXX: dorks with our already fragile enable/disable code.
		 */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "firmware not SCCLUN capable");
		}
	}

	/* Wildcard target requires wildcard lun, and vice versa. */
	if (tgt == CAM_TARGET_WILDCARD) {
		if (lun == CAM_LUN_WILDCARD) {
			wildcard = 1;
		} else {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	} else {
		wildcard = 0;
	}

	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we know that we can accept commands for luns that haven't
	 * been enabled yet and send them upstream. Otherwise, we have to
	 * handle them locally (if we see them at all).
	 */

	if (wildcard) {
		tptr = &isp->isp_osinfo.tsdflt[bus];
		if (cel->enable) {
			if (isp->isp_osinfo.tmflags[bus] &
			    TM_WILDCARD_ENABLED) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				return;
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				return;
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
		} else {
			if ((isp->isp_osinfo.tmflags[bus] &
			    TM_WILDCARD_ENABLED) == 0) {
				/* Already disabled: treat as success. */
				ccb->ccb_h.status = CAM_REQ_CMP;
				return;
			}
			if (tptr->hold) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				return;
			}
			xpt_free_path(tptr->owner);
			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
		}
	}

	/*
	 * Now check to see whether this bus needs to be
	 * enabled/disabled with respect to target mode.
	 */
	/*
	 * NOTE(review): the channel is passed in the top bit of av;
	 * shifting into the sign bit of a signed int is at best
	 * implementation-defined -- an unsigned type would be cleaner.
	 */
	av = bus << 31;
	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
		av |= ENABLE_TARGET_FLAG;
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			if (wildcard) {
				/* Undo the wildcard enable done above. */
				isp->isp_osinfo.tmflags[bus] &=
				    ~TM_WILDCARD_ENABLED;
				xpt_free_path(tptr->owner);
			}
			return;
		}
		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
		isp_prt(isp, ISP_LOGINFO,
		    "Target Mode enabled on channel %d", bus);
	} else if (cel->enable == 0 &&
	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
		if (are_any_luns_enabled(isp, bus)) {
			ccb->ccb_h.status = CAM_SCSI_BUSY;
			return;
		}
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		}
		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
		isp_prt(isp, ISP_LOGINFO,
		    "Target Mode disabled on channel %d", bus);
	}

	/* Wildcard actions stop here; no per-lun firmware work needed. */
	if (wildcard) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		return;
	}

	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			return;
		}
	} else {
		tptr = get_lun_statep(isp, bus, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	/* Serialize firmware lun commands per bus. */
	if (isp_psema_sig_rqe(isp, bus)) {
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}

	if (cel->enable) {
		u_int32_t seq = isp->isp_osinfo.rollinfo++;
		int c, n, ulun = lun;

		cmd = RQSTYPE_ENABLE_LUN;
		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			cmd = RQSTYPE_MODIFY_LUN;
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		rstat = LUN_ERR;
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "wait for ENABLE/MODIFY LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus[bus];
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
			goto out;
		}
	} else {
		int c, n, ulun = lun;
		u_int32_t seq;

		rstat = LUN_ERR;
		seq = isp->isp_osinfo.rollinfo++;
		/* Negative cmd value tells isp_lun_cmd this is a disable. */
		cmd = -RQSTYPE_MODIFY_LUN;

		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "wait for MODIFY LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus[bus];
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "MODIFY LUN returned 0x%x", rstat);
			goto out;
		}
		/* For FC SCC firmware, only lun 0 gets the full disable. */
		if (IS_FC(isp) && lun) {
			goto out;
		}

		seq = isp->isp_osinfo.rollinfo++;

		rstat = LUN_ERR;
		cmd = -RQSTYPE_ENABLE_LUN;
		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			     "wait for DISABLE LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus[bus];
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "DISABLE LUN returned 0x%x", rstat);
			goto out;
		}
		/* Last lun gone: turn bus-level target mode back off. */
		if (are_any_luns_enabled(isp, bus) == 0) {
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				isp_prt(isp, ISP_LOGWARN,
				    "disable target mode on channel %d failed",
				    bus);
				goto out;
			}
			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGINFO,
			    "Target Mode disabled on channel %d", bus);
		}
	}

out:
	/* Release the per-bus command semaphore taken above. */
	isp_vsema_rqe(isp, bus);

	if (rstat != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		isp_prt(isp, ISP_LOGWARN,
		    "lun %sable failed", (cel->enable) ? "en" : "dis");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
	} else {
		xpt_print_path(ccb->ccb_h.path);
		isp_prt(isp, ISP_LOGINFO, lfmt,
		    (cel->enable) ? "en" : "dis", bus);
		rls_lun_statep(isp, tptr);
		if (cel->enable == 0) {
			destroy_lun_state(isp, tptr);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
	}
}
1075 
/*
 * Abort a queued target-mode CCB (XPT_ACCEPT_TARGET_IO or
 * XPT_IMMED_NOTIFY) named by ccb->cab.abort_ccb.  The CCB is removed
 * from the lun state's pending list; if found its status is set to
 * CAM_REQ_ABORTED and CAM_REQ_CMP is returned, otherwise
 * CAM_PATH_INVALID (or CAM_UA_ABORT for unsupported CCB types).
 */
static cam_status
isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found;
	union ccb *accb = ccb->cab.abort_ccb;

	/* Non-wildcard aborts must name our own target id. */
	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			return (CAM_PATH_INVALID);
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			return (CAM_PATH_INVALID);
		}
	}
	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
	if (tptr == NULL) {
		return (CAM_PATH_INVALID);
	}
	/* Pick the pending list that matches the CCB type. */
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
	} else {
		rls_lun_statep(isp, tptr);
		return (CAM_UA_ABORT);
	}
	/* Manual singly-linked-list unlink of the CCB header. */
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		while(curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		return (CAM_REQ_CMP);
	}
	return(CAM_PATH_INVALID);
}
1132 
/*
 * Build and queue a CTIO (parallel SCSI) or CTIO2 (Fibre Channel) request
 * for a target-mode command: either moving data, sending status, or both.
 *
 * Returns CAM_REQ_INPROG when the entry has been handed to the firmware,
 * CAM_RESRC_UNAVAIL when the request queue or handle list is exhausted,
 * and an error status otherwise.
 */
static cam_status
isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	u_int16_t *hp, save_handle;
	u_int16_t nxti, optr;
	u_int8_t local[QENTRY_LEN];


	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(local, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
	 */

	if (IS_FC(isp)) {
		atio_private_data_t *atp;
		ct2_entry_t *cto = (ct2_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		/* Non-SCCLUN firmware carries the lun in the entry itself. */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
			cto->ct_lun = ccb->ccb_h.target_lun;
		}

		/*
		 * The private data adjunct tracks per-command transfer
		 * progress across multiple CTIO2s for this exchange.
		 */
		atp = isp_get_atpd(isp, cso->tag_id);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find private data adjunct for tag %x",
			    cso->tag_id);
			return (-1);
		}

		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			/* Status-only (mode 1): no data movement. */
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
				/*
				 * Residual is what was requested minus
				 * what we actually moved so far.
				 */
				cto->ct_resid =
				    atp->orig_datalen - atp->bytes_xfered;
				if (cto->ct_resid < 0) {
					cto->rsp.m1.ct_scsi_status |=
					    CT2_DATA_OVER;
				} else if (cto->ct_resid > 0) {
					cto->rsp.m1.ct_scsi_status |=
					    CT2_DATA_UNDER;
				}
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			/* Data-moving CTIO2 (mode 0). */
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			/* Continue the transfer where the last CTIO left off. */
			cto->ct_reloff = atp->bytes_xfered;
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
				cto->ct_resid =
				    atp->orig_datalen -
				    (atp->bytes_xfered + cso->dxfer_len);
				if (cto->ct_resid < 0) {
					cto->rsp.m0.ct_scsi_status |=
					    CT2_DATA_OVER;
				} else if (cto->ct_resid > 0) {
					cto->rsp.m0.ct_scsi_status |=
					    CT2_DATA_UNDER;
				}
			} else {
				/*
				 * Intermediate transfer: remember its size
				 * so completion can account the bytes moved.
				 */
				atp->last_xframt = cso->dxfer_len;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}

		if (cto->ct_flags & CT2_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
			    cso->dxfer_len, cto->ct_resid);
			cto->ct_flags |= CT2_CCINCR;
			atp->state = ATPD_STATE_LAST_CTIO;
		} else
			atp->state = ATPD_STATE_CTIO;
		cto->ct_timeout = 10;	/* firmware timeout, presumably seconds */
		hp = &cto->ct_syshandle;
	} else {
		ct_entry_t *cto = (ct_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		/* Bus number is encoded in the high bit of the iid field. */
		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
		if (AT_HAS_TAG(cso->tag_id)) {
			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
			    cso->tag_id);
		}
		/* Parallel SCSI CTIOs cannot carry sense data. */
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		cto->ct_timeout = 10;	/* firmware timeout, presumably seconds */
		hp = &cto->ct_syshandle;
	}

	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}


	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */

	save_handle = *hp;

	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, nxti);
		return (CAM_REQ_INPROG);

	case CMD_EAGAIN:
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		isp_destroy_handle(isp, save_handle);
		return (CAM_RESRC_UNAVAIL);

	default:
		/* DMA setup failed: release the handle we just reserved. */
		isp_destroy_handle(isp, save_handle);
		return (XS_ERR(ccb));
	}
}
1308 
/*
 * Timeout handler: retry pushing an ATIO back to the firmware after a
 * request-queue-full condition, at splcam since we arrive from a
 * timeout context.
 */
static void
isp_refire_putback_atio(void *arg)
{
	int spl;

	spl = splcam();
	isp_target_putback_atio(arg);
	splx(spl);
}
1316 
/*
 * Return the ATIO resource associated with a completed (or failed)
 * target-mode command back to the firmware so it can be reused for a
 * new incoming command, then complete the CCB back to CAM.
 *
 * If the request queue is momentarily full, reschedule ourselves via
 * timeout() rather than dropping the resource.
 */
static void
isp_target_putback_atio(union ccb *ccb)
{
	struct ispsoftc *isp;
	struct ccb_scsiio *cso;
	u_int16_t nxti, optr;
	void *qe;

	isp = XS_ISP(ccb);

	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		/* Queue full: retry shortly from timeout context. */
		(void) timeout(isp_refire_putback_atio, ccb, 10);
		isp_prt(isp, ISP_LOGWARN,
		    "isp_target_putback_atio: Request Queue Overflow");
		return;
	}
	bzero(qe, QENTRY_LEN);
	cso = &ccb->csio;
	if (IS_FC(isp)) {
		/* Build an ATIO2 replenish entry in local, native format. */
		at2_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at2_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		/* SCCLUN firmware uses the 16-bit lun field. */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = cso->tag_id;
		at->at_iid = cso->ccb_h.target_id;
		/* isp_put_atio2 handles any byte swizzling into qe. */
		isp_put_atio2(isp, at, qe);
	} else {
		at_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = cso->init_id;
		/* Bus number rides in the high bit of the iid field. */
		at->at_iid |= XS_CHANNEL(ccb) << 7;
		at->at_tgt = cso->ccb_h.target_id;
		at->at_lun = cso->ccb_h.target_lun;
		at->at_status = CT_OK;
		at->at_tag_val = AT_GET_TAG(cso->tag_id);
		at->at_handle = AT_GET_HANDLE(cso->tag_id);
		isp_put_atio(isp, at, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, nxti);
	isp_complete_ctio(ccb);
}
1367 
1368 static void
1369 isp_complete_ctio(union ccb *ccb)
1370 {
1371 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1372 		ccb->ccb_h.status |= CAM_REQ_CMP;
1373 	}
1374 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1375 	xpt_done(ccb);
1376 }
1377 
1378 /*
1379  * Handle ATIO stuff that the generic code can't.
1380  * This means handling CDBs.
1381  */
1382 
/*
 * Handle an incoming ATIO (parallel SCSI): validate the firmware status,
 * find the lun state (falling back to the wildcard lun), pull a queued
 * ATIO CCB off its list, fill it in from the firmware entry, and complete
 * it to CAM. When no resources are available, the command is bounced back
 * to the initiator with BUSY or QUEUE FULL.
 *
 * Always returns 0.
 */
static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus, iswildcard;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* Look up the lun state; fall back to the wildcard lun if needed. */
	bus = GET_BUS_VAL(aep->at_iid);
	tptr = get_lun_statep(isp, bus, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
		iswildcard = 1;
	} else {
		iswildcard = 0;
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d on channel %d",
		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		rls_lun_statep(isp, tptr);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	/* Wildcard CCBs learn their actual target/lun from the entry. */
	if (iswildcard) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	/* Pass along any firmware-suggested sense data. */
	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = GET_IID_VAL(aep->at_iid);
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/*
	 * Construct a tag 'id' based upon tag value (which may be 0..255)
	 * and the handle (which we have to preserve).
	 */
	AT_MAKE_TAGID(atiop->tag_id, aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}
1503 
/*
 * Handle an incoming ATIO2 (Fibre Channel): validate the firmware status,
 * locate the lun state (falling back to the wildcard), allocate a private
 * data adjunct to track the exchange, fill in a queued ATIO CCB from the
 * firmware entry, and complete it to CAM. Resource shortages are bounced
 * back to the initiator with BUSY or QUEUE FULL.
 *
 * Always returns 0.
 */
static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;
	atio_private_data_t *atp;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* SCCLUN firmware reports the lun in a wider field. */
	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, 0, lun);
	if (tptr == NULL) {
		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
                 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 *
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* Need both a free adjunct and a queued ATIO CCB to proceed. */
	atp = isp_get_atpd(isp, 0);
	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL || atp == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no %s for lun %d from initiator %d",
		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		return (0);
	}
	atp->state = ATPD_STATE_ATIO;
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
	    lun, tptr->atio_count);

	/* Default (wildcard) lun state: fill in our own id and the lun. */
	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
		atiop->ccb_h.target_id =
		    ((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	/* Translate firmware task attributes to CAM tag actions. */
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
        case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
        case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
        case ATIO2_TC_ATTR_ACAQ:		/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;

	/* Record the exchange in the adjunct for later CTIO accounting. */
	atp->tag = atiop->tag_id;
	atp->lun = lun;
	atp->orig_datalen = aep->at_datalen;
	atp->last_xframt = 0;
	atp->bytes_xfered = 0;
	atp->state = ATPD_STATE_CAM;
	xpt_done((union ccb*)atiop);

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}
1639 
/*
 * Handle a completed CTIO or CTIO2: recover the CCB via its handle,
 * account for transferred data, copy up any firmware-supplied sense,
 * and either complete the CCB to CAM (final CTIO), re-arm the ATIO
 * (on error), or simply note an intermediate completion.
 *
 * Always returns 0.
 */
static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	u_int16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find adjunct for %x after I/O",
			    ct->ct_rxid);
			return (0);
		}
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		/*
		 * Credit the bytes the last intermediate transfer actually
		 * moved (requested amount minus this CTIO's residual).
		 */
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
		}
		if (sentstatus || !ok) {
			atp->tag = 0;
		}
		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;

		/* XXX: should really come after isp_complete_ctio */
		atp->state = ATPD_STATE_PDON;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		/* Firmware-suggested sense rides at a fixed entry offset. */
		if (ct->ct_status & QLTM_SVALID) {
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
		    ct->ct_status, ct->ct_flags, resid,
		    sentstatus? "FIN" : "MID");
		tval = ct->ct_fwhandle;
	}
	ccb->csio.resid += resid;

	/*
	 * We're here either because intermediate data transfers are done
	 * and/or the final status CTIO (which may have joined with a
	 * Data Transfer) is done.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. Any DMA handles have already been
	 * freed.
	 */
	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
		return (0);
	}

	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);

	/* On error, give the ATIO back to the firmware for a retry. */
	if (!ok) {
		isp_target_putback_atio(ccb);
	} else {
		isp_complete_ctio(ccb);

	}
	return (0);
}
1747 
1748 static int
1749 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1750 {
1751 	return (0);	/* XXXX */
1752 }
1753 
/*
 * Handle a Fibre Channel immediate notify entry. Port state changes are
 * just logged; an ABORT TASK is forwarded upstream as a CAM immediate
 * notify CCB (carrying MSG_ABORT_TAG and the RX_ID) when one is queued
 * for the affected lun.
 *
 * Always returns 0.
 */
static int
isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
{

	switch (inp->in_status) {
	case IN_PORT_LOGOUT:
		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
		   inp->in_iid);
		break;
	case IN_PORT_CHANGED:
		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
		   inp->in_iid);
		break;
	case IN_GLOBAL_LOGO:
		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
		break;
	case IN_ABORT_TASK:
	{
		/* in_seqid carries the RX_ID of the exchange to abort. */
		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
		struct ccb_immed_notify *inot = NULL;

		if (atp) {
			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
			if (tptr) {
				inot = (struct ccb_immed_notify *)
				    SLIST_FIRST(&tptr->inots);
				if (inot) {
					SLIST_REMOVE_HEAD(&tptr->inots,
					    sim_links.sle);
				}
			}
			isp_prt(isp, ISP_LOGWARN,
			   "abort task RX_ID %x IID %d state %d",
			   inp->in_seqid, inp->in_iid, atp->state);
		} else {
			isp_prt(isp, ISP_LOGWARN,
			   "abort task RX_ID %x from iid %d, state unknown",
			   inp->in_seqid, inp->in_iid);
		}
		/* Deliver the abort upstream if we found an INOT CCB. */
		if (inot) {
			inot->initiator_id = inp->in_iid;
			inot->sense_len = 0;
			inot->message_args[0] = MSG_ABORT_TAG;
			inot->message_args[1] = inp->in_seqid & 0xff;
			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			xpt_done((union ccb *)inot);
		}
		break;
	}
	default:
		break;
	}
	return (0);
}
1809 #endif
1810 
/*
 * CAM async event callback. On AC_LOST_DEVICE for a parallel SCSI bus,
 * push the target's NVRAM-derived flags (narrowed to safe defaults when
 * we have no target mode and no loaded firmware) to the chip so the next
 * user of that target renegotiates cleanly. Other codes are just logged.
 */
static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			if (tgt >= 0) {
				/* Per-bus parameter block. */
				sdp += cam_sim_bus(sim);
				ISP_LOCK(isp);
				nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef	ISP_TARGET_MODE
				nflags &= DPARM_SAFE_DFLT;
				if (isp->isp_loaded_fw) {
					nflags |= DPARM_NARROW | DPARM_ASYNC;
				}
#else
				nflags = DPARM_DEFAULT;
#endif
				/*
				 * Temporarily swap in nflags as the goal,
				 * run the update, then restore the prior
				 * goal flags.
				 */
				oflags = sdp->isp_devparam[tgt].goal_flags;
				sdp->isp_devparam[tgt].goal_flags = nflags;
				sdp->isp_devparam[tgt].dev_update = 1;
				isp->isp_update |= (1 << cam_sim_bus(sim));
				(void) isp_control(isp,
				    ISPCTL_UPDATE_PARAMS, NULL);
				sdp->isp_devparam[tgt].goal_flags = oflags;
				ISP_UNLOCK(isp);
			}
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}
1855 
1856 static void
1857 isp_poll(struct cam_sim *sim)
1858 {
1859 	struct ispsoftc *isp = cam_sim_softc(sim);
1860 	u_int16_t isr, sema, mbox;
1861 
1862 	ISP_LOCK(isp);
1863 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1864 		isp_intr(isp, isr, sema, mbox);
1865 	}
1866 	ISP_UNLOCK(isp);
1867 }
1868 
1869 
/*
 * Command timeout handler. First verify the command is really still
 * outstanding (its handle exists and it is not already done or being
 * watched). On the first firing, mark a grace period, push a SYNC_ALL
 * marker to the firmware, and re-arm ourselves; if the command is still
 * not done on the second firing (grace set), abort it in the firmware,
 * release its DMA resources and handle, and complete it to CAM with
 * CAM_CMD_TIMEOUT.
 */
static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;
	int iok;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting it's handle and
	 * and seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	/* Disable "interrupts ok" while we poke at shared state. */
	iok = isp->isp_osinfo.intsok;
	isp->isp_osinfo.intsok = 0;
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		/* Drain any pending interrupt; it may complete the cmd. */
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the comamnd is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
                	}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "watchdog timeout for handle 0x%x", handle);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			u_int16_t nxti, optr;
			ispreq_t local, *mp= &local, *qe;

			/* First firing: start grace period and re-arm. */
			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			/* Send a SYNC_ALL marker to flush firmware state. */
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			isp_put_request(isp, mp, qe);
			ISP_ADD_REQUEST(isp, nxti);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	isp->isp_osinfo.intsok = iok;
	ISP_UNLOCK(isp);
}
1957 
/*
 * Fibre Channel state-watcher kernel thread. Loops forever: waits for
 * good loop/firmware state (polling isp_fc_runstate), then releases the
 * loopdown SIMQ freeze if we were the last freezer, and sleeps on the
 * kthread condition variable until someone (e.g. isp_action's RQLATER
 * path) wakes us to re-evaluate.
 */
static void
isp_kthread(void *arg)
{
	struct ispsoftc *isp = arg;

#ifdef	ISP_SMPLOCK
	mtx_lock(&isp->isp_lock);
#else
	mtx_lock(&Giant);
#endif
	/*
	 * The first loop is for our usage where we have yet to have
	 * gotten good fibre channel state.
	 */
	for (;;) {
		int wasfrozen;

		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
				/*
				 * Give up waiting if loop has never been
				 * seen or we have not matured yet.
				 */
				if (FCPARAM(isp)->loop_seen_once == 0 ||
				    isp->isp_osinfo.ktmature == 0) {
					break;
				}
			}
#ifdef	ISP_SMPLOCK
			msleep(isp_kthread, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", hz);
#else
			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
#endif
		}

		/*
		 * Even if we didn't get good loop state we may be
		 * unfreezing the SIMQ so that we can kill off
		 * commands (if we've never seen loop before, for example).
		 */
		isp->isp_osinfo.ktmature = 1;
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
#ifdef	ISP_SMPLOCK
		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
#else
		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
#endif
	}
}
2015 
2016 static void
2017 isp_action(struct cam_sim *sim, union ccb *ccb)
2018 {
2019 	int bus, tgt, error;
2020 	struct ispsoftc *isp;
2021 	struct ccb_trans_settings *cts;
2022 
2023 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2024 
2025 	isp = (struct ispsoftc *)cam_sim_softc(sim);
2026 	ccb->ccb_h.sim_priv.entries[0].field = 0;
2027 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2028 	if (isp->isp_state != ISP_RUNSTATE &&
2029 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
2030 		CAMLOCK_2_ISPLOCK(isp);
2031 		isp_init(isp);
2032 		if (isp->isp_state != ISP_INITSTATE) {
2033 			ISP_UNLOCK(isp);
2034 			/*
2035 			 * Lie. Say it was a selection timeout.
2036 			 */
2037 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2038 			xpt_freeze_devq(ccb->ccb_h.path, 1);
2039 			xpt_done(ccb);
2040 			return;
2041 		}
2042 		isp->isp_state = ISP_RUNSTATE;
2043 		ISPLOCK_2_CAMLOCK(isp);
2044 	}
2045 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2046 
2047 
2048 	switch (ccb->ccb_h.func_code) {
2049 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2050 		/*
2051 		 * Do a couple of preliminary checks...
2052 		 */
2053 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2054 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2055 				ccb->ccb_h.status = CAM_REQ_INVALID;
2056 				xpt_done(ccb);
2057 				break;
2058 			}
2059 		}
2060 #ifdef	DIAGNOSTIC
2061 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2062 			ccb->ccb_h.status = CAM_PATH_INVALID;
2063 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2064 			ccb->ccb_h.status = CAM_PATH_INVALID;
2065 		}
2066 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2067 			isp_prt(isp, ISP_LOGERR,
2068 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2069 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2070 			xpt_done(ccb);
2071 			break;
2072 		}
2073 #endif
2074 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2075 		CAMLOCK_2_ISPLOCK(isp);
2076 		error = isp_start((XS_T *) ccb);
2077 		switch (error) {
2078 		case CMD_QUEUED:
2079 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2080 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2081 				u_int64_t ticks = (u_int64_t) hz;
2082 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2083 					ticks = 60 * 1000 * ticks;
2084 				else
2085 					ticks = ccb->ccb_h.timeout * hz;
2086 				ticks = ((ticks + 999) / 1000) + hz + hz;
2087 				if (ticks >= 0x80000000) {
2088 					isp_prt(isp, ISP_LOGERR,
2089 					    "timeout overflow");
2090 					ticks = 0x7fffffff;
2091 				}
2092 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2093 				    (caddr_t)ccb, (int)ticks);
2094 			} else {
2095 				callout_handle_init(&ccb->ccb_h.timeout_ch);
2096 			}
2097 			ISPLOCK_2_CAMLOCK(isp);
2098 			break;
2099 		case CMD_RQLATER:
2100 			/*
2101 			 * This can only happen for Fibre Channel
2102 			 */
2103 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2104 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2105 			    isp->isp_osinfo.ktmature) {
2106 				ISPLOCK_2_CAMLOCK(isp);
2107 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2108 				xpt_done(ccb);
2109 				break;
2110 			}
2111 #ifdef	ISP_SMPLOCK
2112 			cv_signal(&isp->isp_osinfo.kthread_cv);
2113 #else
2114 			wakeup(&isp->isp_osinfo.kthread_cv);
2115 #endif
2116 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2117 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2118 			ISPLOCK_2_CAMLOCK(isp);
2119 			xpt_done(ccb);
2120 			break;
2121 		case CMD_EAGAIN:
2122 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2123 			ISPLOCK_2_CAMLOCK(isp);
2124 			xpt_done(ccb);
2125 			break;
2126 		case CMD_COMPLETE:
2127 			isp_done((struct ccb_scsiio *) ccb);
2128 			ISPLOCK_2_CAMLOCK(isp);
2129 			break;
2130 		default:
2131 			isp_prt(isp, ISP_LOGERR,
2132 			    "What's this? 0x%x at %d in file %s",
2133 			    error, __LINE__, __FILE__);
2134 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2135 			xpt_done(ccb);
2136 			ISPLOCK_2_CAMLOCK(isp);
2137 		}
2138 		break;
2139 
2140 #ifdef	ISP_TARGET_MODE
2141 	case XPT_EN_LUN:		/* Enable LUN as a target */
2142 	{
2143 		int iok;
2144 		CAMLOCK_2_ISPLOCK(isp);
2145 		iok = isp->isp_osinfo.intsok;
2146 		isp->isp_osinfo.intsok = 0;
2147 		isp_en_lun(isp, ccb);
2148 		isp->isp_osinfo.intsok = iok;
2149 		ISPLOCK_2_CAMLOCK(isp);
2150 		xpt_done(ccb);
2151 		break;
2152 	}
2153 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2154 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2155 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2156 	{
2157 		tstate_t *tptr =
2158 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2159 		if (tptr == NULL) {
2160 			ccb->ccb_h.status = CAM_LUN_INVALID;
2161 			xpt_done(ccb);
2162 			break;
2163 		}
2164 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2165 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2166 		ccb->ccb_h.flags = 0;
2167 
2168 		CAMLOCK_2_ISPLOCK(isp);
2169 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2170 			/*
2171 			 * Note that the command itself may not be done-
2172 			 * it may not even have had the first CTIO sent.
2173 			 */
2174 			tptr->atio_count++;
2175 			isp_prt(isp, ISP_LOGTDEBUG0,
2176 			    "Put FREE ATIO2, lun %d, count now %d",
2177 			    ccb->ccb_h.target_lun, tptr->atio_count);
2178 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2179 			    sim_links.sle);
2180 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2181 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2182 			    sim_links.sle);
2183 		} else {
2184 			;
2185 		}
2186 		rls_lun_statep(isp, tptr);
2187 		ccb->ccb_h.status = CAM_REQ_INPROG;
2188 		ISPLOCK_2_CAMLOCK(isp);
2189 		break;
2190 	}
2191 	case XPT_CONT_TARGET_IO:
2192 	{
2193 		CAMLOCK_2_ISPLOCK(isp);
2194 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2195 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2196 			isp_prt(isp, ISP_LOGWARN,
2197 			    "XPT_CONT_TARGET_IO: status 0x%x",
2198 			    ccb->ccb_h.status);
2199 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2200 			ISPLOCK_2_CAMLOCK(isp);
2201 			xpt_done(ccb);
2202 		} else {
2203 			ISPLOCK_2_CAMLOCK(isp);
2204 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2205 		}
2206 		break;
2207 	}
2208 #endif
2209 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2210 
2211 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2212 		tgt = ccb->ccb_h.target_id;
2213 		tgt |= (bus << 16);
2214 
2215 		CAMLOCK_2_ISPLOCK(isp);
2216 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2217 		ISPLOCK_2_CAMLOCK(isp);
2218 		if (error) {
2219 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2220 		} else {
2221 			ccb->ccb_h.status = CAM_REQ_CMP;
2222 		}
2223 		xpt_done(ccb);
2224 		break;
2225 	case XPT_ABORT:			/* Abort the specified CCB */
2226 	{
2227 		union ccb *accb = ccb->cab.abort_ccb;
2228 		CAMLOCK_2_ISPLOCK(isp);
2229 		switch (accb->ccb_h.func_code) {
2230 #ifdef	ISP_TARGET_MODE
2231 		case XPT_ACCEPT_TARGET_IO:
2232 		case XPT_IMMED_NOTIFY:
2233         		ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2234 			break;
2235 		case XPT_CONT_TARGET_IO:
2236 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2237 			ccb->ccb_h.status = CAM_UA_ABORT;
2238 			break;
2239 #endif
2240 		case XPT_SCSI_IO:
2241 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2242 			if (error) {
2243 				ccb->ccb_h.status = CAM_UA_ABORT;
2244 			} else {
2245 				ccb->ccb_h.status = CAM_REQ_CMP;
2246 			}
2247 			break;
2248 		default:
2249 			ccb->ccb_h.status = CAM_REQ_INVALID;
2250 			break;
2251 		}
2252 		ISPLOCK_2_CAMLOCK(isp);
2253 		xpt_done(ccb);
2254 		break;
2255 	}
2256 #ifdef	CAM_NEW_TRAN_CODE
2257 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2258 #else
2259 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2260 #endif
2261 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2262 		cts = &ccb->cts;
2263 		if (!IS_CURRENT_SETTINGS(cts)) {
2264 			ccb->ccb_h.status = CAM_REQ_INVALID;
2265 			xpt_done(ccb);
2266 			break;
2267 		}
2268 		tgt = cts->ccb_h.target_id;
2269 		CAMLOCK_2_ISPLOCK(isp);
2270 		if (IS_SCSI(isp)) {
2271 #ifndef	CAM_NEW_TRAN_CODE
2272 			sdparam *sdp = isp->isp_param;
2273 			u_int16_t *dptr;
2274 
2275 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2276 
2277 			sdp += bus;
2278 			/*
2279 			 * We always update (internally) from goal_flags
2280 			 * so any request to change settings just gets
2281 			 * vectored to that location.
2282 			 */
2283 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2284 
2285 			/*
2286 			 * Note that these operations affect the
2287 			 * the goal flags (goal_flags)- not
2288 			 * the current state flags. Then we mark
2289 			 * things so that the next operation to
2290 			 * this HBA will cause the update to occur.
2291 			 */
2292 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2293 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2294 					*dptr |= DPARM_DISC;
2295 				} else {
2296 					*dptr &= ~DPARM_DISC;
2297 				}
2298 			}
2299 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2300 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2301 					*dptr |= DPARM_TQING;
2302 				} else {
2303 					*dptr &= ~DPARM_TQING;
2304 				}
2305 			}
2306 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2307 				switch (cts->bus_width) {
2308 				case MSG_EXT_WDTR_BUS_16_BIT:
2309 					*dptr |= DPARM_WIDE;
2310 					break;
2311 				default:
2312 					*dptr &= ~DPARM_WIDE;
2313 				}
2314 			}
2315 			/*
2316 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2317 			 * of nonzero will cause us to go to the
2318 			 * selected (from NVRAM) maximum value for
2319 			 * this device. At a later point, we'll
2320 			 * allow finer control.
2321 			 */
2322 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2323 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2324 			    (cts->sync_offset > 0)) {
2325 				*dptr |= DPARM_SYNC;
2326 			} else {
2327 				*dptr &= ~DPARM_SYNC;
2328 			}
2329 			*dptr |= DPARM_SAFE_DFLT;
2330 #else
2331 			struct ccb_trans_settings_scsi *scsi =
2332 			    &cts->proto_specific.scsi;
2333 			struct ccb_trans_settings_spi *spi =
2334 			    &cts->xport_specific.spi;
2335 			sdparam *sdp = isp->isp_param;
2336 			u_int16_t *dptr;
2337 
2338 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2339 			sdp += bus;
2340 			/*
2341 			 * We always update (internally) from goal_flags
2342 			 * so any request to change settings just gets
2343 			 * vectored to that location.
2344 			 */
2345 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2346 
2347 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2348 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2349 					*dptr |= DPARM_DISC;
2350 				else
2351 					*dptr &= ~DPARM_DISC;
2352 			}
2353 
2354 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2355 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2356 					*dptr |= DPARM_TQING;
2357 				else
2358 					*dptr &= ~DPARM_TQING;
2359 			}
2360 
2361 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2362 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2363 					*dptr |= DPARM_WIDE;
2364 				else
2365 					*dptr &= ~DPARM_WIDE;
2366 			}
2367 
2368 			/*
2369 			 * XXX: FIX ME
2370 			 */
2371 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2372 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2373 			    (spi->sync_period && spi->sync_offset)) {
2374 				*dptr |= DPARM_SYNC;
2375 				/*
2376 				 * XXX: CHECK FOR LEGALITY
2377 				 */
2378 				sdp->isp_devparam[tgt].goal_period =
2379 				    spi->sync_period;
2380 				sdp->isp_devparam[tgt].goal_offset =
2381 				    spi->sync_offset;
2382 			} else {
2383 				*dptr &= ~DPARM_SYNC;
2384 			}
2385 #endif
2386 			isp_prt(isp, ISP_LOGDEBUG0,
2387 			    "SET bus %d targ %d to flags %x off %x per %x",
2388 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2389 			    sdp->isp_devparam[tgt].goal_offset,
2390 			    sdp->isp_devparam[tgt].goal_period);
2391 			sdp->isp_devparam[tgt].dev_update = 1;
2392 			isp->isp_update |= (1 << bus);
2393 		}
2394 		ISPLOCK_2_CAMLOCK(isp);
2395 		ccb->ccb_h.status = CAM_REQ_CMP;
2396 		xpt_done(ccb);
2397 		break;
2398 	case XPT_GET_TRAN_SETTINGS:
2399 		cts = &ccb->cts;
2400 		tgt = cts->ccb_h.target_id;
2401 		CAMLOCK_2_ISPLOCK(isp);
2402 		if (IS_FC(isp)) {
2403 #ifndef	CAM_NEW_TRAN_CODE
2404 			/*
2405 			 * a lot of normal SCSI things don't make sense.
2406 			 */
2407 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2408 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2409 			/*
2410 			 * How do you measure the width of a high
2411 			 * speed serial bus? Well, in bytes.
2412 			 *
2413 			 * Offset and period make no sense, though, so we set
2414 			 * (above) a 'base' transfer speed to be gigabit.
2415 			 */
2416 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2417 #else
2418 			fcparam *fcp = isp->isp_param;
2419 			struct ccb_trans_settings_fc *fc =
2420 			    &cts->xport_specific.fc;
2421 
2422 			cts->protocol = PROTO_SCSI;
2423 			cts->protocol_version = SCSI_REV_2;
2424 			cts->transport = XPORT_FC;
2425 			cts->transport_version = 0;
2426 
2427 			fc->valid = CTS_FC_VALID_SPEED;
2428 			if (fcp->isp_gbspeed == 2)
2429 				fc->bitrate = 200000;
2430 			else
2431 				fc->bitrate = 100000;
2432 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2433 				struct lportdb *lp = &fcp->portdb[tgt];
2434 				fc->wwnn = lp->node_wwn;
2435 				fc->wwpn = lp->port_wwn;
2436 				fc->port = lp->portid;
2437 				fc->valid |= CTS_FC_VALID_WWNN |
2438 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2439 			}
2440 #endif
2441 		} else {
2442 #ifdef	CAM_NEW_TRAN_CODE
2443 			struct ccb_trans_settings_scsi *scsi =
2444 			    &cts->proto_specific.scsi;
2445 			struct ccb_trans_settings_spi *spi =
2446 			    &cts->xport_specific.spi;
2447 #endif
2448 			sdparam *sdp = isp->isp_param;
2449 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2450 			u_int16_t dval, pval, oval;
2451 
2452 			sdp += bus;
2453 
2454 			if (IS_CURRENT_SETTINGS(cts)) {
2455 				sdp->isp_devparam[tgt].dev_refresh = 1;
2456 				isp->isp_update |= (1 << bus);
2457 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2458 				    NULL);
2459 				dval = sdp->isp_devparam[tgt].actv_flags;
2460 				oval = sdp->isp_devparam[tgt].actv_offset;
2461 				pval = sdp->isp_devparam[tgt].actv_period;
2462 			} else {
2463 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2464 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2465 				pval = sdp->isp_devparam[tgt].nvrm_period;
2466 			}
2467 
2468 #ifndef	CAM_NEW_TRAN_CODE
2469 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2470 
2471 			if (dval & DPARM_DISC) {
2472 				cts->flags |= CCB_TRANS_DISC_ENB;
2473 			}
2474 			if (dval & DPARM_TQING) {
2475 				cts->flags |= CCB_TRANS_TAG_ENB;
2476 			}
2477 			if (dval & DPARM_WIDE) {
2478 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2479 			} else {
2480 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2481 			}
2482 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2483 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2484 
2485 			if ((dval & DPARM_SYNC) && oval != 0) {
2486 				cts->sync_period = pval;
2487 				cts->sync_offset = oval;
2488 				cts->valid |=
2489 				    CCB_TRANS_SYNC_RATE_VALID |
2490 				    CCB_TRANS_SYNC_OFFSET_VALID;
2491 			}
2492 #else
2493 			cts->protocol = PROTO_SCSI;
2494 			cts->protocol_version = SCSI_REV_2;
2495 			cts->transport = XPORT_SPI;
2496 			cts->transport_version = 2;
2497 
2498 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2499 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2500 			if (dval & DPARM_DISC) {
2501 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2502 			}
2503 			if (dval & DPARM_TQING) {
2504 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2505 			}
2506 			if ((dval & DPARM_SYNC) && oval && pval) {
2507 				spi->sync_offset = oval;
2508 				spi->sync_period = pval;
2509 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2510 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2511 			}
2512 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2513 			if (dval & DPARM_WIDE) {
2514 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2515 			} else {
2516 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2517 			}
2518 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2519 				scsi->valid = CTS_SCSI_VALID_TQ;
2520 				spi->valid |= CTS_SPI_VALID_DISC;
2521 			} else {
2522 				scsi->valid = 0;
2523 			}
2524 #endif
2525 			isp_prt(isp, ISP_LOGDEBUG0,
2526 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2527 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2528 			    bus, tgt, dval, oval, pval);
2529 		}
2530 		ISPLOCK_2_CAMLOCK(isp);
2531 		ccb->ccb_h.status = CAM_REQ_CMP;
2532 		xpt_done(ccb);
2533 		break;
2534 
2535 	case XPT_CALC_GEOMETRY:
2536 	{
2537 		struct ccb_calc_geometry *ccg;
2538 
2539 		ccg = &ccb->ccg;
2540 		if (ccg->block_size == 0) {
2541 			isp_prt(isp, ISP_LOGERR,
2542 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2543 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2544 			ccb->ccb_h.status = CAM_REQ_INVALID;
2545 			xpt_done(ccb);
2546 			break;
2547 		}
2548 		cam_calc_geometry(ccg, /*extended*/1);
2549 		xpt_done(ccb);
2550 		break;
2551 	}
2552 	case XPT_RESET_BUS:		/* Reset the specified bus */
2553 		bus = cam_sim_bus(sim);
2554 		CAMLOCK_2_ISPLOCK(isp);
2555 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2556 		ISPLOCK_2_CAMLOCK(isp);
2557 		if (error)
2558 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2559 		else {
2560 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2561 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2562 			else if (isp->isp_path != NULL)
2563 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2564 			ccb->ccb_h.status = CAM_REQ_CMP;
2565 		}
2566 		xpt_done(ccb);
2567 		break;
2568 
2569 	case XPT_TERM_IO:		/* Terminate the I/O process */
2570 		ccb->ccb_h.status = CAM_REQ_INVALID;
2571 		xpt_done(ccb);
2572 		break;
2573 
2574 	case XPT_PATH_INQ:		/* Path routing inquiry */
2575 	{
2576 		struct ccb_pathinq *cpi = &ccb->cpi;
2577 
2578 		cpi->version_num = 1;
2579 #ifdef	ISP_TARGET_MODE
2580 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2581 #else
2582 		cpi->target_sprt = 0;
2583 #endif
2584 		cpi->hba_eng_cnt = 0;
2585 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2586 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2587 		cpi->bus_id = cam_sim_bus(sim);
2588 		if (IS_FC(isp)) {
2589 			cpi->hba_misc = PIM_NOBUSRESET;
2590 			/*
2591 			 * Because our loop ID can shift from time to time,
2592 			 * make our initiator ID out of range of our bus.
2593 			 */
2594 			cpi->initiator_id = cpi->max_target + 1;
2595 
2596 			/*
2597 			 * Set base transfer capabilities for Fibre Channel.
2598 			 * Technically not correct because we don't know
2599 			 * what media we're running on top of- but we'll
2600 			 * look good if we always say 100MB/s.
2601 			 */
2602 			if (FCPARAM(isp)->isp_gbspeed == 2)
2603 				cpi->base_transfer_speed = 200000;
2604 			else
2605 				cpi->base_transfer_speed = 100000;
2606 			cpi->hba_inquiry = PI_TAG_ABLE;
2607 #ifdef	CAM_NEW_TRAN_CODE
2608 			cpi->transport = XPORT_FC;
2609 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2610 #endif
2611 		} else {
2612 			sdparam *sdp = isp->isp_param;
2613 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2614 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2615 			cpi->hba_misc = 0;
2616 			cpi->initiator_id = sdp->isp_initiator_id;
2617 			cpi->base_transfer_speed = 3300;
2618 #ifdef	CAM_NEW_TRAN_CODE
2619 			cpi->transport = XPORT_SPI;
2620 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2621 #endif
2622 		}
2623 #ifdef	CAM_NEW_TRAN_CODE
2624 		cpi->protocol = PROTO_SCSI;
2625 		cpi->protocol_version = SCSI_REV_2;
2626 #endif
2627 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2628 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2629 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2630 		cpi->unit_number = cam_sim_unit(sim);
2631 		cpi->ccb_h.status = CAM_REQ_CMP;
2632 		xpt_done(ccb);
2633 		break;
2634 	}
2635 	default:
2636 		ccb->ccb_h.status = CAM_REQ_INVALID;
2637 		xpt_done(ccb);
2638 		break;
2639 	}
2640 }
2641 
2642 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
/*
 * Command completion callback, invoked by the core driver when a
 * SCSI CCB previously handed to isp_start() finishes.  Maps the
 * chip-level result into CAM status bits, freezes the device queue
 * on error, cancels the watchdog timeout and hands the CCB back to
 * CAM via xpt_done().
 *
 * NOTE(review): called with the ISP lock held; the lock is dropped
 * around xpt_done() via ISPLOCK_2_CAMLOCK/CAMLOCK_2_ISPLOCK.
 */
void
isp_done(struct ccb_scsiio *sccb)
{
	struct ispsoftc *isp = XS_ISP(sccb);

	/* No error recorded by the core driver means success. */
	if (XS_NOERR(sccb))
		XS_SETERR(sccb, CAM_REQ_CMP);

	/*
	 * The transport completed OK but the target returned a non-good
	 * SCSI status: translate that into a CAM status.  A CHECK
	 * CONDITION without valid autosense data is reported as an
	 * autosense failure; everything else as a SCSI status error.
	 */
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    (sccb->scsi_status != SCSI_STATUS_OK)) {
		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		} else {
			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		}
	}

	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	/*
	 * On any failure, freeze the device queue (once) so CAM can
	 * recover in an orderly fashion; CAM_DEV_QFRZN tells CAM the
	 * freeze happened.
	 */
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
			sccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(sccb->ccb_h.path, 1);
			isp_prt(isp, ISP_LOGDEBUG0,
			    "freeze devq %d.%d cam sts %x scsi sts %x",
			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
			    sccb->ccb_h.status, sccb->scsi_status);
		}
	}

	/* If CAM-level debugging is enabled on this path, log failures. */
	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print_path(sccb->ccb_h.path);
		isp_prt(isp, ISP_LOGINFO,
		    "cam completion status 0x%x", sccb->ccb_h.status);
	}

	/*
	 * Mark the command done.  If the watchdog is not currently
	 * running for this command, cancel its timeout and complete the
	 * CCB now; otherwise the watchdog owns final completion.
	 */
	XS_CMD_S_DONE(sccb);
	if (XS_CMD_WDOG_P(sccb) == 0) {
		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
		if (XS_CMD_GRACE_P(sccb)) {
			/* Command finished during its grace period. */
			isp_prt(isp, ISP_LOGDEBUG2,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(sccb);
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done((union ccb *) sccb);
		CAMLOCK_2_ISPLOCK(isp);
	}
}
2694 
2695 int
2696 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2697 {
2698 	int bus, rv = 0;
2699 	switch (cmd) {
2700 	case ISPASYNC_NEW_TGT_PARAMS:
2701 	{
2702 #ifdef	CAM_NEW_TRAN_CODE
2703 		struct ccb_trans_settings_scsi *scsi;
2704 		struct ccb_trans_settings_spi *spi;
2705 #endif
2706 		int flags, tgt;
2707 		sdparam *sdp = isp->isp_param;
2708 		struct ccb_trans_settings cts;
2709 		struct cam_path *tmppath;
2710 
2711 		bzero(&cts, sizeof (struct ccb_trans_settings));
2712 
2713 		tgt = *((int *)arg);
2714 		bus = (tgt >> 16) & 0xffff;
2715 		tgt &= 0xffff;
2716 		sdp += bus;
2717 		ISPLOCK_2_CAMLOCK(isp);
2718 		if (xpt_create_path(&tmppath, NULL,
2719 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2720 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2721 			CAMLOCK_2_ISPLOCK(isp);
2722 			isp_prt(isp, ISP_LOGWARN,
2723 			    "isp_async cannot make temp path for %d.%d",
2724 			    tgt, bus);
2725 			rv = -1;
2726 			break;
2727 		}
2728 		CAMLOCK_2_ISPLOCK(isp);
2729 		flags = sdp->isp_devparam[tgt].actv_flags;
2730 #ifdef	CAM_NEW_TRAN_CODE
2731 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2732 		cts.protocol = PROTO_SCSI;
2733 		cts.transport = XPORT_SPI;
2734 
2735 		scsi = &cts.proto_specific.scsi;
2736 		spi = &cts.xport_specific.spi;
2737 
2738 		if (flags & DPARM_TQING) {
2739 			scsi->valid |= CTS_SCSI_VALID_TQ;
2740 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2741 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2742 		}
2743 
2744 		if (flags & DPARM_DISC) {
2745 			spi->valid |= CTS_SPI_VALID_DISC;
2746 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2747 		}
2748 		spi->flags |= CTS_SPI_VALID_BUS_WIDTH;
2749 		if (flags & DPARM_WIDE) {
2750 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2751 		} else {
2752 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2753 		}
2754 		if (flags & DPARM_SYNC) {
2755 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2756 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2757 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2758 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2759 		}
2760 #else
2761 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2762 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2763 		if (flags & DPARM_DISC) {
2764 			cts.flags |= CCB_TRANS_DISC_ENB;
2765 		}
2766 		if (flags & DPARM_TQING) {
2767 			cts.flags |= CCB_TRANS_TAG_ENB;
2768 		}
2769 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2770 		cts.bus_width = (flags & DPARM_WIDE)?
2771 		    MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT;
2772 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2773 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2774 		if (flags & DPARM_SYNC) {
2775 			cts.valid |=
2776 			    CCB_TRANS_SYNC_RATE_VALID |
2777 			    CCB_TRANS_SYNC_OFFSET_VALID;
2778 		}
2779 #endif
2780 		isp_prt(isp, ISP_LOGDEBUG2,
2781 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2782 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2783 		    sdp->isp_devparam[tgt].actv_offset, flags);
2784 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2785 		ISPLOCK_2_CAMLOCK(isp);
2786 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2787 		xpt_free_path(tmppath);
2788 		CAMLOCK_2_ISPLOCK(isp);
2789 		break;
2790 	}
2791 	case ISPASYNC_BUS_RESET:
2792 		bus = *((int *)arg);
2793 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2794 		    bus);
2795 		if (bus > 0 && isp->isp_path2) {
2796 			ISPLOCK_2_CAMLOCK(isp);
2797 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2798 			CAMLOCK_2_ISPLOCK(isp);
2799 		} else if (isp->isp_path) {
2800 			ISPLOCK_2_CAMLOCK(isp);
2801 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2802 			CAMLOCK_2_ISPLOCK(isp);
2803 		}
2804 		break;
2805 	case ISPASYNC_LIP:
2806 		if (isp->isp_path) {
2807 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2808 		}
2809 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2810 		break;
2811 	case ISPASYNC_LOOP_RESET:
2812 		if (isp->isp_path) {
2813 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2814 		}
2815 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2816 		break;
2817 	case ISPASYNC_LOOP_DOWN:
2818 		if (isp->isp_path) {
2819 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2820 		}
2821 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2822 		break;
2823 	case ISPASYNC_LOOP_UP:
2824 		/*
2825 		 * Now we just note that Loop has come up. We don't
2826 		 * actually do anything because we're waiting for a
2827 		 * Change Notify before activating the FC cleanup
2828 		 * thread to look at the state of the loop again.
2829 		 */
2830 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2831 		break;
2832 	case ISPASYNC_PROMENADE:
2833 	{
2834 		struct cam_path *tmppath;
2835 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2836 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2837 		static const char *roles[4] = {
2838 		    "(none)", "Target", "Initiator", "Target/Initiator"
2839 		};
2840 		fcparam *fcp = isp->isp_param;
2841 		int tgt = *((int *) arg);
2842 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2843 		struct lportdb *lp = &fcp->portdb[tgt];
2844 
2845 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2846 		    roles[lp->roles & 0x3],
2847 		    (lp->valid)? "Arrived" : "Departed",
2848 		    (u_int32_t) (lp->port_wwn >> 32),
2849 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2850 		    (u_int32_t) (lp->node_wwn >> 32),
2851 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2852 
2853 		ISPLOCK_2_CAMLOCK(isp);
2854 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2855 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2856 			CAMLOCK_2_ISPLOCK(isp);
2857                         break;
2858                 }
2859 		/*
2860 		 * Policy: only announce targets.
2861 		 */
2862 		if (lp->roles & is_tgt_mask) {
2863 			if (lp->valid) {
2864 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2865 			} else {
2866 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2867 			}
2868 		}
2869 		xpt_free_path(tmppath);
2870 		CAMLOCK_2_ISPLOCK(isp);
2871 		break;
2872 	}
2873 	case ISPASYNC_CHANGE_NOTIFY:
2874 		if (arg == ISPASYNC_CHANGE_PDB) {
2875 			isp_prt(isp, ISP_LOGINFO,
2876 			    "Port Database Changed");
2877 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2878 			isp_prt(isp, ISP_LOGINFO,
2879 			    "Name Server Database Changed");
2880 		}
2881 #ifdef	ISP_SMPLOCK
2882 		cv_signal(&isp->isp_osinfo.kthread_cv);
2883 #else
2884 		wakeup(&isp->isp_osinfo.kthread_cv);
2885 #endif
2886 		break;
2887 	case ISPASYNC_FABRIC_DEV:
2888 	{
2889 		int target, base, lim;
2890 		fcparam *fcp = isp->isp_param;
2891 		struct lportdb *lp = NULL;
2892 		struct lportdb *clp = (struct lportdb *) arg;
2893 		char *pt;
2894 
2895 		switch (clp->port_type) {
2896 		case 1:
2897 			pt = "   N_Port";
2898 			break;
2899 		case 2:
2900 			pt = "  NL_Port";
2901 			break;
2902 		case 3:
2903 			pt = "F/NL_Port";
2904 			break;
2905 		case 0x7f:
2906 			pt = "  Nx_Port";
2907 			break;
2908 		case 0x81:
2909 			pt = "  F_port";
2910 			break;
2911 		case 0x82:
2912 			pt = "  FL_Port";
2913 			break;
2914 		case 0x84:
2915 			pt = "   E_port";
2916 			break;
2917 		default:
2918 			pt = " ";
2919 			break;
2920 		}
2921 
2922 		isp_prt(isp, ISP_LOGINFO,
2923 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2924 
2925 		/*
2926 		 * If we don't have an initiator role we bail.
2927 		 *
2928 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2929 		 */
2930 
2931 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2932 			break;
2933 		}
2934 
2935 		/*
2936 		 * Is this entry for us? If so, we bail.
2937 		 */
2938 
2939 		if (fcp->isp_portid == clp->portid) {
2940 			break;
2941 		}
2942 
2943 		/*
2944 		 * Else, the default policy is to find room for it in
2945 		 * our local port database. Later, when we execute
2946 		 * the call to isp_pdb_sync either this newly arrived
2947 		 * or already logged in device will be (re)announced.
2948 		 */
2949 
2950 		if (fcp->isp_topo == TOPO_FL_PORT)
2951 			base = FC_SNS_ID+1;
2952 		else
2953 			base = 0;
2954 
2955 		if (fcp->isp_topo == TOPO_N_PORT)
2956 			lim = 1;
2957 		else
2958 			lim = MAX_FC_TARG;
2959 
2960 		/*
2961 		 * Is it already in our list?
2962 		 */
2963 		for (target = base; target < lim; target++) {
2964 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2965 				continue;
2966 			}
2967 			lp = &fcp->portdb[target];
2968 			if (lp->port_wwn == clp->port_wwn &&
2969 			    lp->node_wwn == clp->node_wwn) {
2970 				lp->fabric_dev = 1;
2971 				break;
2972 			}
2973 		}
2974 		if (target < lim) {
2975 			break;
2976 		}
2977 		for (target = base; target < lim; target++) {
2978 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2979 				continue;
2980 			}
2981 			lp = &fcp->portdb[target];
2982 			if (lp->port_wwn == 0) {
2983 				break;
2984 			}
2985 		}
2986 		if (target == lim) {
2987 			isp_prt(isp, ISP_LOGWARN,
2988 			    "out of space for fabric devices");
2989 			break;
2990 		}
2991 		lp->port_type = clp->port_type;
2992 		lp->fc4_type = clp->fc4_type;
2993 		lp->node_wwn = clp->node_wwn;
2994 		lp->port_wwn = clp->port_wwn;
2995 		lp->portid = clp->portid;
2996 		lp->fabric_dev = 1;
2997 		break;
2998 	}
2999 #ifdef	ISP_TARGET_MODE
3000 	case ISPASYNC_TARGET_MESSAGE:
3001 	{
3002 		tmd_msg_t *mp = arg;
3003 		isp_prt(isp, ISP_LOGALL,
3004 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3005 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3006 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3007 		    mp->nt_msg[0]);
3008 		break;
3009 	}
3010 	case ISPASYNC_TARGET_EVENT:
3011 	{
3012 		tmd_event_t *ep = arg;
3013 		isp_prt(isp, ISP_LOGALL,
3014 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
3015 		break;
3016 	}
3017 	case ISPASYNC_TARGET_ACTION:
3018 		switch (((isphdr_t *)arg)->rqs_entry_type) {
3019 		default:
3020 			isp_prt(isp, ISP_LOGWARN,
3021 			   "event 0x%x for unhandled target action",
3022 			    ((isphdr_t *)arg)->rqs_entry_type);
3023 			break;
3024 		case RQSTYPE_NOTIFY:
3025 			if (IS_SCSI(isp)) {
3026 				rv = isp_handle_platform_notify_scsi(isp,
3027 				    (in_entry_t *) arg);
3028 			} else {
3029 				rv = isp_handle_platform_notify_fc(isp,
3030 				    (in_fcentry_t *) arg);
3031 			}
3032 			break;
3033 		case RQSTYPE_ATIO:
3034 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3035 			break;
3036 		case RQSTYPE_ATIO2:
3037 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3038 			break;
3039 		case RQSTYPE_CTIO2:
3040 		case RQSTYPE_CTIO:
3041 			rv = isp_handle_platform_ctio(isp, arg);
3042 			break;
3043 		case RQSTYPE_ENABLE_LUN:
3044 		case RQSTYPE_MODIFY_LUN:
3045 			if (IS_DUALBUS(isp)) {
3046 				bus =
3047 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3048 			} else {
3049 				bus = 0;
3050 			}
3051 			isp_cv_signal_rqe(isp, bus,
3052 			    ((lun_entry_t *)arg)->le_status);
3053 			break;
3054 		}
3055 		break;
3056 #endif
3057 	case ISPASYNC_FW_CRASH:
3058 	{
3059 		u_int16_t mbox1, mbox6;
3060 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3061 		if (IS_DUALBUS(isp)) {
3062 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
3063 		} else {
3064 			mbox6 = 0;
3065 		}
3066                 isp_prt(isp, ISP_LOGERR,
3067                     "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3068                     mbox6, mbox1);
3069 #ifdef	ISP_FW_CRASH_DUMP
3070 		/*
3071 		 * XXX: really need a thread to do this right.
3072 		 */
3073 		if (IS_FC(isp)) {
3074 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3075 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3076 			isp_freeze_loopdown(isp, "f/w crash");
3077 			isp_fw_dump(isp);
3078 		}
3079 		isp_reinit(isp);
3080 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3081 #endif
3082 		break;
3083 	}
3084 	case ISPASYNC_UNHANDLED_RESPONSE:
3085 		break;
3086 	default:
3087 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3088 		break;
3089 	}
3090 	return (rv);
3091 }
3092 
3093 
3094 /*
3095  * Locks are held before coming here.
3096  */
3097 void
3098 isp_uninit(struct ispsoftc *isp)
3099 {
3100 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3101 	DISABLE_INTS(isp);
3102 }
3103 
3104 void
3105 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3106 {
3107 	va_list ap;
3108 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3109 		return;
3110 	}
3111 	printf("%s: ", device_get_nameunit(isp->isp_dev));
3112 	va_start(ap, fmt);
3113 	vprintf(fmt, ap);
3114 	va_end(ap);
3115 	printf("\n");
3116 }
3117