xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 11f0b352e05306cf6f1f85e9087022c0a92624a3)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
31 #include <machine/stdarg.h>	/* for use by isp_prt below */
32 #include <sys/conf.h>
33 #include <sys/ioccom.h>
34 #include <dev/isp/isp_ioctl.h>
35 
36 
37 int isp_announced = 0;
38 ispfwfunc *isp_get_firmware_p = NULL;
39 
40 static d_ioctl_t ispioctl;
41 static void isp_intr_enable(void *);
42 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
43 static void isp_poll(struct cam_sim *);
44 static timeout_t isp_watchdog;
45 static void isp_kthread(void *);
46 static void isp_action(struct cam_sim *, union ccb *);
47 
48 
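/*
 * Statically assigned character device major number for the isp control
 * device nodes created in isp_attach() below.
 */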
49 #define ISP_CDEV_MAJOR	248
50 static struct cdevsw isp_cdevsw = {
51 	/* open */	nullopen,
52 	/* close */	nullclose,
53 	/* read */	noread,
54 	/* write */	nowrite,
55 	/* ioctl */	ispioctl,
56 	/* poll */	nopoll,
57 	/* mmap */	nommap,
58 	/* strategy */	nostrategy,
59 	/* name */	"isp",
60 	/* maj */	ISP_CDEV_MAJOR,
61 	/* dump */	nodump,
62 	/* psize */	nopsize,
63 	/* flags */	D_TAPE,
64 };
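/*
 * Userland reaches the driver through the /dev/isp<unit> nodes created in
 * isp_attach() below, using the ISP_* ioctls from <dev/isp/isp_ioctl.h>.
 * A minimal (hypothetical) consumer might look something like:
 *
 *	int fd = open("/dev/isp0", O_RDONLY);
 *	struct isp_hba_device hba;
 *	if (fd >= 0 && ioctl(fd, ISP_FC_GETHINFO, (caddr_t) &hba) == 0)
 *		... examine hba.fc_loopid, hba.fc_topology, ...
 *
 * See ispioctl() for the commands that are actually handled.
 */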
65 
66 static struct ispsoftc *isplist = NULL;
67 
68 void
69 isp_attach(struct ispsoftc *isp)
70 {
71 	int primary, secondary;
72 	struct ccb_setasync csa;
73 	struct cam_devq *devq;
74 	struct cam_sim *sim;
75 	struct cam_path *path;
76 
77 	/*
78 	 * Establish (in case of 12X0) which bus is the primary.
79 	 */
80 
81 	primary = 0;
82 	secondary = 1;
83 
84 	/*
85 	 * Create the device queue for our SIM(s).
86 	 */
87 	devq = cam_simq_alloc(isp->isp_maxcmds);
88 	if (devq == NULL) {
89 		return;
90 	}
91 
92 	/*
93 	 * Construct our SIM entry.
94 	 */
95 	ISPLOCK_2_CAMLOCK(isp);
96 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
97 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
98 	if (sim == NULL) {
99 		cam_simq_free(devq);
100 		CAMLOCK_2_ISPLOCK(isp);
101 		return;
102 	}
103 	CAMLOCK_2_ISPLOCK(isp);
104 
105 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
106 	isp->isp_osinfo.ehook.ich_arg = isp;
107 	ISPLOCK_2_CAMLOCK(isp);
108 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
109 		cam_sim_free(sim, TRUE);
110 		CAMLOCK_2_ISPLOCK(isp);
111 		isp_prt(isp, ISP_LOGERR,
112 		    "could not establish interrupt enable hook");
113 		return;
114 	}
115 
116 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
117 		cam_sim_free(sim, TRUE);
118 		CAMLOCK_2_ISPLOCK(isp);
119 		return;
120 	}
121 
122 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
123 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
124 		xpt_bus_deregister(cam_sim_path(sim));
125 		cam_sim_free(sim, TRUE);
126 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
127 		CAMLOCK_2_ISPLOCK(isp);
128 		return;
129 	}
130 
131 	xpt_setup_ccb(&csa.ccb_h, path, 5);
132 	csa.ccb_h.func_code = XPT_SASYNC_CB;
133 	csa.event_enable = AC_LOST_DEVICE;
134 	csa.callback = isp_cam_async;
135 	csa.callback_arg = sim;
136 	xpt_action((union ccb *)&csa);
137 	CAMLOCK_2_ISPLOCK(isp);
138 	isp->isp_sim = sim;
139 	isp->isp_path = path;
140 	/*
141 	 * Create a kernel thread for fibre channel instances. We
142 	 * don't have dual channel FC cards.
143 	 */
144 	if (IS_FC(isp)) {
145 		ISPLOCK_2_CAMLOCK(isp);
146 		/* XXX: LOCK VIOLATION */
147 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
148 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
149 		    RFHIGHPID, "%s: fc_thrd",
150 		    device_get_nameunit(isp->isp_dev))) {
151 			xpt_bus_deregister(cam_sim_path(sim));
152 			cam_sim_free(sim, TRUE);
153 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
154 			CAMLOCK_2_ISPLOCK(isp);
155 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
156 			return;
157 		}
158 		CAMLOCK_2_ISPLOCK(isp);
159 	}
160 
161 
162 	/*
163 	 * If we have a second channel, construct SIM entry for that.
164 	 */
165 	if (IS_DUALBUS(isp)) {
166 		ISPLOCK_2_CAMLOCK(isp);
167 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
168 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
169 		if (sim == NULL) {
170 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
171 			xpt_free_path(isp->isp_path);
172 			cam_simq_free(devq);
173 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
174 			return;
175 		}
176 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
177 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
178 			xpt_free_path(isp->isp_path);
179 			cam_sim_free(sim, TRUE);
180 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
181 			CAMLOCK_2_ISPLOCK(isp);
182 			return;
183 		}
184 
185 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
186 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
187 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
188 			xpt_free_path(isp->isp_path);
189 			xpt_bus_deregister(cam_sim_path(sim));
190 			cam_sim_free(sim, TRUE);
191 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
192 			CAMLOCK_2_ISPLOCK(isp);
193 			return;
194 		}
195 
196 		xpt_setup_ccb(&csa.ccb_h, path, 5);
197 		csa.ccb_h.func_code = XPT_SASYNC_CB;
198 		csa.event_enable = AC_LOST_DEVICE;
199 		csa.callback = isp_cam_async;
200 		csa.callback_arg = sim;
201 		xpt_action((union ccb *)&csa);
202 		CAMLOCK_2_ISPLOCK(isp);
203 		isp->isp_sim2 = sim;
204 		isp->isp_path2 = path;
205 	}
206 
207 #ifdef	ISP_TARGET_MODE
208 	cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
209 	cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
210 	cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
211 	cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
212 #endif
213 	/*
214 	 * Create device nodes
215 	 */
216 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
217 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
218 
219 	if (isp->isp_role != ISP_ROLE_NONE) {
220 		isp->isp_state = ISP_RUNSTATE;
221 		ENABLE_INTS(isp);
222 	}
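	/*
	 * Chain this instance onto the global isplist so that ispioctl()
	 * can map a device minor number back to its softc.
	 */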
223 	if (isplist == NULL) {
224 		isplist = isp;
225 	} else {
226 		struct ispsoftc *tmp = isplist;
227 		while (tmp->isp_osinfo.next) {
228 			tmp = tmp->isp_osinfo.next;
229 		}
230 		tmp->isp_osinfo.next = isp;
231 	}
232 
233 }
234 
235 static __inline void
236 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
237 {
238 	if (isp->isp_osinfo.simqfrozen == 0) {
239 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
240 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
241 		ISPLOCK_2_CAMLOCK(isp);
242 		xpt_freeze_simq(isp->isp_sim, 1);
243 		CAMLOCK_2_ISPLOCK(isp);
244 	} else {
245 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
246 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
247 	}
248 }
249 
250 static int
251 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
252 {
253 	struct ispsoftc *isp;
254 	int retval = ENOTTY;
255 
256 	isp = isplist;
257 	while (isp) {
258 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
259 			break;
260 		}
261 		isp = isp->isp_osinfo.next;
262 	}
263 	if (isp == NULL)
264 		return (ENXIO);
265 
266 	switch (cmd) {
267 #ifdef	ISP_FW_CRASH_DUMP
268 	case ISP_GET_FW_CRASH_DUMP:
269 	{
270 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
271 		size_t sz;
272 
273 		retval = 0;
274 		if (IS_2200(isp))
275 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
276 		else
277 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
278 		ISP_LOCK(isp);
279 		if (ptr && *ptr) {
280 			void *uaddr = *((void **) addr);
281 			if (copyout(ptr, uaddr, sz)) {
282 				retval = EFAULT;
283 			} else {
284 				*ptr = 0;
285 			}
286 		} else {
287 			retval = ENXIO;
288 		}
289 		ISP_UNLOCK(isp);
290 		break;
291 	}
292 
293 	case ISP_FORCE_CRASH_DUMP:
294 		ISP_LOCK(isp);
295 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
296 		isp_fw_dump(isp);
297 		isp_reinit(isp);
298 		ISP_UNLOCK(isp);
299 		retval = 0;
300 		break;
301 #endif
302 	case ISP_SDBLEV:
303 	{
304 		int olddblev = isp->isp_dblev;
305 		isp->isp_dblev = *(int *)addr;
306 		*(int *)addr = olddblev;
307 		retval = 0;
308 		break;
309 	}
310 	case ISP_RESETHBA:
311 		ISP_LOCK(isp);
312 		isp_reinit(isp);
313 		ISP_UNLOCK(isp);
314 		retval = 0;
315 		break;
316 	case ISP_RESCAN:
317 		if (IS_FC(isp)) {
318 			ISP_LOCK(isp);
319 			if (isp_fc_runstate(isp, 5 * 1000000)) {
320 				retval = EIO;
321 			} else {
322 				retval = 0;
323 			}
324 			ISP_UNLOCK(isp);
325 		}
326 		break;
327 	case ISP_FC_LIP:
328 		if (IS_FC(isp)) {
329 			ISP_LOCK(isp);
330 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
331 				retval = EIO;
332 			} else {
333 				retval = 0;
334 			}
335 			ISP_UNLOCK(isp);
336 		}
337 		break;
338 	case ISP_FC_GETDINFO:
339 	{
340 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
341 		struct lportdb *lp;
342 
343 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
344 			retval = EINVAL;
345 			break;
346 		}
347 		ISP_LOCK(isp);
348 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
349 		if (lp->valid) {
350 			ifc->loopid = lp->loopid;
351 			ifc->portid = lp->portid;
352 			ifc->node_wwn = lp->node_wwn;
353 			ifc->port_wwn = lp->port_wwn;
354 			retval = 0;
355 		} else {
356 			retval = ENODEV;
357 		}
358 		ISP_UNLOCK(isp);
359 		break;
360 	}
361 	case ISP_GET_STATS:
362 	{
363 		isp_stats_t *sp = (isp_stats_t *) addr;
364 
365 		MEMZERO(sp, sizeof (*sp));
366 		sp->isp_stat_version = ISP_STATS_VERSION;
367 		sp->isp_type = isp->isp_type;
368 		sp->isp_revision = isp->isp_revision;
369 		ISP_LOCK(isp);
370 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
371 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
372 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
373 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
374 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
375 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
376 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
377 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
378 		ISP_UNLOCK(isp);
379 		retval = 0;
380 		break;
381 	}
382 	case ISP_CLR_STATS:
383 		ISP_LOCK(isp);
384 		isp->isp_intcnt = 0;
385 		isp->isp_intbogus = 0;
386 		isp->isp_intmboxc = 0;
387 		isp->isp_intoasync = 0;
388 		isp->isp_rsltccmplt = 0;
389 		isp->isp_fphccmplt = 0;
390 		isp->isp_rscchiwater = 0;
391 		isp->isp_fpcchiwater = 0;
392 		ISP_UNLOCK(isp);
393 		retval = 0;
394 		break;
395 	case ISP_FC_GETHINFO:
396 	{
397 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
398 		MEMZERO(hba, sizeof (*hba));
399 		ISP_LOCK(isp);
400 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
401 		hba->fc_scsi_supported = 1;
402 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
403 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
404 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
405 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
406 		ISP_UNLOCK(isp);
407 		retval = 0;
408 		break;
409 	}
410 	case ISP_GET_FC_PARAM:
411 	{
412 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
413 
414 		if (!IS_FC(isp)) {
415 			retval = EINVAL;
416 			break;
417 		}
418 		f->parameter = 0;
419 		if (strcmp(f->param_name, "framelength") == 0) {
420 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
421 			retval = 0;
422 			break;
423 		}
424 		if (strcmp(f->param_name, "exec_throttle") == 0) {
425 			f->parameter = FCPARAM(isp)->isp_execthrottle;
426 			retval = 0;
427 			break;
428 		}
429 		if (strcmp(f->param_name, "fullduplex") == 0) {
430 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
431 				f->parameter = 1;
432 			retval = 0;
433 			break;
434 		}
435 		if (strcmp(f->param_name, "loopid") == 0) {
436 			f->parameter = FCPARAM(isp)->isp_loopid;
437 			retval = 0;
438 			break;
439 		}
440 		retval = EINVAL;
441 		break;
442 	}
443 	case ISP_SET_FC_PARAM:
444 	{
445 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
446 		u_int32_t param = f->parameter;
447 
448 		if (!IS_FC(isp)) {
449 			retval = EINVAL;
450 			break;
451 		}
452 		f->parameter = 0;
453 		if (strcmp(f->param_name, "framelength") == 0) {
454 			if (param != 512 && param != 1024 && param != 2048) {
455 				retval = EINVAL;
456 				break;
457 			}
458 			FCPARAM(isp)->isp_maxfrmlen = param;
459 			retval = 0;
460 			break;
461 		}
462 		if (strcmp(f->param_name, "exec_throttle") == 0) {
463 			if (param < 16 || param > 255) {
464 				retval = EINVAL;
465 				break;
466 			}
467 			FCPARAM(isp)->isp_execthrottle = param;
468 			retval = 0;
469 			break;
470 		}
471 		if (strcmp(f->param_name, "fullduplex") == 0) {
472 			if (param != 0 && param != 1) {
473 				retval = EINVAL;
474 				break;
475 			}
476 			if (param) {
477 				FCPARAM(isp)->isp_fwoptions |=
478 				    ICBOPT_FULL_DUPLEX;
479 			} else {
480 				FCPARAM(isp)->isp_fwoptions &=
481 				    ~ICBOPT_FULL_DUPLEX;
482 			}
483 			retval = 0;
484 			break;
485 		}
486 		if (strcmp(f->param_name, "loopid") == 0) {
487 			if (param < 0 || param > 125) {
488 				retval = EINVAL;
489 				break;
490 			}
491 			FCPARAM(isp)->isp_loopid = param;
492 			retval = 0;
493 			break;
494 		}
495 		retval = EINVAL;
496 		break;
497 	}
498 	default:
499 		break;
500 	}
501 	return (retval);
502 }
503 
504 static void
505 isp_intr_enable(void *arg)
506 {
507 	struct ispsoftc *isp = arg;
508 	if (isp->isp_role != ISP_ROLE_NONE) {
509 		ENABLE_INTS(isp);
510 		isp->isp_osinfo.intsok = 1;
511 	}
512 	/* Release our hook so that the boot can continue. */
513 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
514 }
515 
516 /*
517  * Put the target mode functions here, because some are inlines
518  */
519 
520 #ifdef	ISP_TARGET_MODE
521 
522 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
523 static __inline int are_any_luns_enabled(struct ispsoftc *, int);
524 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
525 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
526 static __inline int isp_psema_sig_rqe(struct ispsoftc *, int);
527 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
528 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int, int);
529 static __inline void isp_vsema_rqe(struct ispsoftc *, int);
530 static __inline atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
531 static cam_status
532 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
533 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
534 static void isp_en_lun(struct ispsoftc *, union ccb *);
535 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
536 static timeout_t isp_refire_putback_atio;
537 static void isp_complete_ctio(union ccb *);
538 static void isp_target_putback_atio(union ccb *);
539 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
540 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
541 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
542 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
543 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
544 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
545 
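/*
 * The lun_hash table is a chained hash keyed by LUN_HASH_FUNC(bus, lun).
 * Each tstate_t carries a 'hold' reference count: get_lun_statep() and
 * create_lun_state() take a hold, rls_lun_statep() drops it, and
 * destroy_lun_state() refuses to free a tstate that is still held.
 */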
546 static __inline int
547 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
548 {
549 	tstate_t *tptr;
550 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
551 	if (tptr == NULL) {
552 		return (0);
553 	}
554 	do {
555 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
556 			return (1);
557 		}
558 	} while ((tptr = tptr->next) != NULL);
559 	return (0);
560 }
561 
562 static __inline int
563 are_any_luns_enabled(struct ispsoftc *isp, int port)
564 {
565 	int lo, hi;
566 	if (IS_DUALBUS(isp)) {
567 		lo = (port * (LUN_HASH_SIZE >> 1));
568 		hi = lo + (LUN_HASH_SIZE >> 1);
569 	} else {
570 		lo = 0;
571 		hi = LUN_HASH_SIZE;
572 	}
573 	for (; lo < hi; lo++) {
574 		if (isp->isp_osinfo.lun_hash[lo]) {
575 			return (1);
576 		}
577 	}
578 	return (0);
579 }
580 
581 static __inline tstate_t *
582 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
583 {
584 	tstate_t *tptr = NULL;
585 
586 	if (lun == CAM_LUN_WILDCARD) {
587 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
588 			tptr = &isp->isp_osinfo.tsdflt[bus];
589 			tptr->hold++;
590 			return (tptr);
591 		}
592 	} else {
593 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
594 		if (tptr == NULL) {
595 			return (NULL);
596 		}
597 	}
598 
599 	do {
600 		if (tptr->lun == lun && tptr->bus == bus) {
601 			tptr->hold++;
602 			return (tptr);
603 		}
604 	} while ((tptr = tptr->next) != NULL);
605 	return (tptr);
606 }
607 
608 static __inline void
609 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
610 {
611 	if (tptr->hold)
612 		tptr->hold--;
613 }
614 
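/*
 * The psema/vsema pair below implements a per-bus binary semaphore
 * (TM_BUSY/TM_WANTED) that serializes lun enable/modify requests, while
 * tgtcv1/rstatus carry the firmware completion status back to the waiter
 * via isp_cv_signal_rqe().
 */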
615 static __inline int
616 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
617 {
618 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
619 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
620 		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
621 			return (-1);
622 		}
623 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
624 	}
625 	return (0);
626 }
627 
628 static __inline int
629 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
630 {
631 	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
632 		return (-1);
633 	}
634 	return (0);
635 }
636 
637 static __inline void
638 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
639 {
640 	isp->isp_osinfo.rstatus[bus] = status;
641 	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
642 }
643 
644 static __inline void
645 isp_vsema_rqe(struct ispsoftc *isp, int bus)
646 {
647 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
648 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
649 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
650 	}
651 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
652 }
653 
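/*
 * An atio_private_data_t slot with a tag of zero is free, so
 * isp_get_atpd(isp, 0) doubles as the allocator for new commands.
 */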
654 static __inline atio_private_data_t *
655 isp_get_atpd(struct ispsoftc *isp, int tag)
656 {
657 	atio_private_data_t *atp;
658 	for (atp = isp->isp_osinfo.atpdp;
659 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
660 		if (atp->tag == tag)
661 			return (atp);
662 	}
663 	return (NULL);
664 }
665 
666 static cam_status
667 create_lun_state(struct ispsoftc *isp, int bus,
668     struct cam_path *path, tstate_t **rslt)
669 {
670 	cam_status status;
671 	lun_id_t lun;
672 	int hfx;
673 	tstate_t *tptr, *new;
674 
675 	lun = xpt_path_lun_id(path);
676 	if (lun < 0) {
677 		return (CAM_LUN_INVALID);
678 	}
679 	if (is_lun_enabled(isp, bus, lun)) {
680 		return (CAM_LUN_ALRDY_ENA);
681 	}
682 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
683 	if (new == NULL) {
684 		return (CAM_RESRC_UNAVAIL);
685 	}
686 
687 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
688 	    xpt_path_target_id(path), xpt_path_lun_id(path));
689 	if (status != CAM_REQ_CMP) {
690 		free(new, M_DEVBUF);
691 		return (status);
692 	}
693 	new->bus = bus;
694 	new->lun = lun;
695 	SLIST_INIT(&new->atios);
696 	SLIST_INIT(&new->inots);
697 	new->hold = 1;
698 
699 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
700 	tptr = isp->isp_osinfo.lun_hash[hfx];
701 	if (tptr == NULL) {
702 		isp->isp_osinfo.lun_hash[hfx] = new;
703 	} else {
704 		while (tptr->next)
705 			tptr = tptr->next;
706 		tptr->next = new;
707 	}
708 	*rslt = new;
709 	return (CAM_REQ_CMP);
710 }
711 
712 static __inline void
713 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
714 {
715 	int hfx;
716 	tstate_t *lw, *pw;
717 
718 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
719 	if (tptr->hold) {
720 		return;
721 	}
722 	pw = isp->isp_osinfo.lun_hash[hfx];
723 	if (pw == NULL) {
724 		return;
725 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
726 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
727 	} else {
728 		lw = pw;
729 		pw = lw->next;
730 		while (pw) {
731 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
732 				lw->next = pw->next;
733 				break;
734 			}
735 			lw = pw;
736 			pw = pw->next;
737 		}
738 		if (pw == NULL) {
739 			return;
740 		}
741 	}
742 	free(tptr, M_DEVBUF);
743 }
744 
745 /*
746  * we enter with our locks held.
747  */
748 static void
749 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
750 {
751 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
752 	struct ccb_en_lun *cel = &ccb->cel;
753 	tstate_t *tptr;
754 	u_int16_t rstat;
755 	int bus, cmd, av, wildcard;
756 	lun_id_t lun;
757 	target_id_t tgt;
758 
759 
760 	bus = XS_CHANNEL(ccb) & 0x1;
761 	tgt = ccb->ccb_h.target_id;
762 	lun = ccb->ccb_h.target_lun;
763 
764 	/*
765 	 * Do some sanity checking first.
766 	 */
767 
768 	if ((lun != CAM_LUN_WILDCARD) &&
769 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
770 		ccb->ccb_h.status = CAM_LUN_INVALID;
771 		return;
772 	}
773 
774 	if (IS_SCSI(isp)) {
775 		sdparam *sdp = isp->isp_param;
776 		sdp += bus;
777 		if (tgt != CAM_TARGET_WILDCARD &&
778 		    tgt != sdp->isp_initiator_id) {
779 			ccb->ccb_h.status = CAM_TID_INVALID;
780 			return;
781 		}
782 	} else {
783 		if (tgt != CAM_TARGET_WILDCARD &&
784 		    tgt != FCPARAM(isp)->isp_iid) {
785 			ccb->ccb_h.status = CAM_TID_INVALID;
786 			return;
787 		}
788 		/*
789 		 * This is as good a place as any to check f/w capabilities.
790 		 */
791 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
792 			isp_prt(isp, ISP_LOGERR,
793 			    "firmware does not support target mode");
794 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
795 			return;
796 		}
797 		/*
798 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
799 		 * XXX: dorks with our already fragile enable/disable code.
800 		 * XXX: dork with our already fragile enable/disable code.
801 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
802 			isp_prt(isp, ISP_LOGERR,
803 			    "firmware not SCCLUN capable");
804 		}
805 	}
806 
807 	if (tgt == CAM_TARGET_WILDCARD) {
808 		if (lun == CAM_LUN_WILDCARD) {
809 			wildcard = 1;
810 		} else {
811 			ccb->ccb_h.status = CAM_LUN_INVALID;
812 			return;
813 		}
814 	} else {
815 		wildcard = 0;
816 	}
817 
818 	/*
819 	 * Next check to see whether this is a target/lun wildcard action.
820 	 *
821 	 * If so, we know that we can accept commands for luns that haven't
822 	 * been enabled yet and send them upstream. Otherwise, we have to
823 	 * handle them locally (if we see them at all).
824 	 */
825 
826 	if (wildcard) {
827 		tptr = &isp->isp_osinfo.tsdflt[bus];
828 		if (cel->enable) {
829 			if (isp->isp_osinfo.tmflags[bus] &
830 			    TM_WILDCARD_ENABLED) {
831 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
832 				return;
833 			}
834 			ccb->ccb_h.status =
835 			    xpt_create_path(&tptr->owner, NULL,
836 			    xpt_path_path_id(ccb->ccb_h.path),
837 			    xpt_path_target_id(ccb->ccb_h.path),
838 			    xpt_path_lun_id(ccb->ccb_h.path));
839 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
840 				return;
841 			}
842 			SLIST_INIT(&tptr->atios);
843 			SLIST_INIT(&tptr->inots);
844 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
845 		} else {
846 			if ((isp->isp_osinfo.tmflags[bus] &
847 			    TM_WILDCARD_ENABLED) == 0) {
848 				ccb->ccb_h.status = CAM_REQ_CMP;
849 				return;
850 			}
851 			if (tptr->hold) {
852 				ccb->ccb_h.status = CAM_SCSI_BUSY;
853 				return;
854 			}
855 			xpt_free_path(tptr->owner);
856 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
857 		}
858 	}
859 
860 	/*
861 	 * Now check to see whether this bus needs to be
862 	 * enabled/disabled with respect to target mode.
863 	 */
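	/*
	 * The channel number rides in the top bit of the word handed to
	 * ISPCTL_TOGGLE_TMODE; ENABLE_TARGET_FLAG is OR'd in below when
	 * we are enabling rather than disabling.
	 */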
864 	av = bus << 31;
865 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
866 		av |= ENABLE_TARGET_FLAG;
867 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
868 		if (av) {
869 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
870 			if (wildcard) {
871 				isp->isp_osinfo.tmflags[bus] &=
872 				    ~TM_WILDCARD_ENABLED;
873 				xpt_free_path(tptr->owner);
874 			}
875 			return;
876 		}
877 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
878 		isp_prt(isp, ISP_LOGINFO,
879 		    "Target Mode enabled on channel %d", bus);
880 	} else if (cel->enable == 0 &&
881 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
882 		if (are_any_luns_enabled(isp, bus)) {
883 			ccb->ccb_h.status = CAM_SCSI_BUSY;
884 			return;
885 		}
886 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
887 		if (av) {
888 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
889 			return;
890 		}
891 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
892 		isp_prt(isp, ISP_LOGINFO,
893 		    "Target Mode disabled on channel %d", bus);
894 	}
895 
896 	if (wildcard) {
897 		ccb->ccb_h.status = CAM_REQ_CMP;
898 		return;
899 	}
900 
901 	if (cel->enable) {
902 		ccb->ccb_h.status =
903 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
904 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
905 			return;
906 		}
907 	} else {
908 		tptr = get_lun_statep(isp, bus, lun);
909 		if (tptr == NULL) {
910 			ccb->ccb_h.status = CAM_LUN_INVALID;
911 			return;
912 		}
913 	}
914 
915 	if (isp_psema_sig_rqe(isp, bus)) {
916 		rls_lun_statep(isp, tptr);
917 		if (cel->enable)
918 			destroy_lun_state(isp, tptr);
919 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
920 		return;
921 	}
922 
923 	if (cel->enable) {
924 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
925 		int c, n, ulun = lun;
926 
927 		cmd = RQSTYPE_ENABLE_LUN;
928 		c = DFLT_CMND_CNT;
929 		n = DFLT_INOT_CNT;
930 		if (IS_FC(isp) && lun != 0) {
931 			cmd = RQSTYPE_MODIFY_LUN;
932 			n = 0;
933 			/*
934 		 	 * For SCC firmware, we only deal with setting
935 			 * (enabling or modifying) lun 0.
936 			 */
937 			ulun = 0;
938 		}
939 		rstat = LUN_ERR;
940 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
941 			xpt_print_path(ccb->ccb_h.path);
942 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
943 			goto out;
944 		}
945 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
946 			xpt_print_path(ccb->ccb_h.path);
947 			isp_prt(isp, ISP_LOGERR,
948 			    "wait for ENABLE/MODIFY LUN timed out");
949 			goto out;
950 		}
951 		rstat = isp->isp_osinfo.rstatus[bus];
952 		if (rstat != LUN_OK) {
953 			xpt_print_path(ccb->ccb_h.path);
954 			isp_prt(isp, ISP_LOGERR,
955 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
956 			goto out;
957 		}
958 	} else {
959 		int c, n, ulun = lun;
960 		u_int32_t seq;
961 
962 		rstat = LUN_ERR;
963 		seq = isp->isp_osinfo.rollinfo++;
964 		cmd = -RQSTYPE_MODIFY_LUN;
965 
966 		c = DFLT_CMND_CNT;
967 		n = DFLT_INOT_CNT;
968 		if (IS_FC(isp) && lun != 0) {
969 			n = 0;
970 			/*
971 		 	 * For SCC firmware, we only deal with setting
972 			 * (enabling or modifying) lun 0.
973 			 */
974 			ulun = 0;
975 		}
976 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
977 			xpt_print_path(ccb->ccb_h.path);
978 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
979 			goto out;
980 		}
981 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
982 			xpt_print_path(ccb->ccb_h.path);
983 			isp_prt(isp, ISP_LOGERR,
984 			    "wait for MODIFY LUN timed out");
985 			goto out;
986 		}
987 		rstat = isp->isp_osinfo.rstatus[bus];
988 		if (rstat != LUN_OK) {
989 			xpt_print_path(ccb->ccb_h.path);
990 			isp_prt(isp, ISP_LOGERR,
991 			    "MODIFY LUN returned 0x%x", rstat);
992 			goto out;
993 		}
994 		if (IS_FC(isp) && lun) {
995 			goto out;
996 		}
997 
998 		seq = isp->isp_osinfo.rollinfo++;
999 
1000 		rstat = LUN_ERR;
1001 		cmd = -RQSTYPE_ENABLE_LUN;
1002 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
1003 			xpt_print_path(ccb->ccb_h.path);
1004 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1005 			goto out;
1006 		}
1007 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1008 			xpt_print_path(ccb->ccb_h.path);
1009 			isp_prt(isp, ISP_LOGERR,
1010 			     "wait for DISABLE LUN timed out");
1011 			goto out;
1012 		}
1013 		rstat = isp->isp_osinfo.rstatus[bus];
1014 		if (rstat != LUN_OK) {
1015 			xpt_print_path(ccb->ccb_h.path);
1016 			isp_prt(isp, ISP_LOGWARN,
1017 			    "DISABLE LUN returned 0x%x", rstat);
1018 			goto out;
1019 		}
1020 		if (are_any_luns_enabled(isp, bus) == 0) {
1021 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1022 			if (av) {
1023 				isp_prt(isp, ISP_LOGWARN,
1024 				    "disable target mode on channel %d failed",
1025 				    bus);
1026 				goto out;
1027 			}
1028 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1029 			xpt_print_path(ccb->ccb_h.path);
1030 			isp_prt(isp, ISP_LOGINFO,
1031 			    "Target Mode disabled on channel %d", bus);
1032 		}
1033 	}
1034 
1035 out:
1036 	isp_vsema_rqe(isp, bus);
1037 
1038 	if (rstat != LUN_OK) {
1039 		xpt_print_path(ccb->ccb_h.path);
1040 		isp_prt(isp, ISP_LOGWARN,
1041 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
1042 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1043 		rls_lun_statep(isp, tptr);
1044 		if (cel->enable)
1045 			destroy_lun_state(isp, tptr);
1046 	} else {
1047 		xpt_print_path(ccb->ccb_h.path);
1048 		isp_prt(isp, ISP_LOGINFO, lfmt,
1049 		    (cel->enable) ? "en" : "dis", bus);
1050 		rls_lun_statep(isp, tptr);
1051 		if (cel->enable == 0) {
1052 			destroy_lun_state(isp, tptr);
1053 		}
1054 		ccb->ccb_h.status = CAM_REQ_CMP;
1055 	}
1056 }
1057 
1058 static cam_status
1059 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1060 {
1061 	tstate_t *tptr;
1062 	struct ccb_hdr_slist *lp;
1063 	struct ccb_hdr *curelm;
1064 	int found;
1065 	union ccb *accb = ccb->cab.abort_ccb;
1066 
1067 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1068 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1069 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1070 			return (CAM_PATH_INVALID);
1071 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1072 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1073 			return (CAM_PATH_INVALID);
1074 		}
1075 	}
1076 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1077 	if (tptr == NULL) {
1078 		return (CAM_PATH_INVALID);
1079 	}
1080 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1081 		lp = &tptr->atios;
1082 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1083 		lp = &tptr->inots;
1084 	} else {
1085 		rls_lun_statep(isp, tptr);
1086 		return (CAM_UA_ABORT);
1087 	}
1088 	curelm = SLIST_FIRST(lp);
1089 	found = 0;
1090 	if (curelm == &accb->ccb_h) {
1091 		found = 1;
1092 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1093 	} else {
1094 		while(curelm != NULL) {
1095 			struct ccb_hdr *nextelm;
1096 
1097 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1098 			if (nextelm == &accb->ccb_h) {
1099 				found = 1;
1100 				SLIST_NEXT(curelm, sim_links.sle) =
1101 				    SLIST_NEXT(nextelm, sim_links.sle);
1102 				break;
1103 			}
1104 			curelm = nextelm;
1105 		}
1106 	}
1107 	rls_lun_statep(isp, tptr);
1108 	if (found) {
1109 		accb->ccb_h.status = CAM_REQ_ABORTED;
1110 		return (CAM_REQ_CMP);
1111 	}
1112 	return(CAM_PATH_INVALID);
1113 }
1114 
1115 static cam_status
1116 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1117 {
1118 	void *qe;
1119 	struct ccb_scsiio *cso = &ccb->csio;
1120 	u_int16_t *hp, save_handle;
1121 	u_int16_t nxti, optr;
1122 	u_int8_t local[QENTRY_LEN];
1123 
1124 
1125 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1126 		xpt_print_path(ccb->ccb_h.path);
1127 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1128 		return (CAM_RESRC_UNAVAIL);
1129 	}
1130 	bzero(local, QENTRY_LEN);
1131 
1132 	/*
1133 	 * We're either moving data or completing a command here.
1134 	 */
1135 
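	/*
	 * For Fibre Channel we build a CTIO2: mode 0 moves data (status may
	 * ride along but sense cannot), while mode 1 handles the
	 * status/sense-only case where dxfer_len is zero.
	 */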
1136 	if (IS_FC(isp)) {
1137 		atio_private_data_t *atp;
1138 		ct2_entry_t *cto = (ct2_entry_t *) local;
1139 
1140 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1141 		cto->ct_header.rqs_entry_count = 1;
1142 		cto->ct_iid = cso->init_id;
1143 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1144 			cto->ct_lun = ccb->ccb_h.target_lun;
1145 		}
1146 
1147 		atp = isp_get_atpd(isp, cso->tag_id);
1148 		if (atp == NULL) {
1149 			isp_prt(isp, ISP_LOGERR,
1150 			    "cannot find private data adjunct for tag %x",
1151 			    cso->tag_id);
1152 			return (-1);
1153 		}
1154 
1155 		cto->ct_rxid = cso->tag_id;
1156 		if (cso->dxfer_len == 0) {
1157 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1158 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1159 				cto->ct_flags |= CT2_SENDSTATUS;
1160 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1161 				cto->ct_resid =
1162 				    atp->orig_datalen - atp->bytes_xfered;
1163 				if (cto->ct_resid < 0) {
1164 					cto->rsp.m1.ct_scsi_status |=
1165 					    CT2_DATA_OVER;
1166 				} else if (cto->ct_resid > 0) {
1167 					cto->rsp.m1.ct_scsi_status |=
1168 					    CT2_DATA_UNDER;
1169 				}
1170 			}
1171 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1172 				int m = min(cso->sense_len, MAXRESPLEN);
1173 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1174 				cto->rsp.m1.ct_senselen = m;
1175 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1176 			}
1177 		} else {
1178 			cto->ct_flags |= CT2_FLAG_MODE0;
1179 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1180 				cto->ct_flags |= CT2_DATA_IN;
1181 			} else {
1182 				cto->ct_flags |= CT2_DATA_OUT;
1183 			}
1184 			cto->ct_reloff = atp->bytes_xfered;
1185 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1186 				cto->ct_flags |= CT2_SENDSTATUS;
1187 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1188 				cto->ct_resid =
1189 				    atp->orig_datalen -
1190 				    (atp->bytes_xfered + cso->dxfer_len);
1191 				if (cto->ct_resid < 0) {
1192 					cto->rsp.m0.ct_scsi_status |=
1193 					    CT2_DATA_OVER;
1194 				} else if (cto->ct_resid > 0) {
1195 					cto->rsp.m0.ct_scsi_status |=
1196 					    CT2_DATA_UNDER;
1197 				}
1198 			} else {
1199 				atp->last_xframt = cso->dxfer_len;
1200 			}
1201 			/*
1202 			 * If we're sending data and status back together,
1203 			 * we can't also send back sense data as well.
1204 			 */
1205 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1206 		}
1207 
1208 		if (cto->ct_flags & CT2_SENDSTATUS) {
1209 			isp_prt(isp, ISP_LOGTDEBUG0,
1210 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1211 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1212 			    cso->dxfer_len, cto->ct_resid);
1213 			cto->ct_flags |= CT2_CCINCR;
1214 			atp->state = ATPD_STATE_LAST_CTIO;
1215 		} else
1216 			atp->state = ATPD_STATE_CTIO;
1217 		cto->ct_timeout = 10;
1218 		hp = &cto->ct_syshandle;
1219 	} else {
1220 		ct_entry_t *cto = (ct_entry_t *) local;
1221 
1222 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1223 		cto->ct_header.rqs_entry_count = 1;
1224 		cto->ct_iid = cso->init_id;
1225 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1226 		cto->ct_tgt = ccb->ccb_h.target_id;
1227 		cto->ct_lun = ccb->ccb_h.target_lun;
1228 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1229 		if (AT_HAS_TAG(cso->tag_id)) {
1230 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1231 			cto->ct_flags |= CT_TQAE;
1232 		}
1233 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1234 			cto->ct_flags |= CT_NODISC;
1235 		}
1236 		if (cso->dxfer_len == 0) {
1237 			cto->ct_flags |= CT_NO_DATA;
1238 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1239 			cto->ct_flags |= CT_DATA_IN;
1240 		} else {
1241 			cto->ct_flags |= CT_DATA_OUT;
1242 		}
1243 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1244 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1245 			cto->ct_scsi_status = cso->scsi_status;
1246 			cto->ct_resid = cso->resid;
1247 			isp_prt(isp, ISP_LOGTDEBUG0,
1248 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1249 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1250 			    cso->tag_id);
1251 		}
1252 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1253 		cto->ct_timeout = 10;
1254 		hp = &cto->ct_syshandle;
1255 	}
1256 
1257 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1258 		xpt_print_path(ccb->ccb_h.path);
1259 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1260 		return (CAM_RESRC_UNAVAIL);
1261 	}
1262 
1263 
1264 	/*
1265 	 * Call the dma setup routines for this entry (and any subsequent
1266 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1267 	 * new things to play with. As with isp_start's usage of DMA setup,
1268 	 * any swizzling is done in the machine dependent layer. Because
1269 	 * of this, we put the request onto the queue area first in native
1270 	 * format.
1271 	 */
1272 
1273 	save_handle = *hp;
1274 
1275 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1276 	case CMD_QUEUED:
1277 		ISP_ADD_REQUEST(isp, nxti);
1278 		return (CAM_REQ_INPROG);
1279 
1280 	case CMD_EAGAIN:
1281 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1282 		isp_destroy_handle(isp, save_handle);
1283 		return (CAM_RESRC_UNAVAIL);
1284 
1285 	default:
1286 		isp_destroy_handle(isp, save_handle);
1287 		return (XS_ERR(ccb));
1288 	}
1289 }
1290 
1291 static void
1292 isp_refire_putback_atio(void *arg)
1293 {
1294 	int s = splcam();
1295 	isp_target_putback_atio(arg);
1296 	splx(s);
1297 }
1298 
1299 static void
1300 isp_target_putback_atio(union ccb *ccb)
1301 {
1302 	struct ispsoftc *isp;
1303 	struct ccb_scsiio *cso;
1304 	u_int16_t nxti, optr;
1305 	void *qe;
1306 
1307 	isp = XS_ISP(ccb);
1308 
1309 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1310 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1311 		isp_prt(isp, ISP_LOGWARN,
1312 		    "isp_target_putback_atio: Request Queue Overflow");
1313 		return;
1314 	}
1315 	bzero(qe, QENTRY_LEN);
1316 	cso = &ccb->csio;
1317 	if (IS_FC(isp)) {
1318 		at2_entry_t local, *at = &local;
1319 		MEMZERO(at, sizeof (at2_entry_t));
1320 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1321 		at->at_header.rqs_entry_count = 1;
1322 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1323 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1324 		} else {
1325 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1326 		}
1327 		at->at_status = CT_OK;
1328 		at->at_rxid = cso->tag_id;
1329 		at->at_iid = cso->ccb_h.target_id;
1330 		isp_put_atio2(isp, at, qe);
1331 	} else {
1332 		at_entry_t local, *at = &local;
1333 		MEMZERO(at, sizeof (at_entry_t));
1334 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1335 		at->at_header.rqs_entry_count = 1;
1336 		at->at_iid = cso->init_id;
1337 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1338 		at->at_tgt = cso->ccb_h.target_id;
1339 		at->at_lun = cso->ccb_h.target_lun;
1340 		at->at_status = CT_OK;
1341 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1342 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1343 		isp_put_atio(isp, at, qe);
1344 	}
1345 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1346 	ISP_ADD_REQUEST(isp, nxti);
1347 	isp_complete_ctio(ccb);
1348 }
1349 
1350 static void
1351 isp_complete_ctio(union ccb *ccb)
1352 {
1353 	struct ispsoftc *isp = XS_ISP(ccb);
1354 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1355 		ccb->ccb_h.status |= CAM_REQ_CMP;
1356 	}
1357 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1358 	xpt_done(ccb);
1359 }
1360 
1361 /*
1362  * Handle ATIO stuff that the generic code can't.
1363  * This means handling CDBs.
1364  */
1365 
1366 static int
1367 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1368 {
1369 	tstate_t *tptr;
1370 	int status, bus, iswildcard;
1371 	struct ccb_accept_tio *atiop;
1372 
1373 	/*
1374 	 * The firmware status (except for the QLTM_SVALID bit)
1375 	 * indicates why this ATIO was sent to us.
1376 	 *
1377 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1378 	 *
1379 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1380 	 * we're still connected on the SCSI bus.
1381 	 */
1382 	status = aep->at_status;
1383 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1384 		/*
1385 		 * Bus Phase Sequence error. We should have sense data
1386 		 * suggested by the f/w. I'm not sure quite yet what
1387 		 * to do about this for CAM.
1388 		 */
1389 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1390 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1391 		return (0);
1392 	}
1393 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1394 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1395 		    status);
1396 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1397 		return (0);
1398 	}
1399 
1400 	bus = GET_BUS_VAL(aep->at_iid);
1401 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1402 	if (tptr == NULL) {
1403 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1404 		iswildcard = 1;
1405 	} else {
1406 		iswildcard = 0;
1407 	}
1408 
1409 	if (tptr == NULL) {
1410 		/*
1411 		 * Because we can't autofeed sense data back with
1412 		 * a command for parallel SCSI, we can't give back
1413 		 * a CHECK CONDITION. We'll give back a BUSY status
1414 		 * instead. This works out okay because the only
1415 		 * time we should, in fact, get this, is in the
1416 		 * case that somebody configured us without the
1417 		 * blackhole driver, so they get what they deserve.
1418 		 */
1419 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1420 		return (0);
1421 	}
1422 
1423 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1424 	if (atiop == NULL) {
1425 		/*
1426 		 * Because we can't autofeed sense data back with
1427 		 * a command for parallel SCSI, we can't give back
1428 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1429 		 * instead. This works out okay because the only time we
1430 		 * should, in fact, get this, is in the case that we've
1431 		 * run out of ATIOS.
1432 		 */
1433 		xpt_print_path(tptr->owner);
1434 		isp_prt(isp, ISP_LOGWARN,
1435 		    "no ATIOS for lun %d from initiator %d on channel %d",
1436 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1437 		if (aep->at_flags & AT_TQAE)
1438 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1439 		else
1440 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1441 		rls_lun_statep(isp, tptr);
1442 		return (0);
1443 	}
1444 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1445 	if (iswildcard) {
1446 		atiop->ccb_h.target_id = aep->at_tgt;
1447 		atiop->ccb_h.target_lun = aep->at_lun;
1448 	}
1449 	if (aep->at_flags & AT_NODISC) {
1450 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1451 	} else {
1452 		atiop->ccb_h.flags = 0;
1453 	}
1454 
1455 	if (status & QLTM_SVALID) {
1456 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1457 		atiop->sense_len = amt;
1458 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1459 	} else {
1460 		atiop->sense_len = 0;
1461 	}
1462 
1463 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1464 	atiop->cdb_len = aep->at_cdblen;
1465 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1466 	atiop->ccb_h.status = CAM_CDB_RECVD;
1467 	/*
1468 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1469 	 * and the handle (which we have to preserve).
1470 	 */
1471 	AT_MAKE_TAGID(atiop->tag_id, aep);
1472 	if (aep->at_flags & AT_TQAE) {
1473 		atiop->tag_action = aep->at_tag_type;
1474 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1475 	}
1476 	xpt_done((union ccb*)atiop);
1477 	isp_prt(isp, ISP_LOGTDEBUG0,
1478 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1479 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1480 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1481 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1482 	    "nondisc" : "disconnecting");
1483 	rls_lun_statep(isp, tptr);
1484 	return (0);
1485 }
1486 
1487 static int
1488 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1489 {
1490 	lun_id_t lun;
1491 	tstate_t *tptr;
1492 	struct ccb_accept_tio *atiop;
1493 	atio_private_data_t *atp;
1494 
1495 	/*
1496 	 * The firmware status (except for the QLTM_SVALID bit)
1497 	 * indicates why this ATIO was sent to us.
1498 	 *
1499 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1500 	 */
1501 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1502 		isp_prt(isp, ISP_LOGWARN,
1503 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1504 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1505 		return (0);
1506 	}
1507 
1508 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1509 		lun = aep->at_scclun;
1510 	} else {
1511 		lun = aep->at_lun;
1512 	}
1513 	tptr = get_lun_statep(isp, 0, lun);
1514 	if (tptr == NULL) {
1515 		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1516 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1517 	}
1518 
1519 	if (tptr == NULL) {
1520 		/*
1521 		 * What we'd like to know is whether or not we have a listener
1522 		 * upstream that really hasn't configured yet. If we do, then
1523 		 * we can give a more sensible reply here. If not, then we can
1524 		 * reject this out of hand.
1525 		 *
1526 		 * Choices for what to send were
1527 		 *
1528 		 *	Not Ready, Unit Not Self-Configured Yet
1529 		 *	(0x2,0x3e,0x00)
1530 		 *
1531 		 * for the former and
1532 		 *
1533 		 *	Illegal Request, Logical Unit Not Supported
1534 		 *	(0x5,0x25,0x00)
1535 		 *
1536 		 * for the latter.
1537 		 *
1538 		 * We used to decide whether there was at least one listener
1539 		 * based upon whether the black hole driver was configured.
1540 		 * However, recent config(8) changes have made this hard to do
1541 		 * at this time.
1542 		 *
1543 		 */
1544 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1545 		return (0);
1546 	}
1547 
1548 	atp = isp_get_atpd(isp, 0);
1549 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1550 	if (atiop == NULL || atp == NULL) {
1551 		/*
1552 		 * Because we can't autofeed sense data back with
1553 		 * a command for parallel SCSI, we can't give back
1554 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1555 		 * instead. This works out okay because the only time we
1556 		 * should, in fact, get this, is in the case that we've
1557 		 * run out of ATIOS.
1558 		 */
1559 		xpt_print_path(tptr->owner);
1560 		isp_prt(isp, ISP_LOGWARN,
1561 		    "no %s for lun %d from initiator %d",
1562 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1563 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1564 		rls_lun_statep(isp, tptr);
1565 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1566 		return (0);
1567 	}
1568 	atp->state = ATPD_STATE_ATIO;
1569 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1570 	tptr->atio_count--;
1571 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1572 	    lun, tptr->atio_count);
1573 
1574 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1575 		atiop->ccb_h.target_id =
1576 		    ((fcparam *)isp->isp_param)->isp_loopid;
1577 		atiop->ccb_h.target_lun = lun;
1578 	}
1579 	/*
1580 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1581 	 */
1582 	atiop->sense_len = 0;
1583 
1584 	atiop->init_id = aep->at_iid;
1585 	atiop->cdb_len = ATIO2_CDBLEN;
1586 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1587 	atiop->ccb_h.status = CAM_CDB_RECVD;
1588 	atiop->tag_id = aep->at_rxid;
1589 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1590 	case ATIO2_TC_ATTR_SIMPLEQ:
1591 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1592 		break;
1593 	case ATIO2_TC_ATTR_HEADOFQ:
1594 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1595 		break;
1596 	case ATIO2_TC_ATTR_ORDERED:
1597 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1598 		break;
1599 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1600 	case ATIO2_TC_ATTR_UNTAGGED:
1601 	default:
1602 		atiop->tag_action = 0;
1603 		break;
1604 	}
1605 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1606 
1607 	atp->tag = atiop->tag_id;
1608 	atp->lun = lun;
1609 	atp->orig_datalen = aep->at_datalen;
1610 	atp->last_xframt = 0;
1611 	atp->bytes_xfered = 0;
1612 	atp->state = ATPD_STATE_CAM;
1613 	xpt_done((union ccb*)atiop);
1614 
1615 	isp_prt(isp, ISP_LOGTDEBUG0,
1616 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1617 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1618 	    lun, aep->at_taskflags, aep->at_datalen);
1619 	rls_lun_statep(isp, tptr);
1620 	return (0);
1621 }
1622 
1623 static int
1624 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1625 {
1626 	union ccb *ccb;
1627 	int sentstatus, ok, notify_cam, resid = 0;
1628 	u_int16_t tval;
1629 
1630 	/*
1631 	 * CTIO and CTIO2 are close enough....
1632 	 */
1633 
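	/*
	 * The handle is pulled through a ct_entry_t cast before we know
	 * whether this is a CTIO or CTIO2; this relies on ct_syshandle
	 * living at the same offset in both entry formats.
	 */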
1634 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1635 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1636 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1637 
1638 	if (IS_FC(isp)) {
1639 		ct2_entry_t *ct = arg;
1640 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1641 		if (atp == NULL) {
1642 			isp_prt(isp, ISP_LOGERR,
1643 			    "cannot find adjunct for %x after I/O",
1644 			    ct->ct_rxid);
1645 			return (0);
1646 		}
1647 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1648 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1649 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1650 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1651 		}
1652 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1653 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1654 			resid = ct->ct_resid;
1655 			atp->bytes_xfered += (atp->last_xframt - resid);
1656 			atp->last_xframt = 0;
1657 		}
1658 		if (sentstatus || !ok) {
1659 			atp->tag = 0;
1660 		}
1661 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1662 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1663 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1664 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1665 		    resid, sentstatus? "FIN" : "MID");
1666 		tval = ct->ct_rxid;
1667 
1668 		/* XXX: should really come after isp_complete_ctio */
1669 		atp->state = ATPD_STATE_PDON;
1670 	} else {
1671 		ct_entry_t *ct = arg;
1672 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1673 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1674 		/*
1675 		 * We *ought* to be able to get back to the original ATIO
1676 		 * here, but for some reason this gets lost. It's just as
1677 		 * well because it's squirrelled away as part of periph
1678 		 * private data.
1679 		 *
1680 		 * We can live without it as long as we continue to use
1681 		 * the auto-replenish feature for CTIOs.
1682 		 */
1683 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1684 		if (ct->ct_status & QLTM_SVALID) {
1685 			char *sp = (char *)ct;
1686 			sp += CTIO_SENSE_OFFSET;
1687 			ccb->csio.sense_len =
1688 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1689 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1690 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1691 		}
1692 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1693 			resid = ct->ct_resid;
1694 		}
1695 		isp_prt(isp, ISP_LOGTDEBUG0,
1696 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1697 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1698 		    ct->ct_status, ct->ct_flags, resid,
1699 		    sentstatus? "FIN" : "MID");
1700 		tval = ct->ct_fwhandle;
1701 	}
1702 	ccb->csio.resid += resid;
1703 
1704 	/*
1705 	 * We're here either because intermediate data transfers are done
1706 	 * and/or the final status CTIO (which may have joined with a
1707 	 * Data Transfer) is done.
1708 	 *
1709 	 * In any case, for this platform, the upper layers figure out
1710 	 * what to do next, so all we do here is collect status and
1711 	 * pass information along. Any DMA handles have already been
1712 	 * freed.
1713 	 */
1714 	if (notify_cam == 0) {
1715 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1716 		return (0);
1717 	}
1718 
1719 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1720 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1721 
1722 	if (!ok) {
1723 		isp_target_putback_atio(ccb);
1724 	} else {
1725 		isp_complete_ctio(ccb);
1726 
1727 	}
1728 	return (0);
1729 }
1730 
1731 static int
1732 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1733 {
1734 	return (0);	/* XXXX */
1735 }
1736 
1737 static int
1738 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1739 {
1740 
1741 	switch (inp->in_status) {
1742 	case IN_PORT_LOGOUT:
1743 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1744 		   inp->in_iid);
1745 		break;
1746 	case IN_PORT_CHANGED:
1747 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1748 		   inp->in_iid);
1749 		break;
1750 	case IN_GLOBAL_LOGO:
1751 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1752 		break;
1753 	case IN_ABORT_TASK:
1754 	{
1755 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1756 		struct ccb_immed_notify *inot = NULL;
1757 
1758 		if (atp) {
1759 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1760 			if (tptr) {
1761 				inot = (struct ccb_immed_notify *)
1762 				    SLIST_FIRST(&tptr->inots);
1763 				if (inot) {
1764 					SLIST_REMOVE_HEAD(&tptr->inots,
1765 					    sim_links.sle);
1766 				}
1767 			}
1768 			isp_prt(isp, ISP_LOGWARN,
1769 			   "abort task RX_ID %x IID %d state %d",
1770 			   inp->in_seqid, inp->in_iid, atp->state);
1771 		} else {
1772 			isp_prt(isp, ISP_LOGWARN,
1773 			   "abort task RX_ID %x from iid %d, state unknown",
1774 			   inp->in_seqid, inp->in_iid);
1775 		}
1776 		if (inot) {
1777 			inot->initiator_id = inp->in_iid;
1778 			inot->sense_len = 0;
1779 			inot->message_args[0] = MSG_ABORT_TAG;
1780 			inot->message_args[1] = inp->in_seqid & 0xff;
1781 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1782 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1783 			xpt_done((union ccb *)inot);
1784 		}
1785 		break;
1786 	}
1787 	default:
1788 		break;
1789 	}
1790 	return (0);
1791 }
1792 #endif
1793 
1794 static void
1795 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1796 {
1797 	struct cam_sim *sim;
1798 	struct ispsoftc *isp;
1799 
1800 	sim = (struct cam_sim *)cbarg;
1801 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1802 	switch (code) {
1803 	case AC_LOST_DEVICE:
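		/*
		 * When a device goes away, drop the negotiation goal flags
		 * for that target back to the NVRAM/safe defaults and ask
		 * for an update, so a re-attached device renegotiates.
		 */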
1804 		if (IS_SCSI(isp)) {
1805 			u_int16_t oflags, nflags;
1806 			sdparam *sdp = isp->isp_param;
1807 			int tgt;
1808 
1809 			tgt = xpt_path_target_id(path);
1810 			ISP_LOCK(isp);
1811 			sdp += cam_sim_bus(sim);
1812 			nflags = sdp->isp_devparam[tgt].nvrm_flags;
1813 #ifndef	ISP_TARGET_MODE
1814 			nflags &= DPARM_SAFE_DFLT;
1815 			if (isp->isp_loaded_fw) {
1816 				nflags |= DPARM_NARROW | DPARM_ASYNC;
1817 			}
1818 #else
1819 			nflags = DPARM_DEFAULT;
1820 #endif
1821 			oflags = sdp->isp_devparam[tgt].goal_flags;
1822 			sdp->isp_devparam[tgt].goal_flags = nflags;
1823 			sdp->isp_devparam[tgt].dev_update = 1;
1824 			isp->isp_update |= (1 << cam_sim_bus(sim));
1825 			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
1826 			sdp->isp_devparam[tgt].goal_flags = oflags;
1827 			ISP_UNLOCK(isp);
1828 		}
1829 		break;
1830 	default:
1831 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1832 		break;
1833 	}
1834 }
1835 
1836 static void
1837 isp_poll(struct cam_sim *sim)
1838 {
1839 	struct ispsoftc *isp = cam_sim_softc(sim);
1840 	u_int16_t isr, sema, mbox;
1841 
1842 	ISP_LOCK(isp);
1843 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1844 		isp_intr(isp, isr, sema, mbox);
1845 	}
1846 	ISP_UNLOCK(isp);
1847 }
1848 
1849 
1850 static void
1851 isp_watchdog(void *arg)
1852 {
1853 	XS_T *xs = arg;
1854 	struct ispsoftc *isp = XS_ISP(xs);
1855 	u_int32_t handle;
1856 	int iok;
1857 
1858 	/*
1859 	 * We've decided this command is dead. Make sure we're not trying
1860 	 * to kill a command that's already dead by getting its handle and
1861 	 * seeing whether it's still alive.
1862 	 */
1863 	ISP_LOCK(isp);
1864 	iok = isp->isp_osinfo.intsok;
1865 	isp->isp_osinfo.intsok = 0;
1866 	handle = isp_find_handle(isp, xs);
1867 	if (handle) {
1868 		u_int16_t isr, sema, mbox;
1869 
1870 		if (XS_CMD_DONE_P(xs)) {
1871 			isp_prt(isp, ISP_LOGDEBUG1,
1872 			    "watchdog found done cmd (handle 0x%x)", handle);
1873 			ISP_UNLOCK(isp);
1874 			return;
1875 		}
1876 
1877 		if (XS_CMD_WDOG_P(xs)) {
1878 			isp_prt(isp, ISP_LOGDEBUG2,
1879 			    "recursive watchdog (handle 0x%x)", handle);
1880 			ISP_UNLOCK(isp);
1881 			return;
1882 		}
1883 
1884 		XS_CMD_S_WDOG(xs);
1885 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1886 			isp_intr(isp, isr, sema, mbox);
1887 		}
1888 		if (XS_CMD_DONE_P(xs)) {
1889 			isp_prt(isp, ISP_LOGDEBUG2,
1890 			    "watchdog cleanup for handle 0x%x", handle);
1891 			xpt_done((union ccb *) xs);
1892 		} else if (XS_CMD_GRACE_P(xs)) {
1893 			/*
1894 			 * Make sure the command is *really* dead before we
1895 			 * release the handle (and DMA resources) for reuse.
1896 			 */
1897 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1898 
1899 			/*
1900 			 * After this point, the command is really dead.
1901 			 */
1902 			if (XS_XFRLEN(xs)) {
1903 				ISP_DMAFREE(isp, xs, handle);
1904 			}
1905 			isp_destroy_handle(isp, handle);
1906 			xpt_print_path(xs->ccb_h.path);
1907 			isp_prt(isp, ISP_LOGWARN,
1908 			    "watchdog timeout for handle 0x%x", handle);
1909 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1910 			XS_CMD_C_WDOG(xs);
1911 			isp_done(xs);
1912 		} else {
1913 			u_int16_t nxti, optr;
1914 			ispreq_t local, *mp = &local, *qe;
1915 
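			/*
			 * First expiry for this command: rearm the watchdog,
			 * mark the command as entering its grace period and
			 * queue a SYNC_ALL marker for this channel. If the
			 * command is still outstanding on the next expiry it
			 * will be aborted (see the XS_CMD_GRACE_P case above).
			 */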
1916 			XS_CMD_C_WDOG(xs);
1917 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1918 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1919 				ISP_UNLOCK(isp);
1920 				return;
1921 			}
1922 			XS_CMD_S_GRACE(xs);
1923 			MEMZERO((void *) mp, sizeof (*mp));
1924 			mp->req_header.rqs_entry_count = 1;
1925 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1926 			mp->req_modifier = SYNC_ALL;
1927 			mp->req_target = XS_CHANNEL(xs) << 7;
1928 			isp_put_request(isp, mp, qe);
1929 			ISP_ADD_REQUEST(isp, nxti);
1930 		}
1931 	} else {
1932 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1933 	}
1934 	isp->isp_osinfo.intsok = iok;
1935 	ISP_UNLOCK(isp);
1936 }
1937 
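/*
 * Fibre Channel support thread. Woken via kthread_cv, it (re)evaluates
 * loop state and releases a loopdown-frozen SIMQ once the loop is usable
 * (or once it is clear that pending commands should be failed instead).
 */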
1938 static void
1939 isp_kthread(void *arg)
1940 {
1941 	struct ispsoftc *isp = arg;
1942 
1943 	mtx_lock(&isp->isp_lock);
1944 	/*
1945 	 * The first (inner) loop below waits until we have gotten
1946 	 * good fibre channel state.
1947 	 */
1948 	for (;;) {
1949 		int wasfrozen;
1950 
1951 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1952 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1953 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1954 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1955 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1956 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1957 				    isp->isp_osinfo.ktmature == 0) {
1958 					break;
1959 				}
1960 			}
1961 			msleep(isp_kthread, &isp->isp_lock,
1962 			    PRIBIO, "isp_fcthrd", hz);
1963 		}
1964 
1965 		/*
1966 		 * Even if we didn't get good loop state we may be
1967 		 * unfreezing the SIMQ so that we can kill off
1968 		 * commands (if we've never seen loop before, for example).
1969 		 */
1970 		isp->isp_osinfo.ktmature = 1;
1971 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1972 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1973 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1974 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
1975 			ISPLOCK_2_CAMLOCK(isp);
1976 			xpt_release_simq(isp->isp_sim, 1);
1977 			CAMLOCK_2_ISPLOCK(isp);
1978 		}
1979 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
1980 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
1981 	}
1982 }
1983 
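/*
 * Main CAM action entry point: dispatch on the CCB function code. The
 * first XPT_SCSI_IO also takes the chip from init state to run state.
 */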
1984 static void
1985 isp_action(struct cam_sim *sim, union ccb *ccb)
1986 {
1987 	int bus, tgt, error;
1988 	struct ispsoftc *isp;
1989 	struct ccb_trans_settings *cts;
1990 
1991 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1992 
1993 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1994 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1995 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1996 	if (isp->isp_state != ISP_RUNSTATE &&
1997 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1998 		CAMLOCK_2_ISPLOCK(isp);
1999 		isp_init(isp);
2000 		if (isp->isp_state != ISP_INITSTATE) {
2001 			ISP_UNLOCK(isp);
2002 			/*
2003 			 * Lie. Say it was a selection timeout.
2004 			 */
2005 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2006 			xpt_freeze_devq(ccb->ccb_h.path, 1);
2007 			xpt_done(ccb);
2008 			return;
2009 		}
2010 		isp->isp_state = ISP_RUNSTATE;
2011 		ISPLOCK_2_CAMLOCK(isp);
2012 	}
2013 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2014 
2015 
2016 	switch (ccb->ccb_h.func_code) {
2017 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2018 		/*
2019 		 * Do a couple of preliminary checks...
2020 		 */
2021 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2022 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2023 				ccb->ccb_h.status = CAM_REQ_INVALID;
2024 				xpt_done(ccb);
2025 				break;
2026 			}
2027 		}
2028 #ifdef	DIAGNOSTIC
2029 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2030 			ccb->ccb_h.status = CAM_PATH_INVALID;
2031 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2032 			ccb->ccb_h.status = CAM_PATH_INVALID;
2033 		}
2034 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2035 			isp_prt(isp, ISP_LOGERR,
2036 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2037 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2038 			xpt_done(ccb);
2039 			break;
2040 		}
2041 #endif
2042 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2043 		CAMLOCK_2_ISPLOCK(isp);
2044 		error = isp_start((XS_T *) ccb);
2045 		switch (error) {
2046 		case CMD_QUEUED:
2047 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
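			/*
			 * Convert the CCB timeout (milliseconds, or
			 * CAM_TIME_DEFAULT meaning 60 seconds) to ticks,
			 * add a couple of seconds of slop and arm the
			 * per-command watchdog.
			 */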
2048 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2049 				u_int64_t ticks = (u_int64_t) hz;
2050 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2051 					ticks = 60 * 1000 * ticks;
2052 				else
2053 					ticks = ccb->ccb_h.timeout * hz;
2054 				ticks = ((ticks + 999) / 1000) + hz + hz;
2055 				if (ticks >= 0x80000000) {
2056 					isp_prt(isp, ISP_LOGERR,
2057 					    "timeout overflow");
2058 					ticks = 0x7fffffff;
2059 				}
2060 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2061 				    (caddr_t)ccb, (int)ticks);
2062 			} else {
2063 				callout_handle_init(&ccb->ccb_h.timeout_ch);
2064 			}
2065 			ISPLOCK_2_CAMLOCK(isp);
2066 			break;
2067 		case CMD_RQLATER:
2068 			/*
2069 			 * This can only happen for Fibre Channel
2070 			 */
2071 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2072 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2073 			    isp->isp_osinfo.ktmature) {
2074 				ISPLOCK_2_CAMLOCK(isp);
2075 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2076 				xpt_done(ccb);
2077 				break;
2078 			}
2079 			cv_signal(&isp->isp_osinfo.kthread_cv);
2080 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2081 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2082 			ISPLOCK_2_CAMLOCK(isp);
2083 			xpt_done(ccb);
2084 			break;
2085 		case CMD_EAGAIN:
2086 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2087 			ISPLOCK_2_CAMLOCK(isp);
2088 			xpt_done(ccb);
2089 			break;
2090 		case CMD_COMPLETE:
2091 			isp_done((struct ccb_scsiio *) ccb);
2092 			ISPLOCK_2_CAMLOCK(isp);
2093 			break;
2094 		default:
2095 			isp_prt(isp, ISP_LOGERR,
2096 			    "What's this? 0x%x at %d in file %s",
2097 			    error, __LINE__, __FILE__);
2098 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2099 			xpt_done(ccb);
2100 			ISPLOCK_2_CAMLOCK(isp);
2101 		}
2102 		break;
2103 
2104 #ifdef	ISP_TARGET_MODE
2105 	case XPT_EN_LUN:		/* Enable LUN as a target */
2106 	{
2107 		int iok;
2108 		CAMLOCK_2_ISPLOCK(isp);
2109 		iok = isp->isp_osinfo.intsok;
2110 		isp->isp_osinfo.intsok = 0;
2111 		isp_en_lun(isp, ccb);
2112 		isp->isp_osinfo.intsok = iok;
2113 		ISPLOCK_2_CAMLOCK(isp);
2114 		xpt_done(ccb);
2115 		break;
2116 	}
2117 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2118 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2119 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2120 	{
2121 		tstate_t *tptr =
2122 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2123 		if (tptr == NULL) {
2124 			ccb->ccb_h.status = CAM_LUN_INVALID;
2125 			xpt_done(ccb);
2126 			break;
2127 		}
2128 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2129 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2130 		ccb->ccb_h.flags = 0;
2131 
2132 		CAMLOCK_2_ISPLOCK(isp);
2133 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2134 			/*
2135 			 * Note that the command itself may not be done-
2136 			 * it may not even have had the first CTIO sent.
2137 			 */
2138 			tptr->atio_count++;
2139 			isp_prt(isp, ISP_LOGTDEBUG0,
2140 			    "Put FREE ATIO2, lun %d, count now %d",
2141 			    ccb->ccb_h.target_lun, tptr->atio_count);
2142 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2143 			    sim_links.sle);
2144 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2145 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2146 			    sim_links.sle);
2147 		} else {
2148 			;
2149 		}
2150 		rls_lun_statep(isp, tptr);
2151 		ccb->ccb_h.status = CAM_REQ_INPROG;
2152 		ISPLOCK_2_CAMLOCK(isp);
2153 		break;
2154 	}
2155 	case XPT_CONT_TARGET_IO:
2156 	{
2157 		CAMLOCK_2_ISPLOCK(isp);
2158 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2159 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2160 			isp_prt(isp, ISP_LOGWARN,
2161 			    "XPT_CONT_TARGET_IO: status 0x%x",
2162 			    ccb->ccb_h.status);
2163 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2164 			ISPLOCK_2_CAMLOCK(isp);
2165 			xpt_done(ccb);
2166 		} else {
2167 			ISPLOCK_2_CAMLOCK(isp);
2168 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2169 		}
2170 		break;
2171 	}
2172 #endif
2173 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2174 
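		/*
		 * isp_control() expects the channel number encoded in
		 * the upper 16 bits of the target argument.
		 */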
2175 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2176 		tgt = ccb->ccb_h.target_id;
2177 		tgt |= (bus << 16);
2178 
2179 		CAMLOCK_2_ISPLOCK(isp);
2180 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2181 		ISPLOCK_2_CAMLOCK(isp);
2182 		if (error) {
2183 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2184 		} else {
2185 			ccb->ccb_h.status = CAM_REQ_CMP;
2186 		}
2187 		xpt_done(ccb);
2188 		break;
2189 	case XPT_ABORT:			/* Abort the specified CCB */
2190 	{
2191 		union ccb *accb = ccb->cab.abort_ccb;
2192 		CAMLOCK_2_ISPLOCK(isp);
2193 		switch (accb->ccb_h.func_code) {
2194 #ifdef	ISP_TARGET_MODE
2195 		case XPT_ACCEPT_TARGET_IO:
2196 		case XPT_IMMED_NOTIFY:
2197         		ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2198 			break;
2199 		case XPT_CONT_TARGET_IO:
2200 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2201 			ccb->ccb_h.status = CAM_UA_ABORT;
2202 			break;
2203 #endif
2204 		case XPT_SCSI_IO:
2205 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2206 			if (error) {
2207 				ccb->ccb_h.status = CAM_UA_ABORT;
2208 			} else {
2209 				ccb->ccb_h.status = CAM_REQ_CMP;
2210 			}
2211 			break;
2212 		default:
2213 			ccb->ccb_h.status = CAM_REQ_INVALID;
2214 			break;
2215 		}
2216 		ISPLOCK_2_CAMLOCK(isp);
2217 		xpt_done(ccb);
2218 		break;
2219 	}
2220 #ifdef	CAM_NEW_TRAN_CODE
2221 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2222 #else
2223 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2224 #endif
2225 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2226 		cts = &ccb->cts;
2227 		if (!IS_CURRENT_SETTINGS(cts)) {
2228 			ccb->ccb_h.status = CAM_REQ_INVALID;
2229 			xpt_done(ccb);
2230 			break;
2231 		}
2232 		tgt = cts->ccb_h.target_id;
2233 		CAMLOCK_2_ISPLOCK(isp);
2234 		if (IS_SCSI(isp)) {
2235 #ifndef	CAM_NEW_TRAN_CODE
2236 			sdparam *sdp = isp->isp_param;
2237 			u_int16_t *dptr;
2238 
2239 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2240 
2241 			sdp += bus;
2242 			/*
2243 			 * We always update (internally) from goal_flags
2244 			 * so any request to change settings just gets
2245 			 * vectored to that location.
2246 			 */
2247 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2248 
2249 			/*
2250 			 * Note that these operations affect the
2251 			 * goal flags (goal_flags)- not
2252 			 * the current state flags. Then we mark
2253 			 * things so that the next operation to
2254 			 * this HBA will cause the update to occur.
2255 			 */
2256 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2257 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2258 					*dptr |= DPARM_DISC;
2259 				} else {
2260 					*dptr &= ~DPARM_DISC;
2261 				}
2262 			}
2263 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2264 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2265 					*dptr |= DPARM_TQING;
2266 				} else {
2267 					*dptr &= ~DPARM_TQING;
2268 				}
2269 			}
2270 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2271 				switch (cts->bus_width) {
2272 				case MSG_EXT_WDTR_BUS_16_BIT:
2273 					*dptr |= DPARM_WIDE;
2274 					break;
2275 				default:
2276 					*dptr &= ~DPARM_WIDE;
2277 				}
2278 			}
2279 			/*
2280 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2281 			 * of nonzero will cause us to go to the
2282 			 * selected (from NVRAM) maximum value for
2283 			 * this device. At a later point, we'll
2284 			 * allow finer control.
2285 			 */
2286 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2287 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2288 			    (cts->sync_offset > 0)) {
2289 				*dptr |= DPARM_SYNC;
2290 			} else {
2291 				*dptr &= ~DPARM_SYNC;
2292 			}
2293 			*dptr |= DPARM_SAFE_DFLT;
2294 #else
2295 			struct ccb_trans_settings_scsi *scsi =
2296 			    &cts->proto_specific.scsi;
2297 			struct ccb_trans_settings_spi *spi =
2298 			    &cts->xport_specific.spi;
2299 			sdparam *sdp = isp->isp_param;
2300 			u_int16_t *dptr;
2301 
2302 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2303 			sdp += bus;
2304 			/*
2305 			 * We always update (internally) from goal_flags
2306 			 * so any request to change settings just gets
2307 			 * vectored to that location.
2308 			 */
2309 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2310 
2311 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2312 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2313 					*dptr |= DPARM_DISC;
2314 				else
2315 					*dptr &= ~DPARM_DISC;
2316 			}
2317 
2318 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2319 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2320 					*dptr |= DPARM_TQING;
2321 				else
2322 					*dptr &= ~DPARM_TQING;
2323 			}
2324 
2325 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2326 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2327 					*dptr |= DPARM_WIDE;
2328 				else
2329 					*dptr &= ~DPARM_WIDE;
2330 			}
2331 
2332 			/*
2333 			 * XXX: FIX ME
2334 			 */
2335 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2336 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2337 			    (spi->sync_period && spi->sync_offset)) {
2338 				*dptr |= DPARM_SYNC;
2339 				/*
2340 				 * XXX: CHECK FOR LEGALITY
2341 				 */
2342 				sdp->isp_devparam[tgt].goal_period =
2343 				    spi->sync_period;
2344 				sdp->isp_devparam[tgt].goal_offset =
2345 				    spi->sync_offset;
2346 			} else {
2347 				*dptr &= ~DPARM_SYNC;
2348 			}
2349 #endif
2350 			isp_prt(isp, ISP_LOGDEBUG0,
2351 			    "SET bus %d targ %d to flags %x off %x per %x",
2352 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2353 			    sdp->isp_devparam[tgt].goal_offset,
2354 			    sdp->isp_devparam[tgt].goal_period);
2355 			sdp->isp_devparam[tgt].dev_update = 1;
2356 			isp->isp_update |= (1 << bus);
2357 		}
2358 		ISPLOCK_2_CAMLOCK(isp);
2359 		ccb->ccb_h.status = CAM_REQ_CMP;
2360 		xpt_done(ccb);
2361 		break;
2362 	case XPT_GET_TRAN_SETTINGS:
2363 		cts = &ccb->cts;
2364 		tgt = cts->ccb_h.target_id;
2365 		CAMLOCK_2_ISPLOCK(isp);
2366 		if (IS_FC(isp)) {
2367 #ifndef	CAM_NEW_TRAN_CODE
2368 			/*
2369 			 * a lot of normal SCSI things don't make sense.
2370 			 */
2371 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2372 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2373 			/*
2374 			 * How do you measure the width of a high
2375 			 * speed serial bus? Well, in bytes.
2376 			 *
2377 			 * Offset and period make no sense, though, so we set
2378 			 * (above) a 'base' transfer speed to be gigabit.
2379 			 */
2380 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2381 #else
2382 			fcparam *fcp = isp->isp_param;
2383 			struct ccb_trans_settings_fc *fc =
2384 			    &cts->xport_specific.fc;
2385 
2386 			cts->protocol = PROTO_SCSI;
2387 			cts->protocol_version = SCSI_REV_2;
2388 			cts->transport = XPORT_FC;
2389 			cts->transport_version = 0;
2390 
2391 			fc->valid = CTS_FC_VALID_SPEED;
2392 			if (fcp->isp_gbspeed == 2)
2393 				fc->bitrate = 200000;
2394 			else
2395 				fc->bitrate = 100000;
2396 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2397 				struct lportdb *lp = &fcp->portdb[tgt];
2398 				fc->wwnn = lp->node_wwn;
2399 				fc->wwpn = lp->port_wwn;
2400 				fc->port = lp->portid;
2401 				fc->valid |= CTS_FC_VALID_WWNN |
2402 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2403 			}
2404 #endif
2405 		} else {
2406 #ifdef	CAM_NEW_TRAN_CODE
2407 			struct ccb_trans_settings_scsi *scsi =
2408 			    &cts->proto_specific.scsi;
2409 			struct ccb_trans_settings_spi *spi =
2410 			    &cts->xport_specific.spi;
2411 #endif
2412 			sdparam *sdp = isp->isp_param;
2413 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2414 			u_int16_t dval, pval, oval;
2415 
2416 			sdp += bus;
2417 
2418 			if (IS_CURRENT_SETTINGS(cts)) {
2419 				sdp->isp_devparam[tgt].dev_refresh = 1;
2420 				isp->isp_update |= (1 << bus);
2421 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2422 				    NULL);
2423 				dval = sdp->isp_devparam[tgt].actv_flags;
2424 				oval = sdp->isp_devparam[tgt].actv_offset;
2425 				pval = sdp->isp_devparam[tgt].actv_period;
2426 			} else {
2427 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2428 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2429 				pval = sdp->isp_devparam[tgt].nvrm_period;
2430 			}
2431 
2432 #ifndef	CAM_NEW_TRAN_CODE
2433 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2434 
2435 			if (dval & DPARM_DISC) {
2436 				cts->flags |= CCB_TRANS_DISC_ENB;
2437 			}
2438 			if (dval & DPARM_TQING) {
2439 				cts->flags |= CCB_TRANS_TAG_ENB;
2440 			}
2441 			if (dval & DPARM_WIDE) {
2442 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2443 			} else {
2444 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2445 			}
2446 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2447 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2448 
2449 			if ((dval & DPARM_SYNC) && oval != 0) {
2450 				cts->sync_period = pval;
2451 				cts->sync_offset = oval;
2452 				cts->valid |=
2453 				    CCB_TRANS_SYNC_RATE_VALID |
2454 				    CCB_TRANS_SYNC_OFFSET_VALID;
2455 			}
2456 #else
2457 			cts->protocol = PROTO_SCSI;
2458 			cts->protocol_version = SCSI_REV_2;
2459 			cts->transport = XPORT_SPI;
2460 			cts->transport_version = 2;
2461 
2462 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2463 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2464 			if (dval & DPARM_DISC) {
2465 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2466 			}
2467 			if (dval & DPARM_TQING) {
2468 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2469 			}
2470 			if ((dval & DPARM_SYNC) && oval && pval) {
2471 				spi->sync_offset = oval;
2472 				spi->sync_period = pval;
2473 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2474 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2475 			}
2476 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2477 			if (dval & DPARM_WIDE) {
2478 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2479 			} else {
2480 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2481 			}
2482 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2483 				scsi->valid = CTS_SCSI_VALID_TQ;
2484 				spi->valid |= CTS_SPI_VALID_DISC;
2485 			} else {
2486 				scsi->valid = 0;
2487 			}
2488 #endif
2489 			isp_prt(isp, ISP_LOGDEBUG0,
2490 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2491 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2492 			    bus, tgt, dval, oval, pval);
2493 		}
2494 		ISPLOCK_2_CAMLOCK(isp);
2495 		ccb->ccb_h.status = CAM_REQ_CMP;
2496 		xpt_done(ccb);
2497 		break;
2498 
2499 	case XPT_CALC_GEOMETRY:
2500 	{
2501 		struct ccb_calc_geometry *ccg;
2502 		u_int32_t secs_per_cylinder;
2503 		u_int32_t size_mb;
2504 
2505 		ccg = &ccb->ccg;
2506 		if (ccg->block_size == 0) {
2507 			isp_prt(isp, ISP_LOGERR,
2508 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2509 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2510 			ccb->ccb_h.status = CAM_REQ_INVALID;
2511 			xpt_done(ccb);
2512 			break;
2513 		}
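		/*
		 * Synthesize a disk geometry: 255 heads and 63 sectors
		 * per track for volumes larger than 1GB, 64 and 32
		 * otherwise.
		 */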
2514 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2515 		if (size_mb > 1024) {
2516 			ccg->heads = 255;
2517 			ccg->secs_per_track = 63;
2518 		} else {
2519 			ccg->heads = 64;
2520 			ccg->secs_per_track = 32;
2521 		}
2522 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2523 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2524 		ccb->ccb_h.status = CAM_REQ_CMP;
2525 		xpt_done(ccb);
2526 		break;
2527 	}
2528 	case XPT_RESET_BUS:		/* Reset the specified bus */
2529 		bus = cam_sim_bus(sim);
2530 		CAMLOCK_2_ISPLOCK(isp);
2531 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2532 		ISPLOCK_2_CAMLOCK(isp);
2533 		if (error)
2534 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2535 		else {
2536 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2537 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2538 			else if (isp->isp_path != NULL)
2539 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2540 			ccb->ccb_h.status = CAM_REQ_CMP;
2541 		}
2542 		xpt_done(ccb);
2543 		break;
2544 
2545 	case XPT_TERM_IO:		/* Terminate the I/O process */
2546 		ccb->ccb_h.status = CAM_REQ_INVALID;
2547 		xpt_done(ccb);
2548 		break;
2549 
2550 	case XPT_PATH_INQ:		/* Path routing inquiry */
2551 	{
2552 		struct ccb_pathinq *cpi = &ccb->cpi;
2553 
2554 		cpi->version_num = 1;
2555 #ifdef	ISP_TARGET_MODE
2556 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2557 #else
2558 		cpi->target_sprt = 0;
2559 #endif
2560 		cpi->hba_eng_cnt = 0;
2561 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2562 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2563 		cpi->bus_id = cam_sim_bus(sim);
2564 		if (IS_FC(isp)) {
2565 			cpi->hba_misc = PIM_NOBUSRESET;
2566 			/*
2567 			 * Because our loop ID can shift from time to time,
2568 			 * make our initiator ID out of range of our bus.
2569 			 */
2570 			cpi->initiator_id = cpi->max_target + 1;
2571 
2572 			/*
2573 			 * Set base transfer capabilities for Fibre Channel.
2574 			 * Technically not correct because we don't know
2575 			 * what media we're running on top of- but we'll
2576 			 * look good if we always say 100MB/s.
2577 			 */
2578 			if (FCPARAM(isp)->isp_gbspeed == 2)
2579 				cpi->base_transfer_speed = 200000;
2580 			else
2581 				cpi->base_transfer_speed = 100000;
2582 			cpi->hba_inquiry = PI_TAG_ABLE;
2583 #ifdef	CAM_NEW_TRAN_CODE
2584 			cpi->transport = XPORT_FC;
2585 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2586 #endif
2587 		} else {
2588 			sdparam *sdp = isp->isp_param;
2589 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2590 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2591 			cpi->hba_misc = 0;
2592 			cpi->initiator_id = sdp->isp_initiator_id;
2593 			cpi->base_transfer_speed = 3300;
2594 #ifdef	CAM_NEW_TRAN_CODE
2595 			cpi->transport = XPORT_SPI;
2596 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2597 #endif
2598 		}
2599 #ifdef	CAM_NEW_TRAN_CODE
2600 		cpi->protocol = PROTO_SCSI;
2601 		cpi->protocol_version = SCSI_REV_2;
2602 #endif
2603 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2604 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2605 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2606 		cpi->unit_number = cam_sim_unit(sim);
2607 		cpi->ccb_h.status = CAM_REQ_CMP;
2608 		xpt_done(ccb);
2609 		break;
2610 	}
2611 	default:
2612 		ccb->ccb_h.status = CAM_REQ_INVALID;
2613 		xpt_done(ccb);
2614 		break;
2615 	}
2616 }
2617 
2618 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
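/*
 * Command completion: map chip and SCSI status into CAM status, freeze
 * the device queue on errors, and hand the CCB back to CAM unless the
 * watchdog is currently running against this command.
 */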
2619 void
2620 isp_done(struct ccb_scsiio *sccb)
2621 {
2622 	struct ispsoftc *isp = XS_ISP(sccb);
2623 
2624 	if (XS_NOERR(sccb))
2625 		XS_SETERR(sccb, CAM_REQ_CMP);
2626 
2627 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2628 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2629 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2630 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2631 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2632 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2633 		} else {
2634 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2635 		}
2636 	}
2637 
2638 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2639 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2640 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2641 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2642 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2643 			isp_prt(isp, ISP_LOGDEBUG0,
2644 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2645 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2646 			    sccb->ccb_h.status, sccb->scsi_status);
2647 		}
2648 	}
2649 
2650 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2651 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2652 		xpt_print_path(sccb->ccb_h.path);
2653 		isp_prt(isp, ISP_LOGINFO,
2654 		    "cam completion status 0x%x", sccb->ccb_h.status);
2655 	}
2656 
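	/*
	 * Mark the command done. Unless the watchdog is currently working
	 * on this command, cancel its timeout and complete the CCB.
	 */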
2657 	XS_CMD_S_DONE(sccb);
2658 	if (XS_CMD_WDOG_P(sccb) == 0) {
2659 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2660 		if (XS_CMD_GRACE_P(sccb)) {
2661 			isp_prt(isp, ISP_LOGDEBUG2,
2662 			    "finished command on borrowed time");
2663 		}
2664 		XS_CMD_S_CLEAR(sccb);
2665 		ISPLOCK_2_CAMLOCK(isp);
2666 		xpt_done((union ccb *) sccb);
2667 		CAMLOCK_2_ISPLOCK(isp);
2668 	}
2669 }
2670 
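/*
 * Platform hook for asynchronous events from the core code: propagate
 * negotiation and bus reset notifications to CAM, track Fibre Channel
 * loop state, record fabric devices in the local port database and
 * reinitialize the chip after a firmware crash.
 */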
2671 int
2672 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2673 {
2674 	int bus, rv = 0;
2675 	switch (cmd) {
2676 	case ISPASYNC_NEW_TGT_PARAMS:
2677 	{
2678 #ifdef	CAM_NEW_TRAN_CODE
2679 		struct ccb_trans_settings_scsi *scsi;
2680 		struct ccb_trans_settings_spi *spi;
2681 #endif
2682 		int flags, tgt;
2683 		sdparam *sdp = isp->isp_param;
2684 		struct ccb_trans_settings cts;
2685 		struct cam_path *tmppath;
2686 
2687 		bzero(&cts, sizeof (struct ccb_trans_settings));
2688 
2689 		tgt = *((int *)arg);
2690 		bus = (tgt >> 16) & 0xffff;
2691 		tgt &= 0xffff;
2692 		sdp += bus;
2693 		ISPLOCK_2_CAMLOCK(isp);
2694 		if (xpt_create_path(&tmppath, NULL,
2695 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2696 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2697 			CAMLOCK_2_ISPLOCK(isp);
2698 			isp_prt(isp, ISP_LOGWARN,
2699 			    "isp_async cannot make temp path for %d.%d",
2700 			    tgt, bus);
2701 			rv = -1;
2702 			break;
2703 		}
2704 		CAMLOCK_2_ISPLOCK(isp);
2705 		flags = sdp->isp_devparam[tgt].actv_flags;
2706 #ifdef	CAM_NEW_TRAN_CODE
2707 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2708 		cts.protocol = PROTO_SCSI;
2709 		cts.transport = XPORT_SPI;
2710 
2711 		scsi = &cts.proto_specific.scsi;
2712 		spi = &cts.xport_specific.spi;
2713 
2714 		if (flags & DPARM_TQING) {
2715 			scsi->valid |= CTS_SCSI_VALID_TQ;
2716 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2717 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2718 		}
2719 
2720 		if (flags & DPARM_DISC) {
2721 			spi->valid |= CTS_SPI_VALID_DISC;
2722 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2723 		}
2724 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2725 		if (flags & DPARM_WIDE) {
2726 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2727 		} else {
2728 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2729 		}
2730 		if (flags & DPARM_SYNC) {
2731 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2732 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2733 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2734 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2735 		}
2736 #else
2737 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2738 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2739 		if (flags & DPARM_DISC) {
2740 			cts.flags |= CCB_TRANS_DISC_ENB;
2741 		}
2742 		if (flags & DPARM_TQING) {
2743 			cts.flags |= CCB_TRANS_TAG_ENB;
2744 		}
2745 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2746 		cts.bus_width = (flags & DPARM_WIDE)?
2747 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2748 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2749 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2750 		if (flags & DPARM_SYNC) {
2751 			cts.valid |=
2752 			    CCB_TRANS_SYNC_RATE_VALID |
2753 			    CCB_TRANS_SYNC_OFFSET_VALID;
2754 		}
2755 #endif
2756 		isp_prt(isp, ISP_LOGDEBUG2,
2757 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2758 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2759 		    sdp->isp_devparam[tgt].actv_offset, flags);
2760 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2761 		ISPLOCK_2_CAMLOCK(isp);
2762 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2763 		xpt_free_path(tmppath);
2764 		CAMLOCK_2_ISPLOCK(isp);
2765 		break;
2766 	}
2767 	case ISPASYNC_BUS_RESET:
2768 		bus = *((int *)arg);
2769 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2770 		    bus);
2771 		if (bus > 0 && isp->isp_path2) {
2772 			ISPLOCK_2_CAMLOCK(isp);
2773 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2774 			CAMLOCK_2_ISPLOCK(isp);
2775 		} else if (isp->isp_path) {
2776 			ISPLOCK_2_CAMLOCK(isp);
2777 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2778 			CAMLOCK_2_ISPLOCK(isp);
2779 		}
2780 		break;
2781 	case ISPASYNC_LIP:
2782 		if (isp->isp_path) {
2783 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2784 		}
2785 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2786 		break;
2787 	case ISPASYNC_LOOP_RESET:
2788 		if (isp->isp_path) {
2789 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2790 		}
2791 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2792 		break;
2793 	case ISPASYNC_LOOP_DOWN:
2794 		if (isp->isp_path) {
2795 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2796 		}
2797 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2798 		break;
2799 	case ISPASYNC_LOOP_UP:
2800 		/*
2801 		 * Now we just note that Loop has come up. We don't
2802 		 * actually do anything because we're waiting for a
2803 		 * Change Notify before activating the FC cleanup
2804 		 * thread to look at the state of the loop again.
2805 		 */
2806 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2807 		break;
2808 	case ISPASYNC_PROMENADE:
2809 	{
2810 		struct cam_path *tmppath;
2811 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2812 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2813 		static const char *roles[4] = {
2814 		    "(none)", "Target", "Initiator", "Target/Initiator"
2815 		};
2816 		fcparam *fcp = isp->isp_param;
2817 		int tgt = *((int *) arg);
2818 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2819 		struct lportdb *lp = &fcp->portdb[tgt];
2820 
2821 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2822 		    roles[lp->roles & 0x3],
2823 		    (lp->valid)? "Arrived" : "Departed",
2824 		    (u_int32_t) (lp->port_wwn >> 32),
2825 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2826 		    (u_int32_t) (lp->node_wwn >> 32),
2827 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2828 
2829 		ISPLOCK_2_CAMLOCK(isp);
2830 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2831 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2832 			CAMLOCK_2_ISPLOCK(isp);
2833 			break;
2834 		}
2835 		/*
2836 		 * Policy: only announce targets.
2837 		 */
2838 		if (lp->roles & is_tgt_mask) {
2839 			if (lp->valid) {
2840 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2841 			} else {
2842 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2843 			}
2844 		}
2845 		xpt_free_path(tmppath);
2846 		CAMLOCK_2_ISPLOCK(isp);
2847 		break;
2848 	}
2849 	case ISPASYNC_CHANGE_NOTIFY:
2850 		if (arg == ISPASYNC_CHANGE_PDB) {
2851 			isp_prt(isp, ISP_LOGINFO,
2852 			    "Port Database Changed");
2853 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2854 			isp_prt(isp, ISP_LOGINFO,
2855 			    "Name Server Database Changed");
2856 		}
2857 		cv_signal(&isp->isp_osinfo.kthread_cv);
2858 		break;
2859 	case ISPASYNC_FABRIC_DEV:
2860 	{
2861 		int target, base, lim;
2862 		fcparam *fcp = isp->isp_param;
2863 		struct lportdb *lp = NULL;
2864 		struct lportdb *clp = (struct lportdb *) arg;
2865 		char *pt;
2866 
2867 		switch (clp->port_type) {
2868 		case 1:
2869 			pt = "   N_Port";
2870 			break;
2871 		case 2:
2872 			pt = "  NL_Port";
2873 			break;
2874 		case 3:
2875 			pt = "F/NL_Port";
2876 			break;
2877 		case 0x7f:
2878 			pt = "  Nx_Port";
2879 			break;
2880 		case 0x81:
2881 			pt = "   F_Port";
2882 			break;
2883 		case 0x82:
2884 			pt = "  FL_Port";
2885 			break;
2886 		case 0x84:
2887 			pt = "   E_Port";
2888 			break;
2889 		default:
2890 			pt = " ";
2891 			break;
2892 		}
2893 
2894 		isp_prt(isp, ISP_LOGINFO,
2895 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2896 
2897 		/*
2898 		 * If we don't have an initiator role we bail.
2899 		 *
2900 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2901 		 */
2902 
2903 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2904 			break;
2905 		}
2906 
2907 		/*
2908 		 * Is this entry for us? If so, we bail.
2909 		 */
2910 
2911 		if (fcp->isp_portid == clp->portid) {
2912 			break;
2913 		}
2914 
2915 		/*
2916 		 * Else, the default policy is to find room for it in
2917 		 * our local port database. Later, when we execute
2918 		 * the call to isp_pdb_sync either this newly arrived
2919 		 * or already logged in device will be (re)announced.
2920 		 */
2921 
2922 		if (fcp->isp_topo == TOPO_FL_PORT)
2923 			base = FC_SNS_ID+1;
2924 		else
2925 			base = 0;
2926 
2927 		if (fcp->isp_topo == TOPO_N_PORT)
2928 			lim = 1;
2929 		else
2930 			lim = MAX_FC_TARG;
2931 
2932 		/*
2933 		 * Is it already in our list?
2934 		 */
2935 		for (target = base; target < lim; target++) {
2936 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2937 				continue;
2938 			}
2939 			lp = &fcp->portdb[target];
2940 			if (lp->port_wwn == clp->port_wwn &&
2941 			    lp->node_wwn == clp->node_wwn) {
2942 				lp->fabric_dev = 1;
2943 				break;
2944 			}
2945 		}
2946 		if (target < lim) {
2947 			break;
2948 		}
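		/*
		 * Not found- claim the first free slot (port_wwn == 0)
		 * in the local port database.
		 */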
2949 		for (target = base; target < lim; target++) {
2950 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2951 				continue;
2952 			}
2953 			lp = &fcp->portdb[target];
2954 			if (lp->port_wwn == 0) {
2955 				break;
2956 			}
2957 		}
2958 		if (target == lim) {
2959 			isp_prt(isp, ISP_LOGWARN,
2960 			    "out of space for fabric devices");
2961 			break;
2962 		}
2963 		lp->port_type = clp->port_type;
2964 		lp->fc4_type = clp->fc4_type;
2965 		lp->node_wwn = clp->node_wwn;
2966 		lp->port_wwn = clp->port_wwn;
2967 		lp->portid = clp->portid;
2968 		lp->fabric_dev = 1;
2969 		break;
2970 	}
2971 #ifdef	ISP_TARGET_MODE
2972 	case ISPASYNC_TARGET_MESSAGE:
2973 	{
2974 		tmd_msg_t *mp = arg;
2975 		isp_prt(isp, ISP_LOGALL,
2976 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2977 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2978 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2979 		    mp->nt_msg[0]);
2980 		break;
2981 	}
2982 	case ISPASYNC_TARGET_EVENT:
2983 	{
2984 		tmd_event_t *ep = arg;
2985 		isp_prt(isp, ISP_LOGALL,
2986 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2987 		break;
2988 	}
2989 	case ISPASYNC_TARGET_ACTION:
2990 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2991 		default:
2992 			isp_prt(isp, ISP_LOGWARN,
2993 			   "event 0x%x for unhandled target action",
2994 			    ((isphdr_t *)arg)->rqs_entry_type);
2995 			break;
2996 		case RQSTYPE_NOTIFY:
2997 			if (IS_SCSI(isp)) {
2998 				rv = isp_handle_platform_notify_scsi(isp,
2999 				    (in_entry_t *) arg);
3000 			} else {
3001 				rv = isp_handle_platform_notify_fc(isp,
3002 				    (in_fcentry_t *) arg);
3003 			}
3004 			break;
3005 		case RQSTYPE_ATIO:
3006 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3007 			break;
3008 		case RQSTYPE_ATIO2:
3009 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3010 			break;
3011 		case RQSTYPE_CTIO2:
3012 		case RQSTYPE_CTIO:
3013 			rv = isp_handle_platform_ctio(isp, arg);
3014 			break;
3015 		case RQSTYPE_ENABLE_LUN:
3016 		case RQSTYPE_MODIFY_LUN:
3017 			if (IS_DUALBUS(isp)) {
3018 				bus =
3019 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3020 			} else {
3021 				bus = 0;
3022 			}
3023 			isp_cv_signal_rqe(isp, bus,
3024 			    ((lun_entry_t *)arg)->le_status);
3025 			break;
3026 		}
3027 		break;
3028 #endif
3029 	case ISPASYNC_FW_CRASH:
3030 	{
3031 		u_int16_t mbox1, mbox6;
3032 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3033 		if (IS_DUALBUS(isp)) {
3034 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
3035 		} else {
3036 			mbox6 = 0;
3037 		}
3038 		isp_prt(isp, ISP_LOGERR,
3039 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3040 		    mbox6, mbox1);
3041 		isp_reinit(isp);
3042 		break;
3043 	}
3044 	case ISPASYNC_UNHANDLED_RESPONSE:
3045 		break;
3046 	default:
3047 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3048 		break;
3049 	}
3050 	return (rv);
3051 }
3052 
3053 
3054 /*
3055  * Locks are held before coming here.
3056  */
3057 void
3058 isp_uninit(struct ispsoftc *isp)
3059 {
3060 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3061 	DISABLE_INTS(isp);
3062 }
3063 
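/*
 * Driver message printer: filter on the per-instance debug level
 * (ISP_LOGALL always prints) and prefix output with the unit name.
 */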
3064 void
3065 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3066 {
3067 	va_list ap;
3068 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3069 		return;
3070 	}
3071 	printf("%s: ", device_get_nameunit(isp->isp_dev));
3072 	va_start(ap, fmt);
3073 	vprintf(fmt, ap);
3074 	va_end(ap);
3075 	printf("\n");
3076 }
3077