xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision c4f6a2a9e1b1879b618c436ab4f56ff75c73a0f5)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
31 #include <machine/stdarg.h>	/* for use by isp_prt below */
32 #include <sys/conf.h>
33 #include <sys/module.h>
34 #include <sys/ioccom.h>
35 #include <dev/isp/isp_ioctl.h>
36 
37 
38 MODULE_VERSION(isp, 1);
39 int isp_announced = 0;
40 ispfwfunc *isp_get_firmware_p = NULL;
41 
42 static d_ioctl_t ispioctl;
43 static void isp_intr_enable(void *);
44 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
45 static void isp_poll(struct cam_sim *);
46 static timeout_t isp_watchdog;
47 static void isp_kthread(void *);
48 static void isp_action(struct cam_sim *, union ccb *);
49 
50 
51 #define ISP_CDEV_MAJOR	248
52 static struct cdevsw isp_cdevsw = {
53 	/* open */	nullopen,
54 	/* close */	nullclose,
55 	/* read */	noread,
56 	/* write */	nowrite,
57 	/* ioctl */	ispioctl,
58 	/* poll */	nopoll,
59 	/* mmap */	nommap,
60 	/* strategy */	nostrategy,
61 	/* name */	"isp",
62 	/* maj */	ISP_CDEV_MAJOR,
63 	/* dump */	nodump,
64 	/* psize */	nopsize,
65 	/* flags */	D_TAPE,
66 };
67 
68 static struct ispsoftc *isplist = NULL;
69 
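/*
 * CAM-side attachment: allocate the device queue and SIM(s), register
 * the bus(es) with CAM, arrange for interrupts to be enabled at the
 * end of boot, create the control device node and link this instance
 * onto the driver's softc list.
 */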
70 void
71 isp_attach(struct ispsoftc *isp)
72 {
73 	int primary, secondary;
74 	struct ccb_setasync csa;
75 	struct cam_devq *devq;
76 	struct cam_sim *sim;
77 	struct cam_path *path;
78 
79 	/*
80 	 * Establish (in case of 12X0) which bus is the primary.
81 	 */
82 
83 	primary = 0;
84 	secondary = 1;
85 
86 	/*
87 	 * Create the device queue for our SIM(s).
88 	 */
89 	devq = cam_simq_alloc(isp->isp_maxcmds);
90 	if (devq == NULL) {
91 		return;
92 	}
93 
94 	/*
95 	 * Construct our SIM entry.
96 	 */
97 	ISPLOCK_2_CAMLOCK(isp);
98 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
99 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
100 	if (sim == NULL) {
101 		cam_simq_free(devq);
102 		CAMLOCK_2_ISPLOCK(isp);
103 		return;
104 	}
105 	CAMLOCK_2_ISPLOCK(isp);
106 
107 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
108 	isp->isp_osinfo.ehook.ich_arg = isp;
109 	ISPLOCK_2_CAMLOCK(isp);
110 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
111 		cam_sim_free(sim, TRUE);
112 		CAMLOCK_2_ISPLOCK(isp);
113 		isp_prt(isp, ISP_LOGERR,
114 		    "could not establish interrupt enable hook");
115 		return;
116 	}
117 
118 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
119 		cam_sim_free(sim, TRUE);
120 		CAMLOCK_2_ISPLOCK(isp);
121 		return;
122 	}
123 
124 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
125 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
126 		xpt_bus_deregister(cam_sim_path(sim));
127 		cam_sim_free(sim, TRUE);
128 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
129 		CAMLOCK_2_ISPLOCK(isp);
130 		return;
131 	}
132 
133 	xpt_setup_ccb(&csa.ccb_h, path, 5);
134 	csa.ccb_h.func_code = XPT_SASYNC_CB;
135 	csa.event_enable = AC_LOST_DEVICE;
136 	csa.callback = isp_cam_async;
137 	csa.callback_arg = sim;
138 	xpt_action((union ccb *)&csa);
139 	CAMLOCK_2_ISPLOCK(isp);
140 	isp->isp_sim = sim;
141 	isp->isp_path = path;
142 	/*
143 	 * Create a kernel thread for fibre channel instances. We
144 	 * don't have dual channel FC cards.
145 	 */
146 	if (IS_FC(isp)) {
147 		ISPLOCK_2_CAMLOCK(isp);
148 		/* XXX: LOCK VIOLATION */
149 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
150 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
151 		    RFHIGHPID, "%s: fc_thrd",
152 		    device_get_nameunit(isp->isp_dev))) {
153 			xpt_bus_deregister(cam_sim_path(sim));
154 			cam_sim_free(sim, TRUE);
155 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
156 			CAMLOCK_2_ISPLOCK(isp);
157 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
158 			return;
159 		}
160 		CAMLOCK_2_ISPLOCK(isp);
161 	}
162 
163 
164 	/*
165 	 * If we have a second channel, construct SIM entry for that.
166 	 */
167 	if (IS_DUALBUS(isp)) {
168 		ISPLOCK_2_CAMLOCK(isp);
169 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
170 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
171 		if (sim == NULL) {
172 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
173 			xpt_free_path(isp->isp_path);
174 			cam_simq_free(devq);
175 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
176 			return;
177 		}
178 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
179 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
180 			xpt_free_path(isp->isp_path);
181 			cam_sim_free(sim, TRUE);
182 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
183 			CAMLOCK_2_ISPLOCK(isp);
184 			return;
185 		}
186 
187 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
188 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
189 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
190 			xpt_free_path(isp->isp_path);
191 			xpt_bus_deregister(cam_sim_path(sim));
192 			cam_sim_free(sim, TRUE);
193 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
194 			CAMLOCK_2_ISPLOCK(isp);
195 			return;
196 		}
197 
198 		xpt_setup_ccb(&csa.ccb_h, path, 5);
199 		csa.ccb_h.func_code = XPT_SASYNC_CB;
200 		csa.event_enable = AC_LOST_DEVICE;
201 		csa.callback = isp_cam_async;
202 		csa.callback_arg = sim;
203 		xpt_action((union ccb *)&csa);
204 		CAMLOCK_2_ISPLOCK(isp);
205 		isp->isp_sim2 = sim;
206 		isp->isp_path2 = path;
207 	}
208 
209 #ifdef	ISP_TARGET_MODE
210 	cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
211 	cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
212 	cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
213 	cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
214 #endif
215 	/*
216 	 * Create device nodes
217 	 */
218 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
219 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
220 
221 	if (isp->isp_role != ISP_ROLE_NONE) {
222 		isp->isp_state = ISP_RUNSTATE;
223 		ENABLE_INTS(isp);
224 	}
225 	if (isplist == NULL) {
226 		isplist = isp;
227 	} else {
228 		struct ispsoftc *tmp = isplist;
229 		while (tmp->isp_osinfo.next) {
230 			tmp = tmp->isp_osinfo.next;
231 		}
232 		tmp->isp_osinfo.next = isp;
233 	}
234 
235 }
236 
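/*
 * Freeze the CAM SIM queue while the fibre channel loop is down so
 * that new commands are held rather than failed. If the queue is
 * already frozen, just record the loopdown reason in the flags.
 */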
237 static __inline void
238 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
239 {
240 	if (isp->isp_osinfo.simqfrozen == 0) {
241 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
242 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
243 		ISPLOCK_2_CAMLOCK(isp);
244 		xpt_freeze_simq(isp->isp_sim, 1);
245 		CAMLOCK_2_ISPLOCK(isp);
246 	} else {
247 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
248 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
249 	}
250 }
251 
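/*
 * ioctl handler for the isp control device. The unit number encoded
 * in the minor number selects which instance on the softc list the
 * request applies to.
 */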
252 static int
253 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
254 {
255 	struct ispsoftc *isp;
256 	int retval = ENOTTY;
257 
258 	isp = isplist;
259 	while (isp) {
260 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
261 			break;
262 		}
263 		isp = isp->isp_osinfo.next;
264 	}
265 	if (isp == NULL)
266 		return (ENXIO);
267 
268 	switch (cmd) {
269 #ifdef	ISP_FW_CRASH_DUMP
270 	case ISP_GET_FW_CRASH_DUMP:
271 	{
272 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
273 		size_t sz;
274 
275 		retval = 0;
276 		if (IS_2200(isp))
277 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
278 		else
279 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
280 		ISP_LOCK(isp);
281 		if (ptr && *ptr) {
282 			void *uaddr = *((void **) addr);
283 			if (copyout(ptr, uaddr, sz)) {
284 				retval = EFAULT;
285 			} else {
286 				*ptr = 0;
287 			}
288 		} else {
289 			retval = ENXIO;
290 		}
291 		ISP_UNLOCK(isp);
292 		break;
293 	}
294 
295 	case ISP_FORCE_CRASH_DUMP:
296 		ISP_LOCK(isp);
297 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
298 		isp_fw_dump(isp);
299 		isp_reinit(isp);
300 		ISP_UNLOCK(isp);
301 		retval = 0;
302 		break;
303 #endif
304 	case ISP_SDBLEV:
305 	{
306 		int olddblev = isp->isp_dblev;
307 		isp->isp_dblev = *(int *)addr;
308 		*(int *)addr = olddblev;
309 		retval = 0;
310 		break;
311 	}
312 	case ISP_RESETHBA:
313 		ISP_LOCK(isp);
314 		isp_reinit(isp);
315 		ISP_UNLOCK(isp);
316 		retval = 0;
317 		break;
318 	case ISP_RESCAN:
319 		if (IS_FC(isp)) {
320 			ISP_LOCK(isp);
321 			if (isp_fc_runstate(isp, 5 * 1000000)) {
322 				retval = EIO;
323 			} else {
324 				retval = 0;
325 			}
326 			ISP_UNLOCK(isp);
327 		}
328 		break;
329 	case ISP_FC_LIP:
330 		if (IS_FC(isp)) {
331 			ISP_LOCK(isp);
332 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
333 				retval = EIO;
334 			} else {
335 				retval = 0;
336 			}
337 			ISP_UNLOCK(isp);
338 		}
339 		break;
340 	case ISP_FC_GETDINFO:
341 	{
342 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
343 		struct lportdb *lp;
344 
345 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
346 			retval = EINVAL;
347 			break;
348 		}
349 		ISP_LOCK(isp);
350 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
351 		if (lp->valid) {
352 			ifc->loopid = lp->loopid;
353 			ifc->portid = lp->portid;
354 			ifc->node_wwn = lp->node_wwn;
355 			ifc->port_wwn = lp->port_wwn;
356 			retval = 0;
357 		} else {
358 			retval = ENODEV;
359 		}
360 		ISP_UNLOCK(isp);
361 		break;
362 	}
363 	case ISP_GET_STATS:
364 	{
365 		isp_stats_t *sp = (isp_stats_t *) addr;
366 
367 		MEMZERO(sp, sizeof (*sp));
368 		sp->isp_stat_version = ISP_STATS_VERSION;
369 		sp->isp_type = isp->isp_type;
370 		sp->isp_revision = isp->isp_revision;
371 		ISP_LOCK(isp);
372 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
373 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
374 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
375 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
376 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
377 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
378 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
379 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
380 		ISP_UNLOCK(isp);
381 		retval = 0;
382 		break;
383 	}
384 	case ISP_CLR_STATS:
385 		ISP_LOCK(isp);
386 		isp->isp_intcnt = 0;
387 		isp->isp_intbogus = 0;
388 		isp->isp_intmboxc = 0;
389 		isp->isp_intoasync = 0;
390 		isp->isp_rsltccmplt = 0;
391 		isp->isp_fphccmplt = 0;
392 		isp->isp_rscchiwater = 0;
393 		isp->isp_fpcchiwater = 0;
394 		ISP_UNLOCK(isp);
395 		retval = 0;
396 		break;
397 	case ISP_FC_GETHINFO:
398 	{
399 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
400 		MEMZERO(hba, sizeof (*hba));
401 		ISP_LOCK(isp);
402 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
403 		hba->fc_scsi_supported = 1;
404 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
405 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
406 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
407 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
408 		ISP_UNLOCK(isp);
409 		retval = 0;
410 		break;
411 	}
412 	case ISP_GET_FC_PARAM:
413 	{
414 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
415 
416 		if (!IS_FC(isp)) {
417 			retval = EINVAL;
418 			break;
419 		}
420 		f->parameter = 0;
421 		if (strcmp(f->param_name, "framelength") == 0) {
422 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
423 			retval = 0;
424 			break;
425 		}
426 		if (strcmp(f->param_name, "exec_throttle") == 0) {
427 			f->parameter = FCPARAM(isp)->isp_execthrottle;
428 			retval = 0;
429 			break;
430 		}
431 		if (strcmp(f->param_name, "fullduplex") == 0) {
432 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
433 				f->parameter = 1;
434 			retval = 0;
435 			break;
436 		}
437 		if (strcmp(f->param_name, "loopid") == 0) {
438 			f->parameter = FCPARAM(isp)->isp_loopid;
439 			retval = 0;
440 			break;
441 		}
442 		retval = EINVAL;
443 		break;
444 	}
445 	case ISP_SET_FC_PARAM:
446 	{
447 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
448 		u_int32_t param = f->parameter;
449 
450 		if (!IS_FC(isp)) {
451 			retval = EINVAL;
452 			break;
453 		}
454 		f->parameter = 0;
455 		if (strcmp(f->param_name, "framelength") == 0) {
456 			if (param != 512 && param != 1024 && param != 2048) {
457 				retval = EINVAL;
458 				break;
459 			}
460 			FCPARAM(isp)->isp_maxfrmlen = param;
461 			retval = 0;
462 			break;
463 		}
464 		if (strcmp(f->param_name, "exec_throttle") == 0) {
465 			if (param < 16 || param > 255) {
466 				retval = EINVAL;
467 				break;
468 			}
469 			FCPARAM(isp)->isp_execthrottle = param;
470 			retval = 0;
471 			break;
472 		}
473 		if (strcmp(f->param_name, "fullduplex") == 0) {
474 			if (param != 0 && param != 1) {
475 				retval = EINVAL;
476 				break;
477 			}
478 			if (param) {
479 				FCPARAM(isp)->isp_fwoptions |=
480 				    ICBOPT_FULL_DUPLEX;
481 			} else {
482 				FCPARAM(isp)->isp_fwoptions &=
483 				    ~ICBOPT_FULL_DUPLEX;
484 			}
485 			retval = 0;
486 			break;
487 		}
488 		if (strcmp(f->param_name, "loopid") == 0) {
489 			if (param < 0 || param > 125) {
490 				retval = EINVAL;
491 				break;
492 			}
493 			FCPARAM(isp)->isp_loopid = param;
494 			retval = 0;
495 			break;
496 		}
497 		retval = EINVAL;
498 		break;
499 	}
500 	default:
501 		break;
502 	}
503 	return (retval);
504 }
505 
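/*
 * config_intrhook callback: enable chip interrupts (if this instance
 * has an active role) and then release the hook so booting can
 * continue.
 */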
506 static void
507 isp_intr_enable(void *arg)
508 {
509 	struct ispsoftc *isp = arg;
510 	if (isp->isp_role != ISP_ROLE_NONE) {
511 		ENABLE_INTS(isp);
512 		isp->isp_osinfo.intsok = 1;
513 	}
514 	/* Release our hook so that the boot can continue. */
515 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
516 }
517 
518 /*
519  * Put the target mode functions here, because some are inlines
520  */
521 
522 #ifdef	ISP_TARGET_MODE
523 
524 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
525 static __inline int are_any_luns_enabled(struct ispsoftc *, int);
526 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
527 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
528 static __inline int isp_psema_sig_rqe(struct ispsoftc *, int);
529 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
530 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int, int);
531 static __inline void isp_vsema_rqe(struct ispsoftc *, int);
532 static __inline atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
533 static cam_status
534 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
535 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
536 static void isp_en_lun(struct ispsoftc *, union ccb *);
537 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
538 static timeout_t isp_refire_putback_atio;
539 static void isp_complete_ctio(union ccb *);
540 static void isp_target_putback_atio(union ccb *);
541 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
542 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
543 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
544 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
545 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
546 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
547 
548 static __inline int
549 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
550 {
551 	tstate_t *tptr;
552 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
553 	if (tptr == NULL) {
554 		return (0);
555 	}
556 	do {
557 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
558 			return (1);
559 		}
560 	} while ((tptr = tptr->next) != NULL);
561 	return (0);
562 }
563 
564 static __inline int
565 are_any_luns_enabled(struct ispsoftc *isp, int port)
566 {
567 	int lo, hi;
568 	if (IS_DUALBUS(isp)) {
569 		lo = (port * (LUN_HASH_SIZE >> 1));
570 		hi = lo + (LUN_HASH_SIZE >> 1);
571 	} else {
572 		lo = 0;
573 		hi = LUN_HASH_SIZE;
574 	}
575 	for (; lo < hi; lo++) {
576 		if (isp->isp_osinfo.lun_hash[lo]) {
577 			return (1);
578 		}
579 	}
580 	return (0);
581 }
582 
583 static __inline tstate_t *
584 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
585 {
586 	tstate_t *tptr = NULL;
587 
588 	if (lun == CAM_LUN_WILDCARD) {
589 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
590 			tptr = &isp->isp_osinfo.tsdflt[bus];
591 			tptr->hold++;
592 			return (tptr);
593 		}
594 	} else {
595 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
596 		if (tptr == NULL) {
597 			return (NULL);
598 		}
599 	}
600 
601 	do {
602 		if (tptr->lun == lun && tptr->bus == bus) {
603 			tptr->hold++;
604 			return (tptr);
605 		}
606 	} while ((tptr = tptr->next) != NULL);
607 	return (tptr);
608 }
609 
610 static __inline void
611 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
612 {
613 	if (tptr->hold)
614 		tptr->hold--;
615 }
616 
617 static __inline int
618 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
619 {
620 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
621 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
622 		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
623 			return (-1);
624 		}
625 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
626 	}
627 	return (0);
628 }
629 
630 static __inline int
631 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
632 {
633 	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
634 		return (-1);
635 	}
636 	return (0);
637 }
638 
639 static __inline void
640 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
641 {
642 	isp->isp_osinfo.rstatus[bus] = status;
643 	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
644 }
645 
646 static __inline void
647 isp_vsema_rqe(struct ispsoftc *isp, int bus)
648 {
649 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
650 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
651 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
652 	}
653 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
654 }
655 
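/*
 * Linear search of the ATIO private data array for the adjunct whose
 * tag matches; returns NULL if no such entry exists.
 */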
656 static __inline atio_private_data_t *
657 isp_get_atpd(struct ispsoftc *isp, int tag)
658 {
659 	atio_private_data_t *atp;
660 	for (atp = isp->isp_osinfo.atpdp;
661 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
662 		if (atp->tag == tag)
663 			return (atp);
664 	}
665 	return (NULL);
666 }
667 
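/*
 * Allocate a new per-lun target state structure, give it its own CAM
 * path and link it onto the bus/lun hash chain. The new structure is
 * returned with one hold on it.
 */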
668 static cam_status
669 create_lun_state(struct ispsoftc *isp, int bus,
670     struct cam_path *path, tstate_t **rslt)
671 {
672 	cam_status status;
673 	lun_id_t lun;
674 	int hfx;
675 	tstate_t *tptr, *new;
676 
677 	lun = xpt_path_lun_id(path);
678 	if (lun < 0) {
679 		return (CAM_LUN_INVALID);
680 	}
681 	if (is_lun_enabled(isp, bus, lun)) {
682 		return (CAM_LUN_ALRDY_ENA);
683 	}
684 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
685 	if (new == NULL) {
686 		return (CAM_RESRC_UNAVAIL);
687 	}
688 
689 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
690 	    xpt_path_target_id(path), xpt_path_lun_id(path));
691 	if (status != CAM_REQ_CMP) {
692 		free(new, M_DEVBUF);
693 		return (status);
694 	}
695 	new->bus = bus;
696 	new->lun = lun;
697 	SLIST_INIT(&new->atios);
698 	SLIST_INIT(&new->inots);
699 	new->hold = 1;
700 
701 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
702 	tptr = isp->isp_osinfo.lun_hash[hfx];
703 	if (tptr == NULL) {
704 		isp->isp_osinfo.lun_hash[hfx] = new;
705 	} else {
706 		while (tptr->next)
707 			tptr = tptr->next;
708 		tptr->next = new;
709 	}
710 	*rslt = new;
711 	return (CAM_REQ_CMP);
712 }
713 
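/*
 * Unlink a per-lun target state structure from its hash chain and
 * free it, but only if nothing still holds a reference to it.
 */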
714 static __inline void
715 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
716 {
717 	int hfx;
718 	tstate_t *lw, *pw;
719 
720 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
721 	if (tptr->hold) {
722 		return;
723 	}
724 	pw = isp->isp_osinfo.lun_hash[hfx];
725 	if (pw == NULL) {
726 		return;
727 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
728 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
729 	} else {
730 		lw = pw;
731 		pw = lw->next;
732 		while (pw) {
733 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
734 				lw->next = pw->next;
735 				break;
736 			}
737 			lw = pw;
738 			pw = pw->next;
739 		}
740 		if (pw == NULL) {
741 			return;
742 		}
743 	}
744 	free(tptr, M_DEVBUF);
745 }
746 
747 /*
748  * Enable or disable a lun for target mode; we enter with our locks held.
749  */
750 static void
751 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
752 {
753 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
754 	struct ccb_en_lun *cel = &ccb->cel;
755 	tstate_t *tptr;
756 	u_int16_t rstat;
757 	int bus, cmd, av, wildcard;
758 	lun_id_t lun;
759 	target_id_t tgt;
760 
761 
762 	bus = XS_CHANNEL(ccb) & 0x1;
763 	tgt = ccb->ccb_h.target_id;
764 	lun = ccb->ccb_h.target_lun;
765 
766 	/*
767 	 * Do some sanity checking first.
768 	 */
769 
770 	if ((lun != CAM_LUN_WILDCARD) &&
771 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
772 		ccb->ccb_h.status = CAM_LUN_INVALID;
773 		return;
774 	}
775 
776 	if (IS_SCSI(isp)) {
777 		sdparam *sdp = isp->isp_param;
778 		sdp += bus;
779 		if (tgt != CAM_TARGET_WILDCARD &&
780 		    tgt != sdp->isp_initiator_id) {
781 			ccb->ccb_h.status = CAM_TID_INVALID;
782 			return;
783 		}
784 	} else {
785 		if (tgt != CAM_TARGET_WILDCARD &&
786 		    tgt != FCPARAM(isp)->isp_iid) {
787 			ccb->ccb_h.status = CAM_TID_INVALID;
788 			return;
789 		}
790 		/*
791 		 * This is as good a place as any to check f/w capabilities.
792 		 */
793 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
794 			isp_prt(isp, ISP_LOGERR,
795 			    "firmware does not support target mode");
796 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
797 			return;
798 		}
799 		/*
800 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
801 		 * XXX: dork with our already fragile enable/disable code.
802 		 */
803 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
804 			isp_prt(isp, ISP_LOGERR,
805 			    "firmware not SCCLUN capable");
806 		}
807 	}
808 
809 	if (tgt == CAM_TARGET_WILDCARD) {
810 		if (lun == CAM_LUN_WILDCARD) {
811 			wildcard = 1;
812 		} else {
813 			ccb->ccb_h.status = CAM_LUN_INVALID;
814 			return;
815 		}
816 	} else {
817 		wildcard = 0;
818 	}
819 
820 	/*
821 	 * Next check to see whether this is a target/lun wildcard action.
822 	 *
823 	 * If so, we know that we can accept commands for luns that haven't
824 	 * been enabled yet and send them upstream. Otherwise, we have to
825 	 * handle them locally (if we see them at all).
826 	 */
827 
828 	if (wildcard) {
829 		tptr = &isp->isp_osinfo.tsdflt[bus];
830 		if (cel->enable) {
831 			if (isp->isp_osinfo.tmflags[bus] &
832 			    TM_WILDCARD_ENABLED) {
833 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
834 				return;
835 			}
836 			ccb->ccb_h.status =
837 			    xpt_create_path(&tptr->owner, NULL,
838 			    xpt_path_path_id(ccb->ccb_h.path),
839 			    xpt_path_target_id(ccb->ccb_h.path),
840 			    xpt_path_lun_id(ccb->ccb_h.path));
841 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
842 				return;
843 			}
844 			SLIST_INIT(&tptr->atios);
845 			SLIST_INIT(&tptr->inots);
846 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
847 		} else {
848 			if ((isp->isp_osinfo.tmflags[bus] &
849 			    TM_WILDCARD_ENABLED) == 0) {
850 				ccb->ccb_h.status = CAM_REQ_CMP;
851 				return;
852 			}
853 			if (tptr->hold) {
854 				ccb->ccb_h.status = CAM_SCSI_BUSY;
855 				return;
856 			}
857 			xpt_free_path(tptr->owner);
858 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
859 		}
860 	}
861 
862 	/*
863 	 * Now check to see whether this bus needs to be
864 	 * enabled/disabled with respect to target mode.
865 	 */
866 	av = bus << 31;
867 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
868 		av |= ENABLE_TARGET_FLAG;
869 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
870 		if (av) {
871 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
872 			if (wildcard) {
873 				isp->isp_osinfo.tmflags[bus] &=
874 				    ~TM_WILDCARD_ENABLED;
875 				xpt_free_path(tptr->owner);
876 			}
877 			return;
878 		}
879 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
880 		isp_prt(isp, ISP_LOGINFO,
881 		    "Target Mode enabled on channel %d", bus);
882 	} else if (cel->enable == 0 &&
883 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
884 		if (are_any_luns_enabled(isp, bus)) {
885 			ccb->ccb_h.status = CAM_SCSI_BUSY;
886 			return;
887 		}
888 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
889 		if (av) {
890 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
891 			return;
892 		}
893 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
894 		isp_prt(isp, ISP_LOGINFO,
895 		    "Target Mode disabled on channel %d", bus);
896 	}
897 
898 	if (wildcard) {
899 		ccb->ccb_h.status = CAM_REQ_CMP;
900 		return;
901 	}
902 
903 	if (cel->enable) {
904 		ccb->ccb_h.status =
905 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
906 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
907 			return;
908 		}
909 	} else {
910 		tptr = get_lun_statep(isp, bus, lun);
911 		if (tptr == NULL) {
912 			ccb->ccb_h.status = CAM_LUN_INVALID;
913 			return;
914 		}
915 	}
916 
917 	if (isp_psema_sig_rqe(isp, bus)) {
918 		rls_lun_statep(isp, tptr);
919 		if (cel->enable)
920 			destroy_lun_state(isp, tptr);
921 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
922 		return;
923 	}
924 
925 	if (cel->enable) {
926 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
927 		int c, n, ulun = lun;
928 
929 		cmd = RQSTYPE_ENABLE_LUN;
930 		c = DFLT_CMND_CNT;
931 		n = DFLT_INOT_CNT;
932 		if (IS_FC(isp) && lun != 0) {
933 			cmd = RQSTYPE_MODIFY_LUN;
934 			n = 0;
935 			/*
936 		 	 * For SCC firmware, we only deal with setting
937 			 * (enabling or modifying) lun 0.
938 			 */
939 			ulun = 0;
940 		}
941 		rstat = LUN_ERR;
942 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
943 			xpt_print_path(ccb->ccb_h.path);
944 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
945 			goto out;
946 		}
947 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
948 			xpt_print_path(ccb->ccb_h.path);
949 			isp_prt(isp, ISP_LOGERR,
950 			    "wait for ENABLE/MODIFY LUN timed out");
951 			goto out;
952 		}
953 		rstat = isp->isp_osinfo.rstatus[bus];
954 		if (rstat != LUN_OK) {
955 			xpt_print_path(ccb->ccb_h.path);
956 			isp_prt(isp, ISP_LOGERR,
957 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
958 			goto out;
959 		}
960 	} else {
961 		int c, n, ulun = lun;
962 		u_int32_t seq;
963 
964 		rstat = LUN_ERR;
965 		seq = isp->isp_osinfo.rollinfo++;
966 		cmd = -RQSTYPE_MODIFY_LUN;
967 
968 		c = DFLT_CMND_CNT;
969 		n = DFLT_INOT_CNT;
970 		if (IS_FC(isp) && lun != 0) {
971 			n = 0;
972 			/*
973 		 	 * For SCC firmware, we only deal with setting
974 			 * (enabling or modifying) lun 0.
975 			 */
976 			ulun = 0;
977 		}
978 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
979 			xpt_print_path(ccb->ccb_h.path);
980 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
981 			goto out;
982 		}
983 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
984 			xpt_print_path(ccb->ccb_h.path);
985 			isp_prt(isp, ISP_LOGERR,
986 			    "wait for MODIFY LUN timed out");
987 			goto out;
988 		}
989 		rstat = isp->isp_osinfo.rstatus[bus];
990 		if (rstat != LUN_OK) {
991 			xpt_print_path(ccb->ccb_h.path);
992 			isp_prt(isp, ISP_LOGERR,
993 			    "MODIFY LUN returned 0x%x", rstat);
994 			goto out;
995 		}
996 		if (IS_FC(isp) && lun) {
997 			goto out;
998 		}
999 
1000 		seq = isp->isp_osinfo.rollinfo++;
1001 
1002 		rstat = LUN_ERR;
1003 		cmd = -RQSTYPE_ENABLE_LUN;
1004 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
1005 			xpt_print_path(ccb->ccb_h.path);
1006 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1007 			goto out;
1008 		}
1009 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1010 			xpt_print_path(ccb->ccb_h.path);
1011 			isp_prt(isp, ISP_LOGERR,
1012 			     "wait for DISABLE LUN timed out");
1013 			goto out;
1014 		}
1015 		rstat = isp->isp_osinfo.rstatus[bus];
1016 		if (rstat != LUN_OK) {
1017 			xpt_print_path(ccb->ccb_h.path);
1018 			isp_prt(isp, ISP_LOGWARN,
1019 			    "DISABLE LUN returned 0x%x", rstat);
1020 			goto out;
1021 		}
1022 		if (are_any_luns_enabled(isp, bus) == 0) {
1023 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1024 			if (av) {
1025 				isp_prt(isp, ISP_LOGWARN,
1026 				    "disable target mode on channel %d failed",
1027 				    bus);
1028 				goto out;
1029 			}
1030 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1031 			xpt_print_path(ccb->ccb_h.path);
1032 			isp_prt(isp, ISP_LOGINFO,
1033 			    "Target Mode disabled on channel %d", bus);
1034 		}
1035 	}
1036 
1037 out:
1038 	isp_vsema_rqe(isp, bus);
1039 
1040 	if (rstat != LUN_OK) {
1041 		xpt_print_path(ccb->ccb_h.path);
1042 		isp_prt(isp, ISP_LOGWARN,
1043 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
1044 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1045 		rls_lun_statep(isp, tptr);
1046 		if (cel->enable)
1047 			destroy_lun_state(isp, tptr);
1048 	} else {
1049 		xpt_print_path(ccb->ccb_h.path);
1050 		isp_prt(isp, ISP_LOGINFO, lfmt,
1051 		    (cel->enable) ? "en" : "dis", bus);
1052 		rls_lun_statep(isp, tptr);
1053 		if (cel->enable == 0) {
1054 			destroy_lun_state(isp, tptr);
1055 		}
1056 		ccb->ccb_h.status = CAM_REQ_CMP;
1057 	}
1058 }
1059 
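/*
 * Abort a queued ATIO or immediate notify CCB by unlinking it from
 * the per-lun list on which it is waiting.
 */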
1060 static cam_status
1061 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1062 {
1063 	tstate_t *tptr;
1064 	struct ccb_hdr_slist *lp;
1065 	struct ccb_hdr *curelm;
1066 	int found;
1067 	union ccb *accb = ccb->cab.abort_ccb;
1068 
1069 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1070 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1071 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1072 			return (CAM_PATH_INVALID);
1073 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1074 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1075 			return (CAM_PATH_INVALID);
1076 		}
1077 	}
1078 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1079 	if (tptr == NULL) {
1080 		return (CAM_PATH_INVALID);
1081 	}
1082 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1083 		lp = &tptr->atios;
1084 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1085 		lp = &tptr->inots;
1086 	} else {
1087 		rls_lun_statep(isp, tptr);
1088 		return (CAM_UA_ABORT);
1089 	}
1090 	curelm = SLIST_FIRST(lp);
1091 	found = 0;
1092 	if (curelm == &accb->ccb_h) {
1093 		found = 1;
1094 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1095 	} else {
1096 		while(curelm != NULL) {
1097 			struct ccb_hdr *nextelm;
1098 
1099 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1100 			if (nextelm == &accb->ccb_h) {
1101 				found = 1;
1102 				SLIST_NEXT(curelm, sim_links.sle) =
1103 				    SLIST_NEXT(nextelm, sim_links.sle);
1104 				break;
1105 			}
1106 			curelm = nextelm;
1107 		}
1108 	}
1109 	rls_lun_statep(isp, tptr);
1110 	if (found) {
1111 		accb->ccb_h.status = CAM_REQ_ABORTED;
1112 		return (CAM_REQ_CMP);
1113 	}
1114 	return(CAM_PATH_INVALID);
1115 }
1116 
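/*
 * Build a CTIO (parallel SCSI) or CTIO2 (fibre channel) continue
 * target I/O entry from the CCB and hand it to the DMA setup code,
 * which places it on the request queue for the firmware.
 */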
1117 static cam_status
1118 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1119 {
1120 	void *qe;
1121 	struct ccb_scsiio *cso = &ccb->csio;
1122 	u_int16_t *hp, save_handle;
1123 	u_int16_t nxti, optr;
1124 	u_int8_t local[QENTRY_LEN];
1125 
1126 
1127 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1128 		xpt_print_path(ccb->ccb_h.path);
1129 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1130 		return (CAM_RESRC_UNAVAIL);
1131 	}
1132 	bzero(local, QENTRY_LEN);
1133 
1134 	/*
1135 	 * We're either moving data or completing a command here.
1136 	 */
1137 
1138 	if (IS_FC(isp)) {
1139 		atio_private_data_t *atp;
1140 		ct2_entry_t *cto = (ct2_entry_t *) local;
1141 
1142 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1143 		cto->ct_header.rqs_entry_count = 1;
1144 		cto->ct_iid = cso->init_id;
1145 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1146 			cto->ct_lun = ccb->ccb_h.target_lun;
1147 		}
1148 
1149 		atp = isp_get_atpd(isp, cso->tag_id);
1150 		if (atp == NULL) {
1151 			isp_prt(isp, ISP_LOGERR,
1152 			    "cannot find private data adjunct for tag %x",
1153 			    cso->tag_id);
1154 			return (-1);
1155 		}
1156 
1157 		cto->ct_rxid = cso->tag_id;
1158 		if (cso->dxfer_len == 0) {
1159 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1160 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1161 				cto->ct_flags |= CT2_SENDSTATUS;
1162 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1163 				cto->ct_resid =
1164 				    atp->orig_datalen - atp->bytes_xfered;
1165 				if (cto->ct_resid < 0) {
1166 					cto->rsp.m1.ct_scsi_status |=
1167 					    CT2_DATA_OVER;
1168 				} else if (cto->ct_resid > 0) {
1169 					cto->rsp.m1.ct_scsi_status |=
1170 					    CT2_DATA_UNDER;
1171 				}
1172 			}
1173 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1174 				int m = min(cso->sense_len, MAXRESPLEN);
1175 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1176 				cto->rsp.m1.ct_senselen = m;
1177 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1178 			}
1179 		} else {
1180 			cto->ct_flags |= CT2_FLAG_MODE0;
1181 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1182 				cto->ct_flags |= CT2_DATA_IN;
1183 			} else {
1184 				cto->ct_flags |= CT2_DATA_OUT;
1185 			}
1186 			cto->ct_reloff = atp->bytes_xfered;
1187 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1188 				cto->ct_flags |= CT2_SENDSTATUS;
1189 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1190 				cto->ct_resid =
1191 				    atp->orig_datalen -
1192 				    (atp->bytes_xfered + cso->dxfer_len);
1193 				if (cto->ct_resid < 0) {
1194 					cto->rsp.m0.ct_scsi_status |=
1195 					    CT2_DATA_OVER;
1196 				} else if (cto->ct_resid > 0) {
1197 					cto->rsp.m0.ct_scsi_status |=
1198 					    CT2_DATA_UNDER;
1199 				}
1200 			} else {
1201 				atp->last_xframt = cso->dxfer_len;
1202 			}
1203 			/*
1204 			 * If we're sending data and status back together,
1205 			 * we can't also send back sense data as well.
1206 			 */
1207 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1208 		}
1209 
1210 		if (cto->ct_flags & CT2_SENDSTATUS) {
1211 			isp_prt(isp, ISP_LOGTDEBUG0,
1212 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1213 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1214 			    cso->dxfer_len, cto->ct_resid);
1215 			cto->ct_flags |= CT2_CCINCR;
1216 			atp->state = ATPD_STATE_LAST_CTIO;
1217 		} else
1218 			atp->state = ATPD_STATE_CTIO;
1219 		cto->ct_timeout = 10;
1220 		hp = &cto->ct_syshandle;
1221 	} else {
1222 		ct_entry_t *cto = (ct_entry_t *) local;
1223 
1224 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1225 		cto->ct_header.rqs_entry_count = 1;
1226 		cto->ct_iid = cso->init_id;
1227 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1228 		cto->ct_tgt = ccb->ccb_h.target_id;
1229 		cto->ct_lun = ccb->ccb_h.target_lun;
1230 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1231 		if (AT_HAS_TAG(cso->tag_id)) {
1232 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1233 			cto->ct_flags |= CT_TQAE;
1234 		}
1235 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1236 			cto->ct_flags |= CT_NODISC;
1237 		}
1238 		if (cso->dxfer_len == 0) {
1239 			cto->ct_flags |= CT_NO_DATA;
1240 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1241 			cto->ct_flags |= CT_DATA_IN;
1242 		} else {
1243 			cto->ct_flags |= CT_DATA_OUT;
1244 		}
1245 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1246 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1247 			cto->ct_scsi_status = cso->scsi_status;
1248 			cto->ct_resid = cso->resid;
1249 			isp_prt(isp, ISP_LOGTDEBUG0,
1250 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1251 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1252 			    cso->tag_id);
1253 		}
1254 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1255 		cto->ct_timeout = 10;
1256 		hp = &cto->ct_syshandle;
1257 	}
1258 
1259 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1260 		xpt_print_path(ccb->ccb_h.path);
1261 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1262 		return (CAM_RESRC_UNAVAIL);
1263 	}
1264 
1265 
1266 	/*
1267 	 * Call the dma setup routines for this entry (and any subsequent
1268 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1269 	 * new things to play with. As with isp_start's usage of DMA setup,
1270 	 * any swizzling is done in the machine dependent layer. Because
1271 	 * of this, we put the request onto the queue area first in native
1272 	 * format.
1273 	 */
1274 
1275 	save_handle = *hp;
1276 
1277 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1278 	case CMD_QUEUED:
1279 		ISP_ADD_REQUEST(isp, nxti);
1280 		return (CAM_REQ_INPROG);
1281 
1282 	case CMD_EAGAIN:
1283 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1284 		isp_destroy_handle(isp, save_handle);
1285 		return (CAM_RESRC_UNAVAIL);
1286 
1287 	default:
1288 		isp_destroy_handle(isp, save_handle);
1289 		return (XS_ERR(ccb));
1290 	}
1291 }
1292 
1293 static void
1294 isp_refire_putback_atio(void *arg)
1295 {
1296 	int s = splcam();
1297 	isp_target_putback_atio(arg);
1298 	splx(s);
1299 }
1300 
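/*
 * Return an ATIO resource to the firmware by placing a fresh accept
 * target I/O entry on the request queue, then complete the CCB that
 * carried it. If the request queue is full, retry from a timeout.
 */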
1301 static void
1302 isp_target_putback_atio(union ccb *ccb)
1303 {
1304 	struct ispsoftc *isp;
1305 	struct ccb_scsiio *cso;
1306 	u_int16_t nxti, optr;
1307 	void *qe;
1308 
1309 	isp = XS_ISP(ccb);
1310 
1311 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1312 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1313 		isp_prt(isp, ISP_LOGWARN,
1314 		    "isp_target_putback_atio: Request Queue Overflow");
1315 		return;
1316 	}
1317 	bzero(qe, QENTRY_LEN);
1318 	cso = &ccb->csio;
1319 	if (IS_FC(isp)) {
1320 		at2_entry_t local, *at = &local;
1321 		MEMZERO(at, sizeof (at2_entry_t));
1322 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1323 		at->at_header.rqs_entry_count = 1;
1324 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1325 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1326 		} else {
1327 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1328 		}
1329 		at->at_status = CT_OK;
1330 		at->at_rxid = cso->tag_id;
1331 		at->at_iid = cso->ccb_h.target_id;
1332 		isp_put_atio2(isp, at, qe);
1333 	} else {
1334 		at_entry_t local, *at = &local;
1335 		MEMZERO(at, sizeof (at_entry_t));
1336 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1337 		at->at_header.rqs_entry_count = 1;
1338 		at->at_iid = cso->init_id;
1339 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1340 		at->at_tgt = cso->ccb_h.target_id;
1341 		at->at_lun = cso->ccb_h.target_lun;
1342 		at->at_status = CT_OK;
1343 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1344 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1345 		isp_put_atio(isp, at, qe);
1346 	}
1347 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1348 	ISP_ADD_REQUEST(isp, nxti);
1349 	isp_complete_ctio(ccb);
1350 }
1351 
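/*
 * Mark a target mode CCB completed (unless some other status has
 * already been set) and hand it back to CAM.
 */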
1352 static void
1353 isp_complete_ctio(union ccb *ccb)
1354 {
1355 	struct ispsoftc *isp = XS_ISP(ccb);
1356 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1357 		ccb->ccb_h.status |= CAM_REQ_CMP;
1358 	}
1359 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1360 	xpt_done(ccb);
1361 }
1362 
1363 /*
1364  * Handle ATIO stuff that the generic code can't.
1365  * This means handling CDBs.
1366  */
1367 
1368 static int
1369 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1370 {
1371 	tstate_t *tptr;
1372 	int status, bus, iswildcard;
1373 	struct ccb_accept_tio *atiop;
1374 
1375 	/*
1376 	 * The firmware status (except for the QLTM_SVALID bit)
1377 	 * indicates why this ATIO was sent to us.
1378 	 *
1379 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1380 	 *
1381 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1382 	 * we're still connected on the SCSI bus.
1383 	 */
1384 	status = aep->at_status;
1385 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1386 		/*
1387 		 * Bus Phase Sequence error. We should have sense data
1388 		 * suggested by the f/w. I'm not sure quite yet what
1389 		 * suggested by the f/w. I'm not quite sure yet what
1390 		 */
1391 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1392 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1393 		return (0);
1394 	}
1395 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1396 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1397 		    status);
1398 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1399 		return (0);
1400 	}
1401 
1402 	bus = GET_BUS_VAL(aep->at_iid);
1403 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1404 	if (tptr == NULL) {
1405 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1406 		iswildcard = 1;
1407 	} else {
1408 		iswildcard = 0;
1409 	}
1410 
1411 	if (tptr == NULL) {
1412 		/*
1413 		 * Because we can't autofeed sense data back with
1414 		 * a command for parallel SCSI, we can't give back
1415 		 * a CHECK CONDITION. We'll give back a BUSY status
1416 		 * instead. This works out okay because the only
1417 		 * time we should, in fact, get this, is in the
1418 		 * case that somebody configured us without the
1419 		 * blackhole driver, so they get what they deserve.
1420 		 */
1421 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1422 		return (0);
1423 	}
1424 
1425 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1426 	if (atiop == NULL) {
1427 		/*
1428 		 * Because we can't autofeed sense data back with
1429 		 * a command for parallel SCSI, we can't give back
1430 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1431 		 * instead. This works out okay because the only time we
1432 		 * should, in fact, get this, is in the case that we've
1433 		 * run out of ATIOS.
1434 		 */
1435 		xpt_print_path(tptr->owner);
1436 		isp_prt(isp, ISP_LOGWARN,
1437 		    "no ATIOS for lun %d from initiator %d on channel %d",
1438 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1439 		if (aep->at_flags & AT_TQAE)
1440 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1441 		else
1442 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1443 		rls_lun_statep(isp, tptr);
1444 		return (0);
1445 	}
1446 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1447 	if (iswildcard) {
1448 		atiop->ccb_h.target_id = aep->at_tgt;
1449 		atiop->ccb_h.target_lun = aep->at_lun;
1450 	}
1451 	if (aep->at_flags & AT_NODISC) {
1452 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1453 	} else {
1454 		atiop->ccb_h.flags = 0;
1455 	}
1456 
1457 	if (status & QLTM_SVALID) {
1458 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1459 		atiop->sense_len = amt;
1460 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1461 	} else {
1462 		atiop->sense_len = 0;
1463 	}
1464 
1465 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1466 	atiop->cdb_len = aep->at_cdblen;
1467 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1468 	atiop->ccb_h.status = CAM_CDB_RECVD;
1469 	/*
1470 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1471 	 * and the handle (which we have to preserve).
1472 	 */
1473 	AT_MAKE_TAGID(atiop->tag_id, aep);
1474 	if (aep->at_flags & AT_TQAE) {
1475 		atiop->tag_action = aep->at_tag_type;
1476 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1477 	}
1478 	xpt_done((union ccb*)atiop);
1479 	isp_prt(isp, ISP_LOGTDEBUG0,
1480 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1481 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1482 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1483 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1484 	    "nondisc" : "disconnecting");
1485 	rls_lun_statep(isp, tptr);
1486 	return (0);
1487 }
1488 
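/*
 * Fibre channel flavor of the above: in addition to handing the CDB
 * upstream, bind the command to an ATIO private data adjunct so that
 * later CTIO2s can track how much data has moved.
 */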
1489 static int
1490 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1491 {
1492 	lun_id_t lun;
1493 	tstate_t *tptr;
1494 	struct ccb_accept_tio *atiop;
1495 	atio_private_data_t *atp;
1496 
1497 	/*
1498 	 * The firmware status (except for the QLTM_SVALID bit)
1499 	 * indicates why this ATIO was sent to us.
1500 	 *
1501 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1502 	 */
1503 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1504 		isp_prt(isp, ISP_LOGWARN,
1505 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1506 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1507 		return (0);
1508 	}
1509 
1510 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1511 		lun = aep->at_scclun;
1512 	} else {
1513 		lun = aep->at_lun;
1514 	}
1515 	tptr = get_lun_statep(isp, 0, lun);
1516 	if (tptr == NULL) {
1517 		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1518 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1519 	}
1520 
1521 	if (tptr == NULL) {
1522 		/*
1523 		 * What we'd like to know is whether or not we have a listener
1524 		 * upstream that really hasn't configured yet. If we do, then
1525 		 * we can give a more sensible reply here. If not, then we can
1526 		 * reject this out of hand.
1527 		 *
1528 		 * Choices for what to send were
1529 		 *
1530 		 *	Not Ready, Unit Not Self-Configured Yet
1531 		 *	(0x2,0x3e,0x00)
1532 		 *
1533 		 * for the former and
1534 		 *
1535 		 *	Illegal Request, Logical Unit Not Supported
1536 		 *	(0x5,0x25,0x00)
1537 		 *
1538 		 * for the latter.
1539 		 *
1540 		 * We used to decide whether there was at least one listener
1541 		 * based upon whether the black hole driver was configured.
1542 		 * However, recent config(8) changes have made this hard to do
1543 		 * at this time.
1544 		 *
1545 		 */
1546 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1547 		return (0);
1548 	}
1549 
1550 	atp = isp_get_atpd(isp, 0);
1551 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1552 	if (atiop == NULL || atp == NULL) {
1553 		/*
1554 		 * Because we can't autofeed sense data back with
1555 		 * a command for parallel SCSI, we can't give back
1556 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1557 		 * instead. This works out okay because the only time we
1558 		 * should, in fact, get this, is in the case that we've
1559 		 * run out of ATIOS.
1560 		 */
1561 		xpt_print_path(tptr->owner);
1562 		isp_prt(isp, ISP_LOGWARN,
1563 		    "no %s for lun %d from initiator %d",
1564 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1565 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1566 		rls_lun_statep(isp, tptr);
1567 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1568 		return (0);
1569 	}
1570 	atp->state = ATPD_STATE_ATIO;
1571 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1572 	tptr->atio_count--;
1573 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1574 	    lun, tptr->atio_count);
1575 
1576 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1577 		atiop->ccb_h.target_id =
1578 		    ((fcparam *)isp->isp_param)->isp_loopid;
1579 		atiop->ccb_h.target_lun = lun;
1580 	}
1581 	/*
1582 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1583 	 */
1584 	atiop->sense_len = 0;
1585 
1586 	atiop->init_id = aep->at_iid;
1587 	atiop->cdb_len = ATIO2_CDBLEN;
1588 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1589 	atiop->ccb_h.status = CAM_CDB_RECVD;
1590 	atiop->tag_id = aep->at_rxid;
1591 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1592 	case ATIO2_TC_ATTR_SIMPLEQ:
1593 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1594 		break;
1595 	case ATIO2_TC_ATTR_HEADOFQ:
1596 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1597 		break;
1598 	case ATIO2_TC_ATTR_ORDERED:
1599 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1600 		break;
1601 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1602 	case ATIO2_TC_ATTR_UNTAGGED:
1603 	default:
1604 		atiop->tag_action = 0;
1605 		break;
1606 	}
1607 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1608 
1609 	atp->tag = atiop->tag_id;
1610 	atp->lun = lun;
1611 	atp->orig_datalen = aep->at_datalen;
1612 	atp->last_xframt = 0;
1613 	atp->bytes_xfered = 0;
1614 	atp->state = ATPD_STATE_CAM;
1615 	xpt_done((union ccb*)atiop);
1616 
1617 	isp_prt(isp, ISP_LOGTDEBUG0,
1618 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1619 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1620 	    lun, aep->at_taskflags, aep->at_datalen);
1621 	rls_lun_statep(isp, tptr);
1622 	return (0);
1623 }
1624 
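/*
 * Handle completion of a CTIO or CTIO2: collect residual and sense
 * information and, for the final CTIO of a command, either complete
 * the CCB back to CAM or, on error, put the ATIO back to the firmware.
 */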
1625 static int
1626 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1627 {
1628 	union ccb *ccb;
1629 	int sentstatus, ok, notify_cam, resid = 0;
1630 	u_int16_t tval;
1631 
1632 	/*
1633 	 * CTIO and CTIO2 are close enough....
1634 	 */
1635 
1636 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1637 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1638 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1639 
1640 	if (IS_FC(isp)) {
1641 		ct2_entry_t *ct = arg;
1642 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1643 		if (atp == NULL) {
1644 			isp_prt(isp, ISP_LOGERR,
1645 			    "cannot find adjunct for %x after I/O",
1646 			    ct->ct_rxid);
1647 			return (0);
1648 		}
1649 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1650 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1651 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1652 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1653 		}
1654 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1655 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1656 			resid = ct->ct_resid;
1657 			atp->bytes_xfered += (atp->last_xframt - resid);
1658 			atp->last_xframt = 0;
1659 		}
1660 		if (sentstatus || !ok) {
1661 			atp->tag = 0;
1662 		}
1663 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1664 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1665 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1666 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1667 		    resid, sentstatus? "FIN" : "MID");
1668 		tval = ct->ct_rxid;
1669 
1670 		/* XXX: should really come after isp_complete_ctio */
1671 		atp->state = ATPD_STATE_PDON;
1672 	} else {
1673 		ct_entry_t *ct = arg;
1674 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1675 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1676 		/*
1677 		 * We *ought* to be able to get back to the original ATIO
1678 		 * here, but for some reason this gets lost. It's just as
1679 		 * well because it's squirrelled away as part of periph
1680 		 * private data.
1681 		 *
1682 		 * We can live without it as long as we continue to use
1683 		 * the auto-replenish feature for CTIOs.
1684 		 */
1685 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1686 		if (ct->ct_status & QLTM_SVALID) {
1687 			char *sp = (char *)ct;
1688 			sp += CTIO_SENSE_OFFSET;
1689 			ccb->csio.sense_len =
1690 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1691 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1692 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1693 		}
1694 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1695 			resid = ct->ct_resid;
1696 		}
1697 		isp_prt(isp, ISP_LOGTDEBUG0,
1698 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1699 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1700 		    ct->ct_status, ct->ct_flags, resid,
1701 		    sentstatus? "FIN" : "MID");
1702 		tval = ct->ct_fwhandle;
1703 	}
1704 	ccb->csio.resid += resid;
1705 
1706 	/*
1707 	 * We're here either because intermediate data transfers are done
1708 	 * and/or the final status CTIO (which may have joined with a
1709 	 * Data Transfer) is done.
1710 	 *
1711 	 * In any case, for this platform, the upper layers figure out
1712 	 * what to do next, so all we do here is collect status and
1713 	 * pass information along. Any DMA handles have already been
1714 	 * freed.
1715 	 */
1716 	if (notify_cam == 0) {
1717 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1718 		return (0);
1719 	}
1720 
1721 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1722 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1723 
1724 	if (!ok) {
1725 		isp_target_putback_atio(ccb);
1726 	} else {
1727 		isp_complete_ctio(ccb);
1728 
1729 	}
1730 	return (0);
1731 }
1732 
1733 static int
1734 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1735 {
1736 	return (0);	/* XXXX */
1737 }
1738 
1739 static int
1740 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1741 {
1742 
1743 	switch (inp->in_status) {
1744 	case IN_PORT_LOGOUT:
1745 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1746 		   inp->in_iid);
1747 		break;
1748 	case IN_PORT_CHANGED:
1749 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1750 		   inp->in_iid);
1751 		break;
1752 	case IN_GLOBAL_LOGO:
1753 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1754 		break;
1755 	case IN_ABORT_TASK:
1756 	{
1757 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1758 		struct ccb_immed_notify *inot = NULL;
1759 
1760 		if (atp) {
1761 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1762 			if (tptr) {
1763 				inot = (struct ccb_immed_notify *)
1764 				    SLIST_FIRST(&tptr->inots);
1765 				if (inot) {
1766 					SLIST_REMOVE_HEAD(&tptr->inots,
1767 					    sim_links.sle);
1768 				}
1769 			}
1770 			isp_prt(isp, ISP_LOGWARN,
1771 			   "abort task RX_ID %x IID %d state %d",
1772 			   inp->in_seqid, inp->in_iid, atp->state);
1773 		} else {
1774 			isp_prt(isp, ISP_LOGWARN,
1775 			   "abort task RX_ID %x from iid %d, state unknown",
1776 			   inp->in_seqid, inp->in_iid);
1777 		}
1778 		if (inot) {
1779 			inot->initiator_id = inp->in_iid;
1780 			inot->sense_len = 0;
1781 			inot->message_args[0] = MSG_ABORT_TAG;
1782 			inot->message_args[1] = inp->in_seqid & 0xff;
1783 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1784 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1785 			xpt_done((union ccb *)inot);
1786 		}
1787 		break;
1788 	}
1789 	default:
1790 		break;
1791 	}
1792 	return (0);
1793 }
1794 #endif
1795 
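/*
 * CAM async event callback. On AC_LOST_DEVICE for parallel SCSI
 * controllers, temporarily set the lost target's negotiation goals
 * back to default settings and push an update to the chip.
 */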
1796 static void
1797 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1798 {
1799 	struct cam_sim *sim;
1800 	struct ispsoftc *isp;
1801 
1802 	sim = (struct cam_sim *)cbarg;
1803 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1804 	switch (code) {
1805 	case AC_LOST_DEVICE:
1806 		if (IS_SCSI(isp)) {
1807 			u_int16_t oflags, nflags;
1808 			sdparam *sdp = isp->isp_param;
1809 			int tgt;
1810 
1811 			tgt = xpt_path_target_id(path);
1812 			ISP_LOCK(isp);
1813 			sdp += cam_sim_bus(sim);
1814 			nflags = sdp->isp_devparam[tgt].nvrm_flags;
1815 #ifndef	ISP_TARGET_MODE
1816 			nflags &= DPARM_SAFE_DFLT;
1817 			if (isp->isp_loaded_fw) {
1818 				nflags |= DPARM_NARROW | DPARM_ASYNC;
1819 			}
1820 #else
1821 			nflags = DPARM_DEFAULT;
1822 #endif
1823 			oflags = sdp->isp_devparam[tgt].goal_flags;
1824 			sdp->isp_devparam[tgt].goal_flags = nflags;
1825 			sdp->isp_devparam[tgt].dev_update = 1;
1826 			isp->isp_update |= (1 << cam_sim_bus(sim));
1827 			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
1828 			sdp->isp_devparam[tgt].goal_flags = oflags;
1829 			ISP_UNLOCK(isp);
1830 		}
1831 		break;
1832 	default:
1833 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1834 		break;
1835 	}
1836 }
1837 
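/*
 * Polled completion entry point for CAM: read and service any pending
 * interrupt status directly rather than waiting for the interrupt
 * handler.
 */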
1838 static void
1839 isp_poll(struct cam_sim *sim)
1840 {
1841 	struct ispsoftc *isp = cam_sim_softc(sim);
1842 	u_int16_t isr, sema, mbox;
1843 
1844 	ISP_LOCK(isp);
1845 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1846 		isp_intr(isp, isr, sema, mbox);
1847 	}
1848 	ISP_UNLOCK(isp);
1849 }
1850 
1851 
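/*
 * Per-command watchdog. If the command is still outstanding, give it
 * one more grace period by pushing a SYNC_ALL marker through the
 * request queue; if its grace period has already expired, abort it
 * and complete it with a timeout error.
 */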
1852 static void
1853 isp_watchdog(void *arg)
1854 {
1855 	XS_T *xs = arg;
1856 	struct ispsoftc *isp = XS_ISP(xs);
1857 	u_int32_t handle;
1858 	int iok;
1859 
1860 	/*
1861 	 * We've decided this command is dead. Make sure we're not trying
1862 	 * to kill a command that's already dead by getting its handle
1863 	 * and seeing whether it's still alive.
1864 	 */
1865 	ISP_LOCK(isp);
1866 	iok = isp->isp_osinfo.intsok;
1867 	isp->isp_osinfo.intsok = 0;
1868 	handle = isp_find_handle(isp, xs);
1869 	if (handle) {
1870 		u_int16_t isr, sema, mbox;
1871 
1872 		if (XS_CMD_DONE_P(xs)) {
1873 			isp_prt(isp, ISP_LOGDEBUG1,
1874 			    "watchdog found done cmd (handle 0x%x)", handle);
1875 			ISP_UNLOCK(isp);
1876 			return;
1877 		}
1878 
1879 		if (XS_CMD_WDOG_P(xs)) {
1880 			isp_prt(isp, ISP_LOGDEBUG2,
1881 			    "recursive watchdog (handle 0x%x)", handle);
1882 			ISP_UNLOCK(isp);
1883 			return;
1884 		}
1885 
1886 		XS_CMD_S_WDOG(xs);
1887 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1888 			isp_intr(isp, isr, sema, mbox);
1889 		}
1890 		if (XS_CMD_DONE_P(xs)) {
1891 			isp_prt(isp, ISP_LOGDEBUG2,
1892 			    "watchdog cleanup for handle 0x%x", handle);
1893 			xpt_done((union ccb *) xs);
1894 		} else if (XS_CMD_GRACE_P(xs)) {
1895 			/*
1896 			 * Make sure the command is *really* dead before we
1897 			 * release the handle (and DMA resources) for reuse.
1898 			 */
1899 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1900 
1901 			/*
1902 			 * After this point, the command is really dead.
1903 			 */
1904 			if (XS_XFRLEN(xs)) {
1905 				ISP_DMAFREE(isp, xs, handle);
1906 			}
1907 			isp_destroy_handle(isp, handle);
1908 			xpt_print_path(xs->ccb_h.path);
1909 			isp_prt(isp, ISP_LOGWARN,
1910 			    "watchdog timeout for handle 0x%x", handle);
1911 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1912 			XS_CMD_C_WDOG(xs);
1913 			isp_done(xs);
1914 		} else {
1915 			u_int16_t nxti, optr;
1916 			ispreq_t local, *mp = &local, *qe;
1917 
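			/*
			 * Editorial note: the command is neither done nor out
			 * of grace time yet, so re-arm the watchdog for
			 * another second (hz ticks), mark the command as being
			 * in its grace period, and queue a SYNC_ALL marker for
			 * this channel, apparently to prod the firmware into
			 * flushing any completion that may still be pending.
			 */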
1918 			XS_CMD_C_WDOG(xs);
1919 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1920 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1921 				ISP_UNLOCK(isp);
1922 				return;
1923 			}
1924 			XS_CMD_S_GRACE(xs);
1925 			MEMZERO((void *) mp, sizeof (*mp));
1926 			mp->req_header.rqs_entry_count = 1;
1927 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1928 			mp->req_modifier = SYNC_ALL;
1929 			mp->req_target = XS_CHANNEL(xs) << 7;
1930 			isp_put_request(isp, mp, qe);
1931 			ISP_ADD_REQUEST(isp, nxti);
1932 		}
1933 	} else {
1934 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1935 	}
1936 	isp->isp_osinfo.intsok = iok;
1937 	ISP_UNLOCK(isp);
1938 }
1939 
1940 static void
1941 isp_kthread(void *arg)
1942 {
1943 	struct ispsoftc *isp = arg;
1944 
1945 	mtx_lock(&isp->isp_lock);
1946 	/*
1947 	 * The first loop is for the case where we have yet to get
1948 	 * good Fibre Channel state.
1949 	 */
1950 	for (;;) {
1951 		int wasfrozen;
1952 
1953 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1954 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1955 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1956 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1957 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1958 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1959 				    isp->isp_osinfo.ktmature == 0) {
1960 					break;
1961 				}
1962 			}
1963 			msleep(isp_kthread, &isp->isp_lock,
1964 			    PRIBIO, "isp_fcthrd", hz);
1965 		}
1966 
1967 		/*
1968 		 * Even if we didn't get good loop state we may be
1969 		 * unfreezing the SIMQ so that we can kill off
1970 		 * commands (if we've never seen loop before, for example).
1971 		 */
1972 		isp->isp_osinfo.ktmature = 1;
1973 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1974 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1975 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1976 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
1977 			ISPLOCK_2_CAMLOCK(isp);
1978 			xpt_release_simq(isp->isp_sim, 1);
1979 			CAMLOCK_2_ISPLOCK(isp);
1980 		}
1981 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
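		/*
		 * Editorial note: kthread_cv is signaled from isp_async()
		 * on a Change Notify and from isp_action() when a command
		 * returns CMD_RQLATER, so the thread sleeps here until one
		 * of those events occurs.
		 */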
1982 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
1983 	}
1984 }
1985 
1986 static void
1987 isp_action(struct cam_sim *sim, union ccb *ccb)
1988 {
1989 	int bus, tgt, error;
1990 	struct ispsoftc *isp;
1991 	struct ccb_trans_settings *cts;
1992 
1993 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1994 
1995 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1996 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1997 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1998 	if (isp->isp_state != ISP_RUNSTATE &&
1999 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
2000 		CAMLOCK_2_ISPLOCK(isp);
2001 		isp_init(isp);
2002 		if (isp->isp_state != ISP_INITSTATE) {
2003 			ISP_UNLOCK(isp);
2004 			/*
2005 			 * Lie. Say it was a selection timeout.
2006 			 */
2007 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2008 			xpt_freeze_devq(ccb->ccb_h.path, 1);
2009 			xpt_done(ccb);
2010 			return;
2011 		}
2012 		isp->isp_state = ISP_RUNSTATE;
2013 		ISPLOCK_2_CAMLOCK(isp);
2014 	}
2015 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2016 
2017 
2018 	switch (ccb->ccb_h.func_code) {
2019 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2020 		/*
2021 		 * Do a couple of preliminary checks...
2022 		 */
2023 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2024 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2025 				ccb->ccb_h.status = CAM_REQ_INVALID;
2026 				xpt_done(ccb);
2027 				break;
2028 			}
2029 		}
2030 #ifdef	DIAGNOSTIC
2031 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2032 			ccb->ccb_h.status = CAM_PATH_INVALID;
2033 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2034 			ccb->ccb_h.status = CAM_PATH_INVALID;
2035 		}
2036 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2037 			isp_prt(isp, ISP_LOGERR,
2038 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2039 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2040 			xpt_done(ccb);
2041 			break;
2042 		}
2043 #endif
2044 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2045 		CAMLOCK_2_ISPLOCK(isp);
2046 		error = isp_start((XS_T *) ccb);
2047 		switch (error) {
2048 		case CMD_QUEUED:
2049 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
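			/*
			 * Editorial note: the CAM timeout (milliseconds, or
			 * CAM_TIME_DEFAULT meaning 60 seconds) is converted
			 * to callout ticks plus two seconds of slack.  For
			 * example, with hz = 100 and a 30000 ms timeout:
			 * 30000 * 100 = 3000000, (3000000 + 999) / 1000 =
			 * 3000, plus 2 * hz gives 3200 ticks, so the
			 * watchdog fires after roughly 32 seconds.
			 */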
2050 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2051 				u_int64_t ticks = (u_int64_t) hz;
2052 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2053 					ticks = 60 * 1000 * ticks;
2054 				else
2055 					ticks = ccb->ccb_h.timeout * hz;
2056 				ticks = ((ticks + 999) / 1000) + hz + hz;
2057 				if (ticks >= 0x80000000) {
2058 					isp_prt(isp, ISP_LOGERR,
2059 					    "timeout overflow");
2060 					ticks = 0x7fffffff;
2061 				}
2062 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2063 				    (caddr_t)ccb, (int)ticks);
2064 			} else {
2065 				callout_handle_init(&ccb->ccb_h.timeout_ch);
2066 			}
2067 			ISPLOCK_2_CAMLOCK(isp);
2068 			break;
2069 		case CMD_RQLATER:
2070 			/*
2071 			 * This can only happen for Fibre Channel
2072 			 */
2073 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2074 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2075 			    isp->isp_osinfo.ktmature) {
2076 				ISPLOCK_2_CAMLOCK(isp);
2077 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2078 				xpt_done(ccb);
2079 				break;
2080 			}
2081 			cv_signal(&isp->isp_osinfo.kthread_cv);
2082 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2083 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2084 			ISPLOCK_2_CAMLOCK(isp);
2085 			xpt_done(ccb);
2086 			break;
2087 		case CMD_EAGAIN:
2088 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2089 			ISPLOCK_2_CAMLOCK(isp);
2090 			xpt_done(ccb);
2091 			break;
2092 		case CMD_COMPLETE:
2093 			isp_done((struct ccb_scsiio *) ccb);
2094 			ISPLOCK_2_CAMLOCK(isp);
2095 			break;
2096 		default:
2097 			isp_prt(isp, ISP_LOGERR,
2098 			    "What's this? 0x%x at %d in file %s",
2099 			    error, __LINE__, __FILE__);
2100 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2101 			xpt_done(ccb);
2102 			ISPLOCK_2_CAMLOCK(isp);
2103 		}
2104 		break;
2105 
2106 #ifdef	ISP_TARGET_MODE
2107 	case XPT_EN_LUN:		/* Enable LUN as a target */
2108 	{
2109 		int iok;
2110 		CAMLOCK_2_ISPLOCK(isp);
2111 		iok = isp->isp_osinfo.intsok;
2112 		isp->isp_osinfo.intsok = 0;
2113 		isp_en_lun(isp, ccb);
2114 		isp->isp_osinfo.intsok = iok;
2115 		ISPLOCK_2_CAMLOCK(isp);
2116 		xpt_done(ccb);
2117 		break;
2118 	}
2119 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2120 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2121 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2122 	{
2123 		tstate_t *tptr =
2124 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2125 		if (tptr == NULL) {
2126 			ccb->ccb_h.status = CAM_LUN_INVALID;
2127 			xpt_done(ccb);
2128 			break;
2129 		}
2130 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2131 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2132 		ccb->ccb_h.flags = 0;
2133 
2134 		CAMLOCK_2_ISPLOCK(isp);
2135 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2136 			/*
2137 			 * Note that the command itself may not be done;
2138 			 * it may not even have had the first CTIO sent.
2139 			 */
2140 			tptr->atio_count++;
2141 			isp_prt(isp, ISP_LOGTDEBUG0,
2142 			    "Put FREE ATIO2, lun %d, count now %d",
2143 			    ccb->ccb_h.target_lun, tptr->atio_count);
2144 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2145 			    sim_links.sle);
2146 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2147 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2148 			    sim_links.sle);
2149 		} else {
2150 			;
2151 		}
2152 		rls_lun_statep(isp, tptr);
2153 		ccb->ccb_h.status = CAM_REQ_INPROG;
2154 		ISPLOCK_2_CAMLOCK(isp);
2155 		break;
2156 	}
2157 	case XPT_CONT_TARGET_IO:
2158 	{
2159 		CAMLOCK_2_ISPLOCK(isp);
2160 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2161 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2162 			isp_prt(isp, ISP_LOGWARN,
2163 			    "XPT_CONT_TARGET_IO: status 0x%x",
2164 			    ccb->ccb_h.status);
2165 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2166 			ISPLOCK_2_CAMLOCK(isp);
2167 			xpt_done(ccb);
2168 		} else {
2169 			ISPLOCK_2_CAMLOCK(isp);
2170 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2171 		}
2172 		break;
2173 	}
2174 #endif
2175 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2176 
2177 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2178 		tgt = ccb->ccb_h.target_id;
2179 		tgt |= (bus << 16);
2180 
2181 		CAMLOCK_2_ISPLOCK(isp);
2182 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2183 		ISPLOCK_2_CAMLOCK(isp);
2184 		if (error) {
2185 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2186 		} else {
2187 			ccb->ccb_h.status = CAM_REQ_CMP;
2188 		}
2189 		xpt_done(ccb);
2190 		break;
2191 	case XPT_ABORT:			/* Abort the specified CCB */
2192 	{
2193 		union ccb *accb = ccb->cab.abort_ccb;
2194 		CAMLOCK_2_ISPLOCK(isp);
2195 		switch (accb->ccb_h.func_code) {
2196 #ifdef	ISP_TARGET_MODE
2197 		case XPT_ACCEPT_TARGET_IO:
2198 		case XPT_IMMED_NOTIFY:
2199 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2200 			break;
2201 		case XPT_CONT_TARGET_IO:
2202 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2203 			ccb->ccb_h.status = CAM_UA_ABORT;
2204 			break;
2205 #endif
2206 		case XPT_SCSI_IO:
2207 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2208 			if (error) {
2209 				ccb->ccb_h.status = CAM_UA_ABORT;
2210 			} else {
2211 				ccb->ccb_h.status = CAM_REQ_CMP;
2212 			}
2213 			break;
2214 		default:
2215 			ccb->ccb_h.status = CAM_REQ_INVALID;
2216 			break;
2217 		}
2218 		ISPLOCK_2_CAMLOCK(isp);
2219 		xpt_done(ccb);
2220 		break;
2221 	}
2222 #ifdef	CAM_NEW_TRAN_CODE
2223 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2224 #else
2225 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2226 #endif
2227 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2228 		cts = &ccb->cts;
2229 		if (!IS_CURRENT_SETTINGS(cts)) {
2230 			ccb->ccb_h.status = CAM_REQ_INVALID;
2231 			xpt_done(ccb);
2232 			break;
2233 		}
2234 		tgt = cts->ccb_h.target_id;
2235 		CAMLOCK_2_ISPLOCK(isp);
2236 		if (IS_SCSI(isp)) {
2237 #ifndef	CAM_NEW_TRAN_CODE
2238 			sdparam *sdp = isp->isp_param;
2239 			u_int16_t *dptr;
2240 
2241 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2242 
2243 			sdp += bus;
2244 			/*
2245 			 * We always update (internally) from goal_flags
2246 			 * so any request to change settings just gets
2247 			 * vectored to that location.
2248 			 */
2249 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2250 
2251 			/*
2252 			 * Note that these operations affect the
2253 			 * goal flags (goal_flags), not the current
2254 			 * state flags. Then we mark
2255 			 * things so that the next operation to
2256 			 * this HBA will cause the update to occur.
2257 			 */
2258 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2259 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2260 					*dptr |= DPARM_DISC;
2261 				} else {
2262 					*dptr &= ~DPARM_DISC;
2263 				}
2264 			}
2265 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2266 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2267 					*dptr |= DPARM_TQING;
2268 				} else {
2269 					*dptr &= ~DPARM_TQING;
2270 				}
2271 			}
2272 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2273 				switch (cts->bus_width) {
2274 				case MSG_EXT_WDTR_BUS_16_BIT:
2275 					*dptr |= DPARM_WIDE;
2276 					break;
2277 				default:
2278 					*dptr &= ~DPARM_WIDE;
2279 				}
2280 			}
2281 			/*
2282 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2283 			 * of nonzero will cause us to go to the
2284 			 * selected (from NVRAM) maximum value for
2285 			 * this device. At a later point, we'll
2286 			 * allow finer control.
2287 			 */
2288 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2289 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2290 			    (cts->sync_offset > 0)) {
2291 				*dptr |= DPARM_SYNC;
2292 			} else {
2293 				*dptr &= ~DPARM_SYNC;
2294 			}
2295 			*dptr |= DPARM_SAFE_DFLT;
2296 #else
2297 			struct ccb_trans_settings_scsi *scsi =
2298 			    &cts->proto_specific.scsi;
2299 			struct ccb_trans_settings_spi *spi =
2300 			    &cts->xport_specific.spi;
2301 			sdparam *sdp = isp->isp_param;
2302 			u_int16_t *dptr;
2303 
2304 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2305 			sdp += bus;
2306 			/*
2307 			 * We always update (internally) from goal_flags
2308 			 * so any request to change settings just gets
2309 			 * vectored to that location.
2310 			 */
2311 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2312 
2313 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2314 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2315 					*dptr |= DPARM_DISC;
2316 				else
2317 					*dptr &= ~DPARM_DISC;
2318 			}
2319 
2320 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2321 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2322 					*dptr |= DPARM_TQING;
2323 				else
2324 					*dptr &= ~DPARM_TQING;
2325 			}
2326 
2327 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2328 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2329 					*dptr |= DPARM_WIDE;
2330 				else
2331 					*dptr &= ~DPARM_WIDE;
2332 			}
2333 
2334 			/*
2335 			 * XXX: FIX ME
2336 			 */
2337 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2338 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2339 			    (spi->sync_period && spi->sync_offset)) {
2340 				*dptr |= DPARM_SYNC;
2341 				/*
2342 				 * XXX: CHECK FOR LEGALITY
2343 				 */
2344 				sdp->isp_devparam[tgt].goal_period =
2345 				    spi->sync_period;
2346 				sdp->isp_devparam[tgt].goal_offset =
2347 				    spi->sync_offset;
2348 			} else {
2349 				*dptr &= ~DPARM_SYNC;
2350 			}
2351 #endif
2352 			isp_prt(isp, ISP_LOGDEBUG0,
2353 			    "SET bus %d targ %d to flags %x off %x per %x",
2354 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2355 			    sdp->isp_devparam[tgt].goal_offset,
2356 			    sdp->isp_devparam[tgt].goal_period);
2357 			sdp->isp_devparam[tgt].dev_update = 1;
2358 			isp->isp_update |= (1 << bus);
2359 		}
2360 		ISPLOCK_2_CAMLOCK(isp);
2361 		ccb->ccb_h.status = CAM_REQ_CMP;
2362 		xpt_done(ccb);
2363 		break;
2364 	case XPT_GET_TRAN_SETTINGS:
2365 		cts = &ccb->cts;
2366 		tgt = cts->ccb_h.target_id;
2367 		CAMLOCK_2_ISPLOCK(isp);
2368 		if (IS_FC(isp)) {
2369 #ifndef	CAM_NEW_TRAN_CODE
2370 			/*
2371 			 * A lot of normal SCSI things don't make sense.
2372 			 */
2373 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2374 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2375 			/*
2376 			 * How do you measure the width of a high
2377 			 * speed serial bus? Well, in bytes.
2378 			 *
2379 			 * Offset and period make no sense, though, so we set
2380 			 * (above) a 'base' transfer speed to be gigabit.
2381 			 */
2382 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2383 #else
2384 			fcparam *fcp = isp->isp_param;
2385 			struct ccb_trans_settings_fc *fc =
2386 			    &cts->xport_specific.fc;
2387 
2388 			cts->protocol = PROTO_SCSI;
2389 			cts->protocol_version = SCSI_REV_2;
2390 			cts->transport = XPORT_FC;
2391 			cts->transport_version = 0;
2392 
2393 			fc->valid = CTS_FC_VALID_SPEED;
2394 			if (fcp->isp_gbspeed == 2)
2395 				fc->bitrate = 200000;
2396 			else
2397 				fc->bitrate = 100000;
2398 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2399 				struct lportdb *lp = &fcp->portdb[tgt];
2400 				fc->wwnn = lp->node_wwn;
2401 				fc->wwpn = lp->port_wwn;
2402 				fc->port = lp->portid;
2403 				fc->valid |= CTS_FC_VALID_WWNN |
2404 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2405 			}
2406 #endif
2407 		} else {
2408 #ifdef	CAM_NEW_TRAN_CODE
2409 			struct ccb_trans_settings_scsi *scsi =
2410 			    &cts->proto_specific.scsi;
2411 			struct ccb_trans_settings_spi *spi =
2412 			    &cts->xport_specific.spi;
2413 #endif
2414 			sdparam *sdp = isp->isp_param;
2415 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2416 			u_int16_t dval, pval, oval;
2417 
2418 			sdp += bus;
2419 
2420 			if (IS_CURRENT_SETTINGS(cts)) {
2421 				sdp->isp_devparam[tgt].dev_refresh = 1;
2422 				isp->isp_update |= (1 << bus);
2423 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2424 				    NULL);
2425 				dval = sdp->isp_devparam[tgt].actv_flags;
2426 				oval = sdp->isp_devparam[tgt].actv_offset;
2427 				pval = sdp->isp_devparam[tgt].actv_period;
2428 			} else {
2429 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2430 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2431 				pval = sdp->isp_devparam[tgt].nvrm_period;
2432 			}
2433 
2434 #ifndef	CAM_NEW_TRAN_CODE
2435 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2436 
2437 			if (dval & DPARM_DISC) {
2438 				cts->flags |= CCB_TRANS_DISC_ENB;
2439 			}
2440 			if (dval & DPARM_TQING) {
2441 				cts->flags |= CCB_TRANS_TAG_ENB;
2442 			}
2443 			if (dval & DPARM_WIDE) {
2444 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2445 			} else {
2446 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2447 			}
2448 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2449 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2450 
2451 			if ((dval & DPARM_SYNC) && oval != 0) {
2452 				cts->sync_period = pval;
2453 				cts->sync_offset = oval;
2454 				cts->valid |=
2455 				    CCB_TRANS_SYNC_RATE_VALID |
2456 				    CCB_TRANS_SYNC_OFFSET_VALID;
2457 			}
2458 #else
2459 			cts->protocol = PROTO_SCSI;
2460 			cts->protocol_version = SCSI_REV_2;
2461 			cts->transport = XPORT_SPI;
2462 			cts->transport_version = 2;
2463 
2464 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2465 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2466 			if (dval & DPARM_DISC) {
2467 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2468 			}
2469 			if (dval & DPARM_TQING) {
2470 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2471 			}
2472 			if ((dval & DPARM_SYNC) && oval && pval) {
2473 				spi->sync_offset = oval;
2474 				spi->sync_period = pval;
2475 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2476 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2477 			}
2478 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2479 			if (dval & DPARM_WIDE) {
2480 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2481 			} else {
2482 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2483 			}
2484 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2485 				scsi->valid = CTS_SCSI_VALID_TQ;
2486 				spi->valid |= CTS_SPI_VALID_DISC;
2487 			} else {
2488 				scsi->valid = 0;
2489 			}
2490 #endif
2491 			isp_prt(isp, ISP_LOGDEBUG0,
2492 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2493 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2494 			    bus, tgt, dval, oval, pval);
2495 		}
2496 		ISPLOCK_2_CAMLOCK(isp);
2497 		ccb->ccb_h.status = CAM_REQ_CMP;
2498 		xpt_done(ccb);
2499 		break;
2500 
2501 	case XPT_CALC_GEOMETRY:
2502 	{
2503 		struct ccb_calc_geometry *ccg;
2504 		u_int32_t secs_per_cylinder;
2505 		u_int32_t size_mb;
2506 
2507 		ccg = &ccb->ccg;
2508 		if (ccg->block_size == 0) {
2509 			isp_prt(isp, ISP_LOGERR,
2510 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2511 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2512 			ccb->ccb_h.status = CAM_REQ_INVALID;
2513 			xpt_done(ccb);
2514 			break;
2515 		}
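		/*
		 * Editorial note (worked example): with 512-byte blocks the
		 * divisor below is 2048, so size_mb is simply megabytes.  A
		 * 2GB volume (4194304 sectors) exceeds the 1024MB cutoff and
		 * gets 255 heads and 63 sectors per track, which yields
		 * 4194304 / (255 * 63) = 261 cylinders.
		 */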
2516 		size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
2517 		if (size_mb > 1024) {
2518 			ccg->heads = 255;
2519 			ccg->secs_per_track = 63;
2520 		} else {
2521 			ccg->heads = 64;
2522 			ccg->secs_per_track = 32;
2523 		}
2524 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2525 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2526 		ccb->ccb_h.status = CAM_REQ_CMP;
2527 		xpt_done(ccb);
2528 		break;
2529 	}
2530 	case XPT_RESET_BUS:		/* Reset the specified bus */
2531 		bus = cam_sim_bus(sim);
2532 		CAMLOCK_2_ISPLOCK(isp);
2533 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2534 		ISPLOCK_2_CAMLOCK(isp);
2535 		if (error)
2536 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2537 		else {
2538 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2539 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2540 			else if (isp->isp_path != NULL)
2541 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2542 			ccb->ccb_h.status = CAM_REQ_CMP;
2543 		}
2544 		xpt_done(ccb);
2545 		break;
2546 
2547 	case XPT_TERM_IO:		/* Terminate the I/O process */
2548 		ccb->ccb_h.status = CAM_REQ_INVALID;
2549 		xpt_done(ccb);
2550 		break;
2551 
2552 	case XPT_PATH_INQ:		/* Path routing inquiry */
2553 	{
2554 		struct ccb_pathinq *cpi = &ccb->cpi;
2555 
2556 		cpi->version_num = 1;
2557 #ifdef	ISP_TARGET_MODE
2558 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2559 #else
2560 		cpi->target_sprt = 0;
2561 #endif
2562 		cpi->hba_eng_cnt = 0;
2563 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2564 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2565 		cpi->bus_id = cam_sim_bus(sim);
2566 		if (IS_FC(isp)) {
2567 			cpi->hba_misc = PIM_NOBUSRESET;
2568 			/*
2569 			 * Because our loop ID can shift from time to time,
2570 			 * make our initiator ID out of range of our bus.
2571 			 */
2572 			cpi->initiator_id = cpi->max_target + 1;
2573 
2574 			/*
2575 			 * Set base transfer capabilities for Fibre Channel.
2576 			 * Technically not correct because we don't know
2577 			 * what media we're running on top of, but we'll
2578 			 * look good if we always say 100MB/s.
2579 			 */
2580 			if (FCPARAM(isp)->isp_gbspeed == 2)
2581 				cpi->base_transfer_speed = 200000;
2582 			else
2583 				cpi->base_transfer_speed = 100000;
2584 			cpi->hba_inquiry = PI_TAG_ABLE;
2585 #ifdef	CAM_NEW_TRAN_CODE
2586 			cpi->transport = XPORT_FC;
2587 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2588 #endif
2589 		} else {
2590 			sdparam *sdp = isp->isp_param;
2591 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2592 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2593 			cpi->hba_misc = 0;
2594 			cpi->initiator_id = sdp->isp_initiator_id;
2595 			cpi->base_transfer_speed = 3300;
2596 #ifdef	CAM_NEW_TRAN_CODE
2597 			cpi->transport = XPORT_SPI;
2598 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2599 #endif
2600 		}
2601 #ifdef	CAM_NEW_TRAN_CODE
2602 		cpi->protocol = PROTO_SCSI;
2603 		cpi->protocol_version = SCSI_REV_2;
2604 #endif
2605 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2606 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2607 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2608 		cpi->unit_number = cam_sim_unit(sim);
2609 		cpi->ccb_h.status = CAM_REQ_CMP;
2610 		xpt_done(ccb);
2611 		break;
2612 	}
2613 	default:
2614 		ccb->ccb_h.status = CAM_REQ_INVALID;
2615 		xpt_done(ccb);
2616 		break;
2617 	}
2618 }
2619 
2620 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2621 void
2622 isp_done(struct ccb_scsiio *sccb)
2623 {
2624 	struct ispsoftc *isp = XS_ISP(sccb);
2625 
2626 	if (XS_NOERR(sccb))
2627 		XS_SETERR(sccb, CAM_REQ_CMP);
2628 
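	/*
	 * Editorial note: if the transport reported success but the SCSI
	 * status byte is not GOOD, the CAM status is reclassified below: a
	 * CHECK CONDITION without valid autosense becomes CAM_AUTOSENSE_FAIL
	 * and anything else becomes CAM_SCSI_STATUS_ERROR.
	 */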
2629 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2630 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2631 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2632 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2633 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2634 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2635 		} else {
2636 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2637 		}
2638 	}
2639 
2640 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2641 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2642 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2643 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2644 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2645 			isp_prt(isp, ISP_LOGDEBUG0,
2646 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2647 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2648 			    sccb->ccb_h.status, sccb->scsi_status);
2649 		}
2650 	}
2651 
2652 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2653 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2654 		xpt_print_path(sccb->ccb_h.path);
2655 		isp_prt(isp, ISP_LOGINFO,
2656 		    "cam completion status 0x%x", sccb->ccb_h.status);
2657 	}
2658 
2659 	XS_CMD_S_DONE(sccb);
2660 	if (XS_CMD_WDOG_P(sccb) == 0) {
2661 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2662 		if (XS_CMD_GRACE_P(sccb)) {
2663 			isp_prt(isp, ISP_LOGDEBUG2,
2664 			    "finished command on borrowed time");
2665 		}
2666 		XS_CMD_S_CLEAR(sccb);
2667 		ISPLOCK_2_CAMLOCK(isp);
2668 		xpt_done((union ccb *) sccb);
2669 		CAMLOCK_2_ISPLOCK(isp);
2670 	}
2671 }
2672 
2673 int
2674 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2675 {
2676 	int bus, rv = 0;
2677 	switch (cmd) {
2678 	case ISPASYNC_NEW_TGT_PARAMS:
2679 	{
2680 #ifdef	CAM_NEW_TRAN_CODE
2681 		struct ccb_trans_settings_scsi *scsi;
2682 		struct ccb_trans_settings_spi *spi;
2683 #endif
2684 		int flags, tgt;
2685 		sdparam *sdp = isp->isp_param;
2686 		struct ccb_trans_settings cts;
2687 		struct cam_path *tmppath;
2688 
2689 		bzero(&cts, sizeof (struct ccb_trans_settings));
2690 
2691 		tgt = *((int *)arg);
2692 		bus = (tgt >> 16) & 0xffff;
2693 		tgt &= 0xffff;
2694 		sdp += bus;
2695 		ISPLOCK_2_CAMLOCK(isp);
2696 		if (xpt_create_path(&tmppath, NULL,
2697 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2698 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2699 			CAMLOCK_2_ISPLOCK(isp);
2700 			isp_prt(isp, ISP_LOGWARN,
2701 			    "isp_async cannot make temp path for %d.%d",
2702 			    tgt, bus);
2703 			rv = -1;
2704 			break;
2705 		}
2706 		CAMLOCK_2_ISPLOCK(isp);
2707 		flags = sdp->isp_devparam[tgt].actv_flags;
2708 #ifdef	CAM_NEW_TRAN_CODE
2709 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2710 		cts.protocol = PROTO_SCSI;
2711 		cts.transport = XPORT_SPI;
2712 
2713 		scsi = &cts.proto_specific.scsi;
2714 		spi = &cts.xport_specific.spi;
2715 
2716 		if (flags & DPARM_TQING) {
2717 			scsi->valid |= CTS_SCSI_VALID_TQ;
2718 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2719 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2720 		}
2721 
2722 		if (flags & DPARM_DISC) {
2723 			spi->valid |= CTS_SPI_VALID_DISC;
2724 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2725 		}
2726 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2727 		if (flags & DPARM_WIDE) {
2728 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2729 		} else {
2730 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2731 		}
2732 		if (flags & DPARM_SYNC) {
2733 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2734 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2735 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2736 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2737 		}
2738 #else
2739 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2740 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2741 		if (flags & DPARM_DISC) {
2742 			cts.flags |= CCB_TRANS_DISC_ENB;
2743 		}
2744 		if (flags & DPARM_TQING) {
2745 			cts.flags |= CCB_TRANS_TAG_ENB;
2746 		}
2747 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2748 		cts.bus_width = (flags & DPARM_WIDE)?
2749 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2750 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2751 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2752 		if (flags & DPARM_SYNC) {
2753 			cts.valid |=
2754 			    CCB_TRANS_SYNC_RATE_VALID |
2755 			    CCB_TRANS_SYNC_OFFSET_VALID;
2756 		}
2757 #endif
2758 		isp_prt(isp, ISP_LOGDEBUG2,
2759 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2760 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2761 		    sdp->isp_devparam[tgt].actv_offset, flags);
2762 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2763 		ISPLOCK_2_CAMLOCK(isp);
2764 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2765 		xpt_free_path(tmppath);
2766 		CAMLOCK_2_ISPLOCK(isp);
2767 		break;
2768 	}
2769 	case ISPASYNC_BUS_RESET:
2770 		bus = *((int *)arg);
2771 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2772 		    bus);
2773 		if (bus > 0 && isp->isp_path2) {
2774 			ISPLOCK_2_CAMLOCK(isp);
2775 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2776 			CAMLOCK_2_ISPLOCK(isp);
2777 		} else if (isp->isp_path) {
2778 			ISPLOCK_2_CAMLOCK(isp);
2779 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2780 			CAMLOCK_2_ISPLOCK(isp);
2781 		}
2782 		break;
2783 	case ISPASYNC_LIP:
2784 		if (isp->isp_path) {
2785 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2786 		}
2787 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2788 		break;
2789 	case ISPASYNC_LOOP_RESET:
2790 		if (isp->isp_path) {
2791 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2792 		}
2793 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2794 		break;
2795 	case ISPASYNC_LOOP_DOWN:
2796 		if (isp->isp_path) {
2797 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2798 		}
2799 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2800 		break;
2801 	case ISPASYNC_LOOP_UP:
2802 		/*
2803 		 * Now we just note that Loop has come up. We don't
2804 		 * actually do anything because we're waiting for a
2805 		 * Change Notify before activating the FC cleanup
2806 		 * thread to look at the state of the loop again.
2807 		 */
2808 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2809 		break;
2810 	case ISPASYNC_PROMENADE:
2811 	{
2812 		struct cam_path *tmppath;
2813 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2814 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2815 		static const char *roles[4] = {
2816 		    "(none)", "Target", "Initiator", "Target/Initiator"
2817 		};
2818 		fcparam *fcp = isp->isp_param;
2819 		int tgt = *((int *) arg);
2820 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2821 		struct lportdb *lp = &fcp->portdb[tgt];
2822 
2823 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2824 		    roles[lp->roles & 0x3],
2825 		    (lp->valid)? "Arrived" : "Departed",
2826 		    (u_int32_t) (lp->port_wwn >> 32),
2827 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2828 		    (u_int32_t) (lp->node_wwn >> 32),
2829 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2830 
2831 		ISPLOCK_2_CAMLOCK(isp);
2832 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2833 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2834 			CAMLOCK_2_ISPLOCK(isp);
2835 			break;
2836 		}
2837 		/*
2838 		 * Policy: only announce targets.
2839 		 */
2840 		if (lp->roles & is_tgt_mask) {
2841 			if (lp->valid) {
2842 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2843 			} else {
2844 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2845 			}
2846 		}
2847 		xpt_free_path(tmppath);
2848 		CAMLOCK_2_ISPLOCK(isp);
2849 		break;
2850 	}
2851 	case ISPASYNC_CHANGE_NOTIFY:
2852 		if (arg == ISPASYNC_CHANGE_PDB) {
2853 			isp_prt(isp, ISP_LOGINFO,
2854 			    "Port Database Changed");
2855 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2856 			isp_prt(isp, ISP_LOGINFO,
2857 			    "Name Server Database Changed");
2858 		}
2859 		cv_signal(&isp->isp_osinfo.kthread_cv);
2860 		break;
2861 	case ISPASYNC_FABRIC_DEV:
2862 	{
2863 		int target, base, lim;
2864 		fcparam *fcp = isp->isp_param;
2865 		struct lportdb *lp = NULL;
2866 		struct lportdb *clp = (struct lportdb *) arg;
2867 		char *pt;
2868 
2869 		switch (clp->port_type) {
2870 		case 1:
2871 			pt = "   N_Port";
2872 			break;
2873 		case 2:
2874 			pt = "  NL_Port";
2875 			break;
2876 		case 3:
2877 			pt = "F/NL_Port";
2878 			break;
2879 		case 0x7f:
2880 			pt = "  Nx_Port";
2881 			break;
2882 		case 0x81:
2883 			pt = "  F_port";
2884 			break;
2885 		case 0x82:
2886 			pt = "  FL_Port";
2887 			break;
2888 		case 0x84:
2889 			pt = "   E_port";
2890 			break;
2891 		default:
2892 			pt = " ";
2893 			break;
2894 		}
2895 
2896 		isp_prt(isp, ISP_LOGINFO,
2897 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2898 
2899 		/*
2900 		 * If we don't have an initiator role we bail.
2901 		 *
2902 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2903 		 */
2904 
2905 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2906 			break;
2907 		}
2908 
2909 		/*
2910 		 * Is this entry for us? If so, we bail.
2911 		 */
2912 
2913 		if (fcp->isp_portid == clp->portid) {
2914 			break;
2915 		}
2916 
2917 		/*
2918 		 * Else, the default policy is to find room for it in
2919 		 * our local port database. Later, when we execute
2920 		 * the call to isp_pdb_sync either this newly arrived
2921 		 * or already logged in device will be (re)announced.
2922 		 */
2923 
2924 		if (fcp->isp_topo == TOPO_FL_PORT)
2925 			base = FC_SNS_ID+1;
2926 		else
2927 			base = 0;
2928 
2929 		if (fcp->isp_topo == TOPO_N_PORT)
2930 			lim = 1;
2931 		else
2932 			lim = MAX_FC_TARG;
2933 
2934 		/*
2935 		 * Is it already in our list?
2936 		 */
2937 		for (target = base; target < lim; target++) {
2938 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2939 				continue;
2940 			}
2941 			lp = &fcp->portdb[target];
2942 			if (lp->port_wwn == clp->port_wwn &&
2943 			    lp->node_wwn == clp->node_wwn) {
2944 				lp->fabric_dev = 1;
2945 				break;
2946 			}
2947 		}
2948 		if (target < lim) {
2949 			break;
2950 		}
2951 		for (target = base; target < lim; target++) {
2952 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2953 				continue;
2954 			}
2955 			lp = &fcp->portdb[target];
2956 			if (lp->port_wwn == 0) {
2957 				break;
2958 			}
2959 		}
2960 		if (target == lim) {
2961 			isp_prt(isp, ISP_LOGWARN,
2962 			    "out of space for fabric devices");
2963 			break;
2964 		}
2965 		lp->port_type = clp->port_type;
2966 		lp->fc4_type = clp->fc4_type;
2967 		lp->node_wwn = clp->node_wwn;
2968 		lp->port_wwn = clp->port_wwn;
2969 		lp->portid = clp->portid;
2970 		lp->fabric_dev = 1;
2971 		break;
2972 	}
2973 #ifdef	ISP_TARGET_MODE
2974 	case ISPASYNC_TARGET_MESSAGE:
2975 	{
2976 		tmd_msg_t *mp = arg;
2977 		isp_prt(isp, ISP_LOGALL,
2978 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2979 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2980 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2981 		    mp->nt_msg[0]);
2982 		break;
2983 	}
2984 	case ISPASYNC_TARGET_EVENT:
2985 	{
2986 		tmd_event_t *ep = arg;
2987 		isp_prt(isp, ISP_LOGALL,
2988 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2989 		break;
2990 	}
2991 	case ISPASYNC_TARGET_ACTION:
2992 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2993 		default:
2994 			isp_prt(isp, ISP_LOGWARN,
2995 			   "event 0x%x for unhandled target action",
2996 			    ((isphdr_t *)arg)->rqs_entry_type);
2997 			break;
2998 		case RQSTYPE_NOTIFY:
2999 			if (IS_SCSI(isp)) {
3000 				rv = isp_handle_platform_notify_scsi(isp,
3001 				    (in_entry_t *) arg);
3002 			} else {
3003 				rv = isp_handle_platform_notify_fc(isp,
3004 				    (in_fcentry_t *) arg);
3005 			}
3006 			break;
3007 		case RQSTYPE_ATIO:
3008 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3009 			break;
3010 		case RQSTYPE_ATIO2:
3011 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3012 			break;
3013 		case RQSTYPE_CTIO2:
3014 		case RQSTYPE_CTIO:
3015 			rv = isp_handle_platform_ctio(isp, arg);
3016 			break;
3017 		case RQSTYPE_ENABLE_LUN:
3018 		case RQSTYPE_MODIFY_LUN:
3019 			if (IS_DUALBUS(isp)) {
3020 				bus =
3021 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3022 			} else {
3023 				bus = 0;
3024 			}
3025 			isp_cv_signal_rqe(isp, bus,
3026 			    ((lun_entry_t *)arg)->le_status);
3027 			break;
3028 		}
3029 		break;
3030 #endif
3031 	case ISPASYNC_FW_CRASH:
3032 	{
3033 		u_int16_t mbox1, mbox6;
3034 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3035 		if (IS_DUALBUS(isp)) {
3036 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
3037 		} else {
3038 			mbox6 = 0;
3039 		}
3040 		isp_prt(isp, ISP_LOGERR,
3041 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3042 		    mbox6, mbox1);
3043 #ifdef	ISP_FW_CRASH_DUMP
3044 		/*
3045 		 * XXX: really need a thread to do this right.
3046 		 */
3047 		if (IS_FC(isp)) {
3048 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3049 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3050 			isp_freeze_loopdown(isp, "f/w crash");
3051 			isp_fw_dump(isp);
3052 		}
3053 		isp_reinit(isp);
3054 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3055 #endif
3056 		break;
3057 	}
3058 	case ISPASYNC_UNHANDLED_RESPONSE:
3059 		break;
3060 	default:
3061 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3062 		break;
3063 	}
3064 	return (rv);
3065 }
3066 
3067 
3068 /*
3069  * Locks are held before coming here.
3070  */
3071 void
3072 isp_uninit(struct ispsoftc *isp)
3073 {
3074 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3075 	DISABLE_INTS(isp);
3076 }
3077 
3078 void
3079 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3080 {
3081 	va_list ap;
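	/*
	 * Editorial note: a message is printed only when its level bits
	 * intersect the per-instance debug mask (isp_dblev) or when the
	 * level is ISP_LOGALL, which always prints.
	 */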
3082 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3083 		return;
3084 	}
3085 	printf("%s: ", device_get_nameunit(isp->isp_dev));
3086 	va_start(ap, fmt);
3087 	vprintf(fmt, ap);
3088 	va_end(ap);
3089 	printf("\n");
3090 }
3091