xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 2546665afcaf0d53dc2c7058fee96354b3680f5a)
1 /*
2  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
3  *
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice immediately at the beginning of the file, without modification,
11  *    this list of conditions, and the following disclaimer.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <dev/isp/isp_freebsd.h>
32 #include <sys/unistd.h>
33 #include <sys/kthread.h>
34 #include <machine/stdarg.h>	/* for use by isp_prt below */
35 #include <sys/conf.h>
36 #include <sys/module.h>
37 #include <sys/ioccom.h>
38 #include <dev/isp/isp_ioctl.h>
39 
40 
41 MODULE_VERSION(isp, 1);
42 MODULE_DEPEND(isp, cam, 1, 1, 1);
43 int isp_announced = 0;
44 ispfwfunc *isp_get_firmware_p = NULL;
45 
46 static d_ioctl_t ispioctl;
47 static void isp_intr_enable(void *);
48 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
49 static void isp_poll(struct cam_sim *);
50 static timeout_t isp_watchdog;
51 static void isp_kthread(void *);
52 static void isp_action(struct cam_sim *, union ccb *);
53 
54 
55 static struct cdevsw isp_cdevsw = {
56 	.d_version =	D_VERSION,
57 	.d_flags =	D_NEEDGIANT,
58 	.d_ioctl =	ispioctl,
59 	.d_name =	"isp",
60 };
61 
62 static struct ispsoftc *isplist = NULL;
63 
64 void
65 isp_attach(struct ispsoftc *isp)
66 {
67 	int primary, secondary;
68 	struct ccb_setasync csa;
69 	struct cam_devq *devq;
70 	struct cam_sim *sim;
71 	struct cam_path *path;
72 
73 	/*
74 	 * Establish (in case of 12X0) which bus is the primary.
75 	 */
76 
77 	primary = 0;
78 	secondary = 1;
79 
80 	/*
81 	 * Create the device queue for our SIM(s).
82 	 */
83 	devq = cam_simq_alloc(isp->isp_maxcmds);
84 	if (devq == NULL) {
85 		return;
86 	}
87 
88 	/*
89 	 * Construct our SIM entry.
90 	 */
91 	ISPLOCK_2_CAMLOCK(isp);
92 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
93 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
94 	if (sim == NULL) {
95 		cam_simq_free(devq);
96 		CAMLOCK_2_ISPLOCK(isp);
97 		return;
98 	}
99 	CAMLOCK_2_ISPLOCK(isp);
100 
101 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
102 	isp->isp_osinfo.ehook.ich_arg = isp;
103 	ISPLOCK_2_CAMLOCK(isp);
104 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
105 		cam_sim_free(sim, TRUE);
106 		CAMLOCK_2_ISPLOCK(isp);
107 		isp_prt(isp, ISP_LOGERR,
108 		    "could not establish interrupt enable hook");
109 		return;
110 	}
111 
112 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
113 		cam_sim_free(sim, TRUE);
114 		CAMLOCK_2_ISPLOCK(isp);
115 		return;
116 	}
117 
118 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
119 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
120 		xpt_bus_deregister(cam_sim_path(sim));
121 		cam_sim_free(sim, TRUE);
122 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
123 		CAMLOCK_2_ISPLOCK(isp);
124 		return;
125 	}
126 
127 	xpt_setup_ccb(&csa.ccb_h, path, 5);
128 	csa.ccb_h.func_code = XPT_SASYNC_CB;
129 	csa.event_enable = AC_LOST_DEVICE;
130 	csa.callback = isp_cam_async;
131 	csa.callback_arg = sim;
132 	xpt_action((union ccb *)&csa);
133 	CAMLOCK_2_ISPLOCK(isp);
134 	isp->isp_sim = sim;
135 	isp->isp_path = path;
136 	/*
137 	 * Create a kernel thread for fibre channel instances. We
138 	 * don't have dual channel FC cards.
139 	 */
140 	if (IS_FC(isp)) {
141 		ISPLOCK_2_CAMLOCK(isp);
142 		/* XXX: LOCK VIOLATION */
143 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
144 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
145 		    RFHIGHPID, 0, "%s: fc_thrd",
146 		    device_get_nameunit(isp->isp_dev))) {
147 			xpt_bus_deregister(cam_sim_path(sim));
148 			cam_sim_free(sim, TRUE);
149 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
150 			CAMLOCK_2_ISPLOCK(isp);
151 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
152 			return;
153 		}
154 		CAMLOCK_2_ISPLOCK(isp);
155 	}
156 
157 
158 	/*
159 	 * If we have a second channel, construct SIM entry for that.
160 	 */
161 	if (IS_DUALBUS(isp)) {
162 		ISPLOCK_2_CAMLOCK(isp);
163 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
164 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
165 		if (sim == NULL) {
166 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
167 			xpt_free_path(isp->isp_path);
168 			cam_simq_free(devq);
169 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
170 			return;
171 		}
172 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
173 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
174 			xpt_free_path(isp->isp_path);
175 			cam_sim_free(sim, TRUE);
176 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
177 			CAMLOCK_2_ISPLOCK(isp);
178 			return;
179 		}
180 
181 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
182 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
183 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
184 			xpt_free_path(isp->isp_path);
185 			xpt_bus_deregister(cam_sim_path(sim));
186 			cam_sim_free(sim, TRUE);
187 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
188 			CAMLOCK_2_ISPLOCK(isp);
189 			return;
190 		}
191 
192 		xpt_setup_ccb(&csa.ccb_h, path, 5);
193 		csa.ccb_h.func_code = XPT_SASYNC_CB;
194 		csa.event_enable = AC_LOST_DEVICE;
195 		csa.callback = isp_cam_async;
196 		csa.callback_arg = sim;
197 		xpt_action((union ccb *)&csa);
198 		CAMLOCK_2_ISPLOCK(isp);
199 		isp->isp_sim2 = sim;
200 		isp->isp_path2 = path;
201 	}
202 
203 	/*
204 	 * Create device nodes
205 	 */
206 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
207 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
208 
209 	if (isp->isp_role != ISP_ROLE_NONE) {
210 		isp->isp_state = ISP_RUNSTATE;
211 		ENABLE_INTS(isp);
212 	}
213 	if (isplist == NULL) {
214 		isplist = isp;
215 	} else {
216 		struct ispsoftc *tmp = isplist;
217 		while (tmp->isp_osinfo.next) {
218 			tmp = tmp->isp_osinfo.next;
219 		}
220 		tmp->isp_osinfo.next = isp;
221 	}
222 
223 }
224 
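/*
 * Freeze the SIM queue while the Fibre Channel loop is down.  The first
 * caller actually freezes the queue with xpt_freeze_simq(); subsequent
 * callers just record the SIMQFRZ_LOOPDOWN reason in simqfrozen.
 */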
225 static INLINE void
226 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
227 {
228 	if (isp->isp_osinfo.simqfrozen == 0) {
229 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
230 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
231 		ISPLOCK_2_CAMLOCK(isp);
232 		xpt_freeze_simq(isp->isp_sim, 1);
233 		CAMLOCK_2_ISPLOCK(isp);
234 	} else {
235 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
236 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
237 	}
238 }
239 
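/*
 * Per-instance ioctl handler.  The softc is located by walking the global
 * isplist and matching the device minor number against the unit number.
 */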
240 static int
241 ispioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
242 {
243 	struct ispsoftc *isp;
244 	int nr, retval = ENOTTY;
245 
246 	isp = isplist;
247 	while (isp) {
248 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
249 			break;
250 		}
251 		isp = isp->isp_osinfo.next;
252 	}
253 	if (isp == NULL)
254 		return (ENXIO);
255 
256 	switch (cmd) {
257 #ifdef	ISP_FW_CRASH_DUMP
258 	case ISP_GET_FW_CRASH_DUMP:
259 	{
260 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
261 		size_t sz;
262 
263 		retval = 0;
264 		if (IS_2200(isp))
265 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
266 		else
267 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
268 		ISP_LOCK(isp);
269 		if (ptr && *ptr) {
270 			void *uaddr = *((void **) addr);
271 			if (copyout(ptr, uaddr, sz)) {
272 				retval = EFAULT;
273 			} else {
274 				*ptr = 0;
275 			}
276 		} else {
277 			retval = ENXIO;
278 		}
279 		ISP_UNLOCK(isp);
280 		break;
281 	}
282 
283 	case ISP_FORCE_CRASH_DUMP:
284 		ISP_LOCK(isp);
285 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
286 		isp_fw_dump(isp);
287 		isp_reinit(isp);
288 		ISP_UNLOCK(isp);
289 		retval = 0;
290 		break;
291 #endif
292 	case ISP_SDBLEV:
293 	{
294 		int olddblev = isp->isp_dblev;
295 		isp->isp_dblev = *(int *)addr;
296 		*(int *)addr = olddblev;
297 		retval = 0;
298 		break;
299 	}
300 	case ISP_GETROLE:
301 		*(int *)addr = isp->isp_role;
302 		retval = 0;
303 		break;
304 	case ISP_SETROLE:
305 		nr = *(int *)addr;
306 		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
307 			retval = EINVAL;
308 			break;
309 		}
310 		*(int *)addr = isp->isp_role;
311 		isp->isp_role = nr;
312 		/* FALLTHROUGH */
313 	case ISP_RESETHBA:
314 		ISP_LOCK(isp);
315 		isp_reinit(isp);
316 		ISP_UNLOCK(isp);
317 		retval = 0;
318 		break;
319 	case ISP_RESCAN:
320 		if (IS_FC(isp)) {
321 			ISP_LOCK(isp);
322 			if (isp_fc_runstate(isp, 5 * 1000000)) {
323 				retval = EIO;
324 			} else {
325 				retval = 0;
326 			}
327 			ISP_UNLOCK(isp);
328 		}
329 		break;
330 	case ISP_FC_LIP:
331 		if (IS_FC(isp)) {
332 			ISP_LOCK(isp);
333 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
334 				retval = EIO;
335 			} else {
336 				retval = 0;
337 			}
338 			ISP_UNLOCK(isp);
339 		}
340 		break;
341 	case ISP_FC_GETDINFO:
342 	{
343 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
344 		struct lportdb *lp;
345 
346 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
347 			retval = EINVAL;
348 			break;
349 		}
350 		ISP_LOCK(isp);
351 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
352 		if (lp->valid) {
353 			ifc->loopid = lp->loopid;
354 			ifc->portid = lp->portid;
355 			ifc->node_wwn = lp->node_wwn;
356 			ifc->port_wwn = lp->port_wwn;
357 			retval = 0;
358 		} else {
359 			retval = ENODEV;
360 		}
361 		ISP_UNLOCK(isp);
362 		break;
363 	}
364 	case ISP_GET_STATS:
365 	{
366 		isp_stats_t *sp = (isp_stats_t *) addr;
367 
368 		MEMZERO(sp, sizeof (*sp));
369 		sp->isp_stat_version = ISP_STATS_VERSION;
370 		sp->isp_type = isp->isp_type;
371 		sp->isp_revision = isp->isp_revision;
372 		ISP_LOCK(isp);
373 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
374 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
375 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
376 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
377 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
378 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
379 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
380 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
381 		ISP_UNLOCK(isp);
382 		retval = 0;
383 		break;
384 	}
385 	case ISP_CLR_STATS:
386 		ISP_LOCK(isp);
387 		isp->isp_intcnt = 0;
388 		isp->isp_intbogus = 0;
389 		isp->isp_intmboxc = 0;
390 		isp->isp_intoasync = 0;
391 		isp->isp_rsltccmplt = 0;
392 		isp->isp_fphccmplt = 0;
393 		isp->isp_rscchiwater = 0;
394 		isp->isp_fpcchiwater = 0;
395 		ISP_UNLOCK(isp);
396 		retval = 0;
397 		break;
398 	case ISP_FC_GETHINFO:
399 	{
400 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
401 		MEMZERO(hba, sizeof (*hba));
402 		ISP_LOCK(isp);
403 		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
404 		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
405 		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
406 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
407 		hba->fc_scsi_supported = 1;
408 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
409 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
410 		hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
411 		hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
412 		hba->active_node_wwn = ISP_NODEWWN(isp);
413 		hba->active_port_wwn = ISP_PORTWWN(isp);
414 		ISP_UNLOCK(isp);
415 		retval = 0;
416 		break;
417 	}
418 	case ISP_GET_FC_PARAM:
419 	{
420 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
421 
422 		if (!IS_FC(isp)) {
423 			retval = EINVAL;
424 			break;
425 		}
426 		f->parameter = 0;
427 		if (strcmp(f->param_name, "framelength") == 0) {
428 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
429 			retval = 0;
430 			break;
431 		}
432 		if (strcmp(f->param_name, "exec_throttle") == 0) {
433 			f->parameter = FCPARAM(isp)->isp_execthrottle;
434 			retval = 0;
435 			break;
436 		}
437 		if (strcmp(f->param_name, "fullduplex") == 0) {
438 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
439 				f->parameter = 1;
440 			retval = 0;
441 			break;
442 		}
443 		if (strcmp(f->param_name, "loopid") == 0) {
444 			f->parameter = FCPARAM(isp)->isp_loopid;
445 			retval = 0;
446 			break;
447 		}
448 		retval = EINVAL;
449 		break;
450 	}
451 	case ISP_SET_FC_PARAM:
452 	{
453 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
454 		u_int32_t param = f->parameter;
455 
456 		if (!IS_FC(isp)) {
457 			retval = EINVAL;
458 			break;
459 		}
460 		f->parameter = 0;
461 		if (strcmp(f->param_name, "framelength") == 0) {
462 			if (param != 512 && param != 1024 && param != 2048) {
463 				retval = EINVAL;
464 				break;
465 			}
466 			FCPARAM(isp)->isp_maxfrmlen = param;
467 			retval = 0;
468 			break;
469 		}
470 		if (strcmp(f->param_name, "exec_throttle") == 0) {
471 			if (param < 16 || param > 255) {
472 				retval = EINVAL;
473 				break;
474 			}
475 			FCPARAM(isp)->isp_execthrottle = param;
476 			retval = 0;
477 			break;
478 		}
479 		if (strcmp(f->param_name, "fullduplex") == 0) {
480 			if (param != 0 && param != 1) {
481 				retval = EINVAL;
482 				break;
483 			}
484 			if (param) {
485 				FCPARAM(isp)->isp_fwoptions |=
486 				    ICBOPT_FULL_DUPLEX;
487 			} else {
488 				FCPARAM(isp)->isp_fwoptions &=
489 				    ~ICBOPT_FULL_DUPLEX;
490 			}
491 			retval = 0;
492 			break;
493 		}
494 		if (strcmp(f->param_name, "loopid") == 0) {
495 			if (param < 0 || param > 125) {
496 				retval = EINVAL;
497 				break;
498 			}
499 			FCPARAM(isp)->isp_loopid = param;
500 			retval = 0;
501 			break;
502 		}
503 		retval = EINVAL;
504 		break;
505 	}
506 	default:
507 		break;
508 	}
509 	return (retval);
510 }
511 
512 static void
513 isp_intr_enable(void *arg)
514 {
515 	struct ispsoftc *isp = arg;
516 	if (isp->isp_role != ISP_ROLE_NONE) {
517 		ENABLE_INTS(isp);
518 		isp->isp_osinfo.intsok = 1;
519 	}
520 	/* Release our hook so that the boot can continue. */
521 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
522 }
523 
524 /*
525  * Put the target mode functions here, because some are inlines
526  */
527 
528 #ifdef	ISP_TARGET_MODE
529 
530 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
531 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
532 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
533 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
534 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
535 static cam_status
536 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
537 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
538 static int isp_en_lun(struct ispsoftc *, union ccb *);
539 static void isp_ledone(struct ispsoftc *, lun_entry_t *);
540 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
541 static timeout_t isp_refire_putback_atio;
542 static void isp_complete_ctio(union ccb *);
543 static void isp_target_putback_atio(union ccb *);
544 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
545 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
546 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
547 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
548 static void isp_handle_platform_ctio_fastpost(struct ispsoftc *, u_int32_t);
549 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
550 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
551 
552 static INLINE int
553 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
554 {
555 	tstate_t *tptr;
556 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
557 	if (tptr == NULL) {
558 		return (0);
559 	}
560 	do {
561 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
562 			return (1);
563 		}
564 	} while ((tptr = tptr->next) != NULL);
565 	return (0);
566 }
567 
568 static INLINE int
569 are_any_luns_enabled(struct ispsoftc *isp, int port)
570 {
571 	int lo, hi;
572 	if (IS_DUALBUS(isp)) {
573 		lo = (port * (LUN_HASH_SIZE >> 1));
574 		hi = lo + (LUN_HASH_SIZE >> 1);
575 	} else {
576 		lo = 0;
577 		hi = LUN_HASH_SIZE;
578 	}
579 	for (; lo < hi; lo++) {
580 		if (isp->isp_osinfo.lun_hash[lo]) {
581 			return (1);
582 		}
583 	}
584 	return (0);
585 }
586 
587 static INLINE tstate_t *
588 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
589 {
590 	tstate_t *tptr = NULL;
591 
592 	if (lun == CAM_LUN_WILDCARD) {
593 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
594 			tptr = &isp->isp_osinfo.tsdflt[bus];
595 			tptr->hold++;
596 			return (tptr);
597 		}
598 		return (NULL);
599 	} else {
600 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
601 		if (tptr == NULL) {
602 			return (NULL);
603 		}
604 	}
605 
606 	do {
607 		if (tptr->lun == lun && tptr->bus == bus) {
608 			tptr->hold++;
609 			return (tptr);
610 		}
611 	} while ((tptr = tptr->next) != NULL);
612 	return (tptr);
613 }
614 
615 static INLINE void
616 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
617 {
618 	if (tptr->hold)
619 		tptr->hold--;
620 }
621 
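/*
 * Find the ATIO private data slot whose tag matches the given tag; a tag
 * of zero matches a free slot (slots have their tag cleared when freed).
 * Returns NULL if nothing in the fixed-size atpdp array matches.
 */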
622 static INLINE atio_private_data_t *
623 isp_get_atpd(struct ispsoftc *isp, int tag)
624 {
625 	atio_private_data_t *atp;
626 	for (atp = isp->isp_osinfo.atpdp;
627 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
628 		if (atp->tag == tag)
629 			return (atp);
630 	}
631 	return (NULL);
632 }
633 
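/*
 * Allocate a new per-lun target mode state structure, give it its own
 * path, link it into the lun hash chain, and return it with a hold
 * count of one.
 */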
634 static cam_status
635 create_lun_state(struct ispsoftc *isp, int bus,
636     struct cam_path *path, tstate_t **rslt)
637 {
638 	cam_status status;
639 	lun_id_t lun;
640 	int hfx;
641 	tstate_t *tptr, *new;
642 
643 	lun = xpt_path_lun_id(path);
644 	if (lun < 0) {
645 		return (CAM_LUN_INVALID);
646 	}
647 	if (is_lun_enabled(isp, bus, lun)) {
648 		return (CAM_LUN_ALRDY_ENA);
649 	}
650 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
651 	if (new == NULL) {
652 		return (CAM_RESRC_UNAVAIL);
653 	}
654 
655 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
656 	    xpt_path_target_id(path), xpt_path_lun_id(path));
657 	if (status != CAM_REQ_CMP) {
658 		free(new, M_DEVBUF);
659 		return (status);
660 	}
661 	new->bus = bus;
662 	new->lun = lun;
663 	SLIST_INIT(&new->atios);
664 	SLIST_INIT(&new->inots);
665 	new->hold = 1;
666 
667 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
668 	tptr = isp->isp_osinfo.lun_hash[hfx];
669 	if (tptr == NULL) {
670 		isp->isp_osinfo.lun_hash[hfx] = new;
671 	} else {
672 		while (tptr->next)
673 			tptr = tptr->next;
674 		tptr->next = new;
675 	}
676 	*rslt = new;
677 	return (CAM_REQ_CMP);
678 }
679 
680 static INLINE void
681 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
682 {
683 	int hfx;
684 	tstate_t *lw, *pw;
685 
686 	if (tptr->hold) {
687 		return;
688 	}
689 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
690 	pw = isp->isp_osinfo.lun_hash[hfx];
691 	if (pw == NULL) {
692 		return;
693 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
694 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
695 	} else {
696 		lw = pw;
697 		pw = lw->next;
698 		while (pw) {
699 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
700 				lw->next = pw->next;
701 				break;
702 			}
703 			lw = pw;
704 			pw = pw->next;
705 		}
706 		if (pw == NULL) {
707 			return;
708 		}
709 	}
710 	free(tptr, M_DEVBUF);
711 }
712 
713 /*
714  * Enable luns.
715  */
716 static int
717 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
718 {
719 	struct ccb_en_lun *cel = &ccb->cel;
720 	tstate_t *tptr;
721 	u_int32_t seq;
722 	int bus, cmd, av, wildcard, tm_on;
723 	lun_id_t lun;
724 	target_id_t tgt;
725 
726 	bus = XS_CHANNEL(ccb);
727 	if (bus > 1) {
728 		xpt_print_path(ccb->ccb_h.path);
729 		printf("illegal bus %d\n", bus);
730 		ccb->ccb_h.status = CAM_PATH_INVALID;
731 		return (-1);
732 	}
733 	tgt = ccb->ccb_h.target_id;
734 	lun = ccb->ccb_h.target_lun;
735 
736 	isp_prt(isp, ISP_LOGTDEBUG0,
737 	    "isp_en_lun: %sabling lun 0x%x on channel %d",
738 	    cel->enable? "en" : "dis", lun, bus);
739 
740 
741 	if ((lun != CAM_LUN_WILDCARD) &&
742 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
743 		ccb->ccb_h.status = CAM_LUN_INVALID;
744 		return (-1);
745 	}
746 
747 	if (IS_SCSI(isp)) {
748 		sdparam *sdp = isp->isp_param;
749 		sdp += bus;
750 		if (tgt != CAM_TARGET_WILDCARD &&
751 		    tgt != sdp->isp_initiator_id) {
752 			ccb->ccb_h.status = CAM_TID_INVALID;
753 			return (-1);
754 		}
755 	} else {
756 		/*
757 		 * There's really no point in doing this yet w/o multi-tid
758 		 * capability. Even then, it's problematic.
759 		 */
760 #if	0
761 		if (tgt != CAM_TARGET_WILDCARD &&
762 		    tgt != FCPARAM(isp)->isp_iid) {
763 			ccb->ccb_h.status = CAM_TID_INVALID;
764 			return (-1);
765 		}
766 #endif
767 		/*
768 		 * This is as good a place as any to check f/w capabilities.
769 		 */
770 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
771 			isp_prt(isp, ISP_LOGERR,
772 			    "firmware does not support target mode");
773 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
774 			return (-1);
775 		}
776 		/*
777 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
778 		 * XXX: dork with our already fragile enable/disable code.
779 		 */
780 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
781 			isp_prt(isp, ISP_LOGERR,
782 			    "firmware not SCCLUN capable");
783 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
784 			return (-1);
785 		}
786 	}
787 
788 	if (tgt == CAM_TARGET_WILDCARD) {
789 		if (lun == CAM_LUN_WILDCARD) {
790 			wildcard = 1;
791 		} else {
792 			ccb->ccb_h.status = CAM_LUN_INVALID;
793 			return (-1);
794 		}
795 	} else {
796 		wildcard = 0;
797 	}
798 
799 	tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0;
800 
801 	/*
802 	 * Next check to see whether this is a target/lun wildcard action.
803 	 *
804 	 * If so, we know that we can accept commands for luns that haven't
805 	 * been enabled yet and send them upstream. Otherwise, we have to
806 	 * handle them locally (if we see them at all).
807 	 */
808 
809 	if (wildcard) {
810 		tptr = &isp->isp_osinfo.tsdflt[bus];
811 		if (cel->enable) {
812 			if (tm_on) {
813 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
814 				return (-1);
815 			}
816 			ccb->ccb_h.status =
817 			    xpt_create_path(&tptr->owner, NULL,
818 			    xpt_path_path_id(ccb->ccb_h.path),
819 			    xpt_path_target_id(ccb->ccb_h.path),
820 			    xpt_path_lun_id(ccb->ccb_h.path));
821 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
822 				return (-1);
823 			}
824 			SLIST_INIT(&tptr->atios);
825 			SLIST_INIT(&tptr->inots);
826 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
827 		} else {
828 			if (tm_on == 0) {
829 				ccb->ccb_h.status = CAM_REQ_CMP;
830 				return (-1);
831 			}
832 			if (tptr->hold) {
833 				ccb->ccb_h.status = CAM_SCSI_BUSY;
834 				return (-1);
835 			}
836 			xpt_free_path(tptr->owner);
837 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
838 		}
839 	}
840 
841 	/*
842 	 * Now check to see whether this bus needs to be
843 	 * enabled/disabled with respect to target mode.
844 	 */
845 	av = bus << 31;
846 	if (cel->enable && tm_on == 0) {
847 		av |= ENABLE_TARGET_FLAG;
848 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
849 		if (av) {
850 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
851 			if (wildcard) {
852 				isp->isp_osinfo.tmflags[bus] &=
853 				    ~TM_WILDCARD_ENABLED;
854 				xpt_free_path(tptr->owner);
855 			}
856 			return (-1);
857 		}
858 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
859 		isp_prt(isp, ISP_LOGINFO,
860 		    "Target Mode enabled on channel %d", bus);
861 	} else if (cel->enable == 0 && tm_on && wildcard) {
862 		if (are_any_luns_enabled(isp, bus)) {
863 			ccb->ccb_h.status = CAM_SCSI_BUSY;
864 			return (-1);
865 		}
866 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
867 		if (av) {
868 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
869 			return (-1);
870 		}
871 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
872 		isp_prt(isp, ISP_LOGINFO,
873 		    "Target Mode disabled on channel %d", bus);
874 	}
875 
876 	if (wildcard) {
877 		ccb->ccb_h.status = CAM_REQ_CMP;
878 		return (-1);
879 	}
880 
881 	/*
882 	 * Find an empty slot
883 	 */
884 	for (seq = 0; seq < NLEACT; seq++) {
885 		if (isp->isp_osinfo.leact[seq] == 0) {
886 			break;
887 		}
888 	}
889 	if (seq >= NLEACT) {
890 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
891 		return (-1);
892 
893 	}
894 	isp->isp_osinfo.leact[seq] = ccb;
895 
896 	if (cel->enable) {
897 		ccb->ccb_h.status =
898 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
899 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
900 			isp->isp_osinfo.leact[seq] = 0;
901 			return (-1);
902 		}
903 	} else {
904 		tptr = get_lun_statep(isp, bus, lun);
905 		if (tptr == NULL) {
906 			ccb->ccb_h.status = CAM_LUN_INVALID;
907 			return (-1);
908 		}
909 	}
910 
911 	if (cel->enable) {
912 		int c, n, ulun = lun;
913 
914 		cmd = RQSTYPE_ENABLE_LUN;
915 		c = DFLT_CMND_CNT;
916 		n = DFLT_INOT_CNT;
917 		if (IS_FC(isp) && lun != 0) {
918 			cmd = RQSTYPE_MODIFY_LUN;
919 			n = 0;
920 			/*
921 			 * For SCC firmware, we only deal with setting
922 			 * (enabling or modifying) lun 0.
923 			 */
924 			ulun = 0;
925 		}
926 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
927 			rls_lun_statep(isp, tptr);
928 			ccb->ccb_h.status = CAM_REQ_INPROG;
929 			return (seq);
930 		}
931 	} else {
932 		int c, n, ulun = lun;
933 
934 		cmd = -RQSTYPE_MODIFY_LUN;
935 		c = DFLT_CMND_CNT;
936 		n = DFLT_INOT_CNT;
937 		if (IS_FC(isp) && lun != 0) {
938 			n = 0;
939 			/*
940 			 * For SCC firmware, we only deal with setting
941 			 * (enabling or modifying) lun 0.
942 			 */
943 			ulun = 0;
944 		}
945 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
946 			rls_lun_statep(isp, tptr);
947 			ccb->ccb_h.status = CAM_REQ_INPROG;
948 			return (seq);
949 		}
950 	}
951 	rls_lun_statep(isp, tptr);
952 	xpt_print_path(ccb->ccb_h.path);
953 	printf("isp_lun_cmd failed\n");
954 	isp->isp_osinfo.leact[seq] = 0;
955 	ccb->ccb_h.status = CAM_REQ_CMP_ERR;
956 	return (-1);
957 }
958 
959 static void
960 isp_ledone(struct ispsoftc *isp, lun_entry_t *lep)
961 {
962 	const char lfmt[] = "lun %d now %sabled for target mode on channel %d";
963 	union ccb *ccb;
964 	u_int32_t seq;
965 	tstate_t *tptr;
966 	int av;
967 	struct ccb_en_lun *cel;
968 
969 	seq = lep->le_reserved - 1;
970 	if (seq >= NLEACT) {
971 		isp_prt(isp, ISP_LOGERR,
972 		    "seq out of range (%u) in isp_ledone", seq);
973 		return;
974 	}
975 	ccb = isp->isp_osinfo.leact[seq];
976 	if (ccb == 0) {
977 		isp_prt(isp, ISP_LOGERR,
978 		    "no ccb for seq %u in isp_ledone", seq);
979 		return;
980 	}
981 	cel = &ccb->cel;
982 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
983 	if (tptr == NULL) {
984 		xpt_print_path(ccb->ccb_h.path);
985 		printf("null tptr in isp_ledone\n");
986 		isp->isp_osinfo.leact[seq] = 0;
987 		return;
988 	}
989 
990 	if (lep->le_status != LUN_OK) {
991 		xpt_print_path(ccb->ccb_h.path);
992 		printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status);
993 err:
994 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
995 		xpt_print_path(ccb->ccb_h.path);
996 		rls_lun_statep(isp, tptr);
997 		isp->isp_osinfo.leact[seq] = 0;
998 		ISPLOCK_2_CAMLOCK(isp);
999 		xpt_done(ccb);
1000 		CAMLOCK_2_ISPLOCK(isp);
1001 		return;
1002 	} else {
1003 		isp_prt(isp, ISP_LOGTDEBUG0,
1004 		    "isp_ledone: ENABLE/MODIFY done okay");
1005 	}
1006 
1007 
1008 	if (cel->enable) {
1009 		ccb->ccb_h.status = CAM_REQ_CMP;
1010 		isp_prt(isp, /* ISP_LOGINFO */ ISP_LOGALL, lfmt,
1011 		    XS_LUN(ccb), "en", XS_CHANNEL(ccb));
1012 		rls_lun_statep(isp, tptr);
1013 		isp->isp_osinfo.leact[seq] = 0;
1014 		ISPLOCK_2_CAMLOCK(isp);
1015 		xpt_done(ccb);
1016 		CAMLOCK_2_ISPLOCK(isp);
1017 		return;
1018 	}
1019 
1020 	if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) {
1021 		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb),
1022 		    XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) {
1023 			xpt_print_path(ccb->ccb_h.path);
1024 			printf("isp_ledone: isp_lun_cmd failed\n");
1025 			goto err;
1026 		}
1027 		rls_lun_statep(isp, tptr);
1028 		return;
1029 	}
1030 
1031 	isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb));
1032 	rls_lun_statep(isp, tptr);
1033 	destroy_lun_state(isp, tptr);
1034 	ccb->ccb_h.status = CAM_REQ_CMP;
1035 	isp->isp_osinfo.leact[seq] = 0;
1036 	ISPLOCK_2_CAMLOCK(isp);
1037 	xpt_done(ccb);
1038 	CAMLOCK_2_ISPLOCK(isp);
1039 	if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) {
1040 		int bus = XS_CHANNEL(ccb);
1041 		av = bus << 31;
1042 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1043 		if (av) {
1044 			isp_prt(isp, ISP_LOGWARN,
1045 			    "disable target mode on channel %d failed", bus);
1046 		} else {
1047 			isp_prt(isp, ISP_LOGINFO,
1048 			    "Target Mode disabled on channel %d", bus);
1049 		}
1050 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1051 	}
1052 }
1053 
1054 
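/*
 * Abort a pending ATIO or Immediate Notify CCB by unlinking it from the
 * lun state's queue and completing it with CAM_REQ_ABORTED.
 */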
1055 static cam_status
1056 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1057 {
1058 	tstate_t *tptr;
1059 	struct ccb_hdr_slist *lp;
1060 	struct ccb_hdr *curelm;
1061 	int found, *ctr;
1062 	union ccb *accb = ccb->cab.abort_ccb;
1063 
1064 	isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb);
1065 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1066 		int badpath = 0;
1067 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1068 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1069 			badpath = 1;
1070 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1071 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1072 			badpath = 1;
1073 		}
1074 		if (badpath) {
1075 			/*
1076 			 * Being restrictive about target ids is really about
1077 			 * making sure we're aborting for the right multi-tid
1078 			 * path. This doesn't really make much sense at present.
1079 			 */
1080 #if	0
1081 			return (CAM_PATH_INVALID);
1082 #endif
1083 		}
1084 	}
1085 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1086 	if (tptr == NULL) {
1087 		isp_prt(isp, ISP_LOGTDEBUG0,
1088 		    "isp_abort_tgt_ccb: can't get statep");
1089 		return (CAM_PATH_INVALID);
1090 	}
1091 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1092 		lp = &tptr->atios;
1093 		ctr = &tptr->atio_count;
1094 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1095 		lp = &tptr->inots;
1096 		ctr = &tptr->inot_count;
1097 	} else {
1098 		rls_lun_statep(isp, tptr);
1099 		isp_prt(isp, ISP_LOGTDEBUG0,
1100 		    "isp_abort_tgt_ccb: bad func %d\n", accb->ccb_h.func_code);
1101 		return (CAM_UA_ABORT);
1102 	}
1103 	curelm = SLIST_FIRST(lp);
1104 	found = 0;
1105 	if (curelm == &accb->ccb_h) {
1106 		found = 1;
1107 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1108 	} else {
1109 		while(curelm != NULL) {
1110 			struct ccb_hdr *nextelm;
1111 
1112 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1113 			if (nextelm == &accb->ccb_h) {
1114 				found = 1;
1115 				SLIST_NEXT(curelm, sim_links.sle) =
1116 				    SLIST_NEXT(nextelm, sim_links.sle);
1117 				break;
1118 			}
1119 			curelm = nextelm;
1120 		}
1121 	}
1122 	rls_lun_statep(isp, tptr);
1123 	if (found) {
1124 		(*ctr)--;
1125 		accb->ccb_h.status = CAM_REQ_ABORTED;
1126 		xpt_done(accb);
1127 		return (CAM_REQ_CMP);
1128 	}
1129 	isp_prt(isp, ISP_LOGTDEBUG0,
1130 	    "isp_abort_tgt_ccb: CCB %p not found\n", accb);
1131 	return (CAM_PATH_INVALID);
1132 }
1133 
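/*
 * Build a CTIO (parallel SCSI) or CTIO2 (Fibre Channel) from the CCB,
 * set up DMA for any data movement, and hand the entry to the firmware.
 * Returns CAM_REQ_INPROG on success.
 */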
1134 static cam_status
1135 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1136 {
1137 	void *qe;
1138 	struct ccb_scsiio *cso = &ccb->csio;
1139 	u_int16_t *hp, save_handle;
1140 	u_int16_t nxti, optr;
1141 	u_int8_t local[QENTRY_LEN];
1142 
1143 
1144 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1145 		xpt_print_path(ccb->ccb_h.path);
1146 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1147 		return (CAM_RESRC_UNAVAIL);
1148 	}
1149 	bzero(local, QENTRY_LEN);
1150 
1151 	/*
1152 	 * We're either moving data or completing a command here.
1153 	 */
1154 
1155 	if (IS_FC(isp)) {
1156 		atio_private_data_t *atp;
1157 		ct2_entry_t *cto = (ct2_entry_t *) local;
1158 
1159 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1160 		cto->ct_header.rqs_entry_count = 1;
1161 		cto->ct_iid = cso->init_id;
1162 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1163 			cto->ct_lun = ccb->ccb_h.target_lun;
1164 		}
1165 
1166 		atp = isp_get_atpd(isp, cso->tag_id);
1167 		if (atp == NULL) {
1168 			isp_prt(isp, ISP_LOGERR,
1169 			    "cannot find private data adjunct for tag %x",
1170 			    cso->tag_id);
1171 			return (-1);
1172 		}
1173 
1174 		cto->ct_rxid = cso->tag_id;
1175 		if (cso->dxfer_len == 0) {
1176 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1177 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1178 				cto->ct_flags |= CT2_SENDSTATUS;
1179 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1180 				cto->ct_resid =
1181 				    atp->orig_datalen - atp->bytes_xfered;
1182 				if (cto->ct_resid < 0) {
1183 					cto->rsp.m1.ct_scsi_status |=
1184 					    CT2_DATA_OVER;
1185 				} else if (cto->ct_resid > 0) {
1186 					cto->rsp.m1.ct_scsi_status |=
1187 					    CT2_DATA_UNDER;
1188 				}
1189 			}
1190 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1191 				int m = min(cso->sense_len, MAXRESPLEN);
1192 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1193 				cto->rsp.m1.ct_senselen = m;
1194 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1195 			}
1196 		} else {
1197 			cto->ct_flags |= CT2_FLAG_MODE0;
1198 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1199 				cto->ct_flags |= CT2_DATA_IN;
1200 			} else {
1201 				cto->ct_flags |= CT2_DATA_OUT;
1202 			}
1203 			cto->ct_reloff = atp->bytes_xfered;
1204 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1205 				cto->ct_flags |= CT2_SENDSTATUS;
1206 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1207 				cto->ct_resid =
1208 				    atp->orig_datalen -
1209 				    (atp->bytes_xfered + cso->dxfer_len);
1210 				if (cto->ct_resid < 0) {
1211 					cto->rsp.m0.ct_scsi_status |=
1212 					    CT2_DATA_OVER;
1213 				} else if (cto->ct_resid > 0) {
1214 					cto->rsp.m0.ct_scsi_status |=
1215 					    CT2_DATA_UNDER;
1216 				}
1217 			} else {
1218 				atp->last_xframt = cso->dxfer_len;
1219 			}
1220 			/*
1221 			 * If we're sending data and status back together,
1222 			 * we can't also send back sense data as well.
1223 			 */
1224 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1225 		}
1226 
1227 		if (cto->ct_flags & CT2_SENDSTATUS) {
1228 			isp_prt(isp, ISP_LOGTDEBUG0,
1229 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1230 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1231 			    cso->dxfer_len, cto->ct_resid);
1232 			cto->ct_flags |= CT2_CCINCR;
1233 			atp->state = ATPD_STATE_LAST_CTIO;
1234 		} else
1235 			atp->state = ATPD_STATE_CTIO;
1236 		cto->ct_timeout = 10;
1237 		hp = &cto->ct_syshandle;
1238 	} else {
1239 		ct_entry_t *cto = (ct_entry_t *) local;
1240 
1241 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1242 		cto->ct_header.rqs_entry_count = 1;
1243 		cto->ct_iid = cso->init_id;
1244 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1245 		cto->ct_tgt = ccb->ccb_h.target_id;
1246 		cto->ct_lun = ccb->ccb_h.target_lun;
1247 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1248 		if (AT_HAS_TAG(cso->tag_id)) {
1249 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1250 			cto->ct_flags |= CT_TQAE;
1251 		}
1252 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1253 			cto->ct_flags |= CT_NODISC;
1254 		}
1255 		if (cso->dxfer_len == 0) {
1256 			cto->ct_flags |= CT_NO_DATA;
1257 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1258 			cto->ct_flags |= CT_DATA_IN;
1259 		} else {
1260 			cto->ct_flags |= CT_DATA_OUT;
1261 		}
1262 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1263 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1264 			cto->ct_scsi_status = cso->scsi_status;
1265 			cto->ct_resid = cso->resid;
1266 			isp_prt(isp, ISP_LOGTDEBUG0,
1267 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1268 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1269 			    cso->tag_id);
1270 		}
1271 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1272 		cto->ct_timeout = 10;
1273 		hp = &cto->ct_syshandle;
1274 	}
1275 
1276 	if (isp_save_xs_tgt(isp, ccb, hp)) {
1277 		xpt_print_path(ccb->ccb_h.path);
1278 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1279 		return (CAM_RESRC_UNAVAIL);
1280 	}
1281 
1282 
1283 	/*
1284 	 * Call the dma setup routines for this entry (and any subsequent
1285 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1286 	 * new things to play with. As with isp_start's usage of DMA setup,
1287 	 * any swizzling is done in the machine dependent layer. Because
1288 	 * of this, we put the request onto the queue area first in native
1289 	 * format.
1290 	 */
1291 
1292 	save_handle = *hp;
1293 
1294 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1295 	case CMD_QUEUED:
1296 		ISP_ADD_REQUEST(isp, nxti);
1297 		return (CAM_REQ_INPROG);
1298 
1299 	case CMD_EAGAIN:
1300 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1301 		isp_destroy_tgt_handle(isp, save_handle);
1302 		return (CAM_RESRC_UNAVAIL);
1303 
1304 	default:
1305 		isp_destroy_tgt_handle(isp, save_handle);
1306 		return (XS_ERR(ccb));
1307 	}
1308 }
1309 
1310 static void
1311 isp_refire_putback_atio(void *arg)
1312 {
1313 	int s = splcam();
1314 	isp_target_putback_atio(arg);
1315 	splx(s);
1316 }
1317 
1318 static void
1319 isp_target_putback_atio(union ccb *ccb)
1320 {
1321 	struct ispsoftc *isp;
1322 	struct ccb_scsiio *cso;
1323 	u_int16_t nxti, optr;
1324 	void *qe;
1325 
1326 	isp = XS_ISP(ccb);
1327 
1328 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1329 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1330 		isp_prt(isp, ISP_LOGWARN,
1331 		    "isp_target_putback_atio: Request Queue Overflow");
1332 		return;
1333 	}
1334 	bzero(qe, QENTRY_LEN);
1335 	cso = &ccb->csio;
1336 	if (IS_FC(isp)) {
1337 		at2_entry_t local, *at = &local;
1338 		MEMZERO(at, sizeof (at2_entry_t));
1339 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1340 		at->at_header.rqs_entry_count = 1;
1341 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1342 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1343 		} else {
1344 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1345 		}
1346 		at->at_status = CT_OK;
1347 		at->at_rxid = cso->tag_id;
1348 		at->at_iid = cso->ccb_h.target_id;
1349 		isp_put_atio2(isp, at, qe);
1350 	} else {
1351 		at_entry_t local, *at = &local;
1352 		MEMZERO(at, sizeof (at_entry_t));
1353 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1354 		at->at_header.rqs_entry_count = 1;
1355 		at->at_iid = cso->init_id;
1356 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1357 		at->at_tgt = cso->ccb_h.target_id;
1358 		at->at_lun = cso->ccb_h.target_lun;
1359 		at->at_status = CT_OK;
1360 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1361 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1362 		isp_put_atio(isp, at, qe);
1363 	}
1364 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1365 	ISP_ADD_REQUEST(isp, nxti);
1366 	isp_complete_ctio(ccb);
1367 }
1368 
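/*
 * Final CCB completion for target mode: mark the CCB completed (unless an
 * error status was already set), clear CAM_SIM_QUEUED, and return it to CAM.
 */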
1369 static void
1370 isp_complete_ctio(union ccb *ccb)
1371 {
1372 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1373 		ccb->ccb_h.status |= CAM_REQ_CMP;
1374 	}
1375 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1376 	xpt_done(ccb);
1377 }
1378 
1379 /*
1380  * Handle ATIO stuff that the generic code can't.
1381  * This means handling CDBs.
1382  */
1383 
1384 static int
1385 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1386 {
1387 	tstate_t *tptr;
1388 	int status, bus, iswildcard;
1389 	struct ccb_accept_tio *atiop;
1390 
1391 	/*
1392 	 * The firmware status (except for the QLTM_SVALID bit)
1393 	 * indicates why this ATIO was sent to us.
1394 	 *
1395 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1396 	 *
1397 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1398 	 * we're still connected on the SCSI bus.
1399 	 */
1400 	status = aep->at_status;
1401 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1402 		/*
1403 		 * Bus Phase Sequence error. We should have sense data
1404 		 * suggested by the f/w. I'm not sure quite yet what
1405 		 * to do about this for CAM.
1406 		 */
1407 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1408 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1409 		return (0);
1410 	}
1411 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1412 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1413 		    status);
1414 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1415 		return (0);
1416 	}
1417 
1418 	bus = GET_BUS_VAL(aep->at_iid);
1419 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1420 	if (tptr == NULL) {
1421 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1422 		if (tptr == NULL) {
1423 			isp_endcmd(isp, aep,
1424 			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
1425 			    (0x5 << 12) | (0x25 << 16), 0);
1426 			return (0);
1427 		}
1428 		iswildcard = 1;
1429 	} else {
1430 		iswildcard = 0;
1431 	}
1432 
1433 	if (tptr == NULL) {
1434 		/*
1435 		 * Because we can't autofeed sense data back with
1436 		 * a command for parallel SCSI, we can't give back
1437 		 * a CHECK CONDITION. We'll give back a BUSY status
1438 		 * instead. This works out okay because the only
1439 		 * time we should, in fact, get this, is in the
1440 		 * case that somebody configured us without the
1441 		 * blackhole driver, so they get what they deserve.
1442 		 */
1443 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1444 		return (0);
1445 	}
1446 
1447 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1448 	if (atiop == NULL) {
1449 		/*
1450 		 * Because we can't autofeed sense data back with
1451 		 * a command for parallel SCSI, we can't give back
1452 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1453 		 * instead. This works out okay because the only time we
1454 		 * should, in fact, get this, is in the case that we've
1455 		 * run out of ATIOS.
1456 		 */
1457 		xpt_print_path(tptr->owner);
1458 		isp_prt(isp, ISP_LOGWARN,
1459 		    "no ATIOS for lun %d from initiator %d on channel %d",
1460 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1461 		if (aep->at_flags & AT_TQAE)
1462 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1463 		else
1464 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1465 		rls_lun_statep(isp, tptr);
1466 		return (0);
1467 	}
1468 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1469 	tptr->atio_count--;
1470 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
1471 	    aep->at_lun, tptr->atio_count);
1472 	if (iswildcard) {
1473 		atiop->ccb_h.target_id = aep->at_tgt;
1474 		atiop->ccb_h.target_lun = aep->at_lun;
1475 	}
1476 	if (aep->at_flags & AT_NODISC) {
1477 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1478 	} else {
1479 		atiop->ccb_h.flags = 0;
1480 	}
1481 
1482 	if (status & QLTM_SVALID) {
1483 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1484 		atiop->sense_len = amt;
1485 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1486 	} else {
1487 		atiop->sense_len = 0;
1488 	}
1489 
1490 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1491 	atiop->cdb_len = aep->at_cdblen;
1492 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1493 	atiop->ccb_h.status = CAM_CDB_RECVD;
1494 	/*
1495 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1496 	 * and the handle (which we have to preserve).
1497 	 */
1498 	AT_MAKE_TAGID(atiop->tag_id, aep);
1499 	if (aep->at_flags & AT_TQAE) {
1500 		atiop->tag_action = aep->at_tag_type;
1501 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1502 	}
1503 	xpt_done((union ccb*)atiop);
1504 	isp_prt(isp, ISP_LOGTDEBUG0,
1505 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1506 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1507 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1508 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1509 	    "nondisc" : "disconnecting");
1510 	rls_lun_statep(isp, tptr);
1511 	return (0);
1512 }
1513 
1514 static int
1515 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1516 {
1517 	lun_id_t lun;
1518 	tstate_t *tptr;
1519 	struct ccb_accept_tio *atiop;
1520 	atio_private_data_t *atp;
1521 
1522 	/*
1523 	 * The firmware status (except for the QLTM_SVALID bit)
1524 	 * indicates why this ATIO was sent to us.
1525 	 *
1526 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1527 	 */
1528 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1529 		isp_prt(isp, ISP_LOGWARN,
1530 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1531 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1532 		return (0);
1533 	}
1534 
1535 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1536 		lun = aep->at_scclun;
1537 	} else {
1538 		lun = aep->at_lun;
1539 	}
1540 	tptr = get_lun_statep(isp, 0, lun);
1541 	if (tptr == NULL) {
1542 		isp_prt(isp, ISP_LOGTDEBUG0,
1543 		    "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
1544 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1545 		if (tptr == NULL) {
1546 			isp_endcmd(isp, aep,
1547 			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
1548 			    (0x5 << 12) | (0x25 << 16), 0);
1549 			return (0);
1550 		}
1551 	}
1552 
1553 	atp = isp_get_atpd(isp, 0);
1554 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1555 	if (atiop == NULL || atp == NULL) {
1556 
1557 		/*
1558 		 * Because we can't autofeed sense data back with
1559 		 * a command for parallel SCSI, we can't give back
1560 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1561 		 * instead. This works out okay because the only time we
1562 		 * should, in fact, get this, is in the case that we've
1563 		 * run out of ATIOS.
1564 		 */
1565 		xpt_print_path(tptr->owner);
1566 		isp_prt(isp, ISP_LOGWARN,
1567 		    "no %s for lun %d from initiator %d",
1568 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1569 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1570 		rls_lun_statep(isp, tptr);
1571 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1572 		return (0);
1573 	}
1574 	atp->state = ATPD_STATE_ATIO;
1575 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1576 	tptr->atio_count--;
1577 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
1578 	    lun, tptr->atio_count);
1579 
1580 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1581 		atiop->ccb_h.target_id =
1582 		    ((fcparam *)isp->isp_param)->isp_loopid;
1583 		atiop->ccb_h.target_lun = lun;
1584 	}
1585 	/*
1586 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1587 	 */
1588 	atiop->sense_len = 0;
1589 
1590 	atiop->init_id = aep->at_iid;
1591 	atiop->cdb_len = ATIO2_CDBLEN;
1592 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1593 	atiop->ccb_h.status = CAM_CDB_RECVD;
1594 	atiop->tag_id = aep->at_rxid;
1595 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1596 	case ATIO2_TC_ATTR_SIMPLEQ:
1597 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1598 		break;
1599 	case ATIO2_TC_ATTR_HEADOFQ:
1600 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1601 		break;
1602 	case ATIO2_TC_ATTR_ORDERED:
1603 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1604 		break;
1605 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1606 	case ATIO2_TC_ATTR_UNTAGGED:
1607 	default:
1608 		atiop->tag_action = 0;
1609 		break;
1610 	}
1611 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1612 
1613 	atp->tag = atiop->tag_id;
1614 	atp->lun = lun;
1615 	atp->orig_datalen = aep->at_datalen;
1616 	atp->last_xframt = 0;
1617 	atp->bytes_xfered = 0;
1618 	atp->state = ATPD_STATE_CAM;
1619 	ISPLOCK_2_CAMLOCK(isp);
1620 	xpt_done((union ccb*)atiop);
	CAMLOCK_2_ISPLOCK(isp);
1621 
1622 	isp_prt(isp, ISP_LOGTDEBUG0,
1623 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1624 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1625 	    lun, aep->at_taskflags, aep->at_datalen);
1626 	rls_lun_statep(isp, tptr);
1627 	return (0);
1628 }
1629 
1630 static int
1631 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1632 {
1633 	union ccb *ccb;
1634 	int sentstatus, ok, notify_cam, resid = 0;
1635 	u_int16_t tval;
1636 
1637 	/*
1638 	 * CTIO and CTIO2 are close enough....
1639 	 */
1640 
1641 	ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
1642 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1643 	isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1644 
1645 	if (IS_FC(isp)) {
1646 		ct2_entry_t *ct = arg;
1647 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1648 		if (atp == NULL) {
1649 			isp_prt(isp, ISP_LOGERR,
1650 			    "cannot find adjunct for %x after I/O",
1651 			    ct->ct_rxid);
1652 			return (0);
1653 		}
1654 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1655 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1656 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1657 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1658 		}
1659 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1660 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1661 			resid = ct->ct_resid;
1662 			atp->bytes_xfered += (atp->last_xframt - resid);
1663 			atp->last_xframt = 0;
1664 		}
1665 		if (sentstatus || !ok) {
1666 			atp->tag = 0;
1667 		}
1668 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1669 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1670 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1671 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1672 		    resid, sentstatus? "FIN" : "MID");
1673 		tval = ct->ct_rxid;
1674 
1675 		/* XXX: should really come after isp_complete_ctio */
1676 		atp->state = ATPD_STATE_PDON;
1677 	} else {
1678 		ct_entry_t *ct = arg;
1679 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1680 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1681 		/*
1682 		 * We *ought* to be able to get back to the original ATIO
1683 		 * here, but for some reason this gets lost. It's just as
1684 		 * well because it's squirrelled away as part of periph
1685 		 * private data.
1686 		 *
1687 		 * We can live without it as long as we continue to use
1688 		 * the auto-replenish feature for CTIOs.
1689 		 */
1690 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1691 		if (ct->ct_status & QLTM_SVALID) {
1692 			char *sp = (char *)ct;
1693 			sp += CTIO_SENSE_OFFSET;
1694 			ccb->csio.sense_len =
1695 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1696 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1697 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1698 		}
1699 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1700 			resid = ct->ct_resid;
1701 		}
1702 		isp_prt(isp, ISP_LOGTDEBUG0,
1703 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1704 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1705 		    ct->ct_status, ct->ct_flags, resid,
1706 		    sentstatus? "FIN" : "MID");
1707 		tval = ct->ct_fwhandle;
1708 	}
1709 	ccb->csio.resid += resid;
1710 
1711 	/*
1712 	 * We're here either because intermediate data transfers are done
1713 	 * and/or the final status CTIO (which may have joined with a
1714 	 * Data Transfer) is done.
1715 	 *
1716 	 * In any case, for this platform, the upper layers figure out
1717 	 * what to do next, so all we do here is collect status and
1718 	 * pass information along. Any DMA handles have already been
1719 	 * freed.
1720 	 */
1721 	if (notify_cam == 0) {
1722 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1723 		return (0);
1724 	}
1725 
1726 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1727 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1728 
1729 	if (!ok) {
1730 		isp_target_putback_atio(ccb);
1731 	} else {
1732 		isp_complete_ctio(ccb);
1733 
1734 	}
1735 	return (0);
1736 }
1737 
1738 static void
1739 isp_handle_platform_ctio_fastpost(struct ispsoftc *isp, u_int32_t token)
1740 {
1741 	union ccb *ccb;
1742 	ccb = isp_find_xs_tgt(isp, token & 0xffff);
1743 	KASSERT((ccb != NULL),
1744 	    ("null ccb in isp_handle_platform_ctio_fastpost"));
1745 	isp_destroy_tgt_handle(isp, token & 0xffff);
1746 	isp_prt(isp, ISP_LOGTDEBUG1, "CTIOx[%x] fastpost complete",
1747 	    token & 0xffff);
1748 	isp_complete_ctio(ccb);
1749 }
1750 
1751 static int
1752 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1753 {
1754 	return (0);	/* XXXX */
1755 }
1756 
1757 static int
1758 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1759 {
1760 
1761 	switch (inp->in_status) {
1762 	case IN_PORT_LOGOUT:
1763 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1764 		   inp->in_iid);
1765 		break;
1766 	case IN_PORT_CHANGED:
1767 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1768 		   inp->in_iid);
1769 		break;
1770 	case IN_GLOBAL_LOGO:
1771 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1772 		break;
1773 	case IN_ABORT_TASK:
1774 	{
1775 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1776 		struct ccb_immed_notify *inot = NULL;
1777 
1778 		if (atp) {
1779 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1780 			if (tptr) {
1781 				inot = (struct ccb_immed_notify *)
1782 				    SLIST_FIRST(&tptr->inots);
1783 				if (inot) {
1784 					tptr->inot_count--;
1785 					SLIST_REMOVE_HEAD(&tptr->inots,
1786 					    sim_links.sle);
1787 					isp_prt(isp, ISP_LOGTDEBUG0,
1788 					    "Take FREE INOT count now %d",
1789 					    tptr->inot_count);
1790 				}
1791 			}
1792 			isp_prt(isp, ISP_LOGWARN,
1793 			   "abort task RX_ID %x IID %d state %d",
1794 			   inp->in_seqid, inp->in_iid, atp->state);
1795 		} else {
1796 			isp_prt(isp, ISP_LOGWARN,
1797 			   "abort task RX_ID %x from iid %d, state unknown",
1798 			   inp->in_seqid, inp->in_iid);
1799 		}
1800 		if (inot) {
1801 			inot->initiator_id = inp->in_iid;
1802 			inot->sense_len = 0;
1803 			inot->message_args[0] = MSG_ABORT_TAG;
1804 			inot->message_args[1] = inp->in_seqid & 0xff;
1805 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1806 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1807 			xpt_done((union ccb *)inot);
1808 		}
1809 		break;
1810 	}
1811 	default:
1812 		break;
1813 	}
1814 	return (0);
1815 }
1816 #endif
1817 
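/*
 * CAM async event callback.  On AC_LOST_DEVICE for parallel SCSI this
 * temporarily sets the target's goal parameters to safe defaults, forces
 * a parameter update, and then restores the previous goal flags.
 */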
1818 static void
1819 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1820 {
1821 	struct cam_sim *sim;
1822 	struct ispsoftc *isp;
1823 
1824 	sim = (struct cam_sim *)cbarg;
1825 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1826 	switch (code) {
1827 	case AC_LOST_DEVICE:
1828 		if (IS_SCSI(isp)) {
1829 			u_int16_t oflags, nflags;
1830 			sdparam *sdp = isp->isp_param;
1831 			int tgt;
1832 
1833 			tgt = xpt_path_target_id(path);
1834 			if (tgt >= 0) {
1835 				sdp += cam_sim_bus(sim);
1836 				ISP_LOCK(isp);
1837 				nflags = sdp->isp_devparam[tgt].nvrm_flags;
1838 #ifndef	ISP_TARGET_MODE
1839 				nflags &= DPARM_SAFE_DFLT;
1840 				if (isp->isp_loaded_fw) {
1841 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1842 				}
1843 #else
1844 				nflags = DPARM_DEFAULT;
1845 #endif
1846 				oflags = sdp->isp_devparam[tgt].goal_flags;
1847 				sdp->isp_devparam[tgt].goal_flags = nflags;
1848 				sdp->isp_devparam[tgt].dev_update = 1;
1849 				isp->isp_update |= (1 << cam_sim_bus(sim));
1850 				(void) isp_control(isp,
1851 				    ISPCTL_UPDATE_PARAMS, NULL);
1852 				sdp->isp_devparam[tgt].goal_flags = oflags;
1853 				ISP_UNLOCK(isp);
1854 			}
1855 		}
1856 		break;
1857 	default:
1858 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1859 		break;
1860 	}
1861 }
1862 
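/*
 * CAM polling entry point: check the interrupt status registers and,
 * if anything is pending, run the interrupt service routine by hand.
 */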
1863 static void
1864 isp_poll(struct cam_sim *sim)
1865 {
1866 	struct ispsoftc *isp = cam_sim_softc(sim);
1867 	u_int16_t isr, sema, mbox;
1868 
1869 	ISP_LOCK(isp);
1870 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1871 		isp_intr(isp, isr, sema, mbox);
1872 	}
1873 	ISP_UNLOCK(isp);
1874 }
1875 
1876 
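/*
 * Per-command watchdog, armed when a command is queued. If the command
 * is still outstanding when the timer fires, we first give the chip one
 * more chance: poll the ISR, post a SYNC_ALL marker and re-arm for a one
 * second grace period. If it still has not completed on the next firing,
 * we abort it, release its handle and DMA resources, and complete it
 * with CAM_CMD_TIMEOUT.
 */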
1877 static void
1878 isp_watchdog(void *arg)
1879 {
1880 	XS_T *xs = arg;
1881 	struct ispsoftc *isp = XS_ISP(xs);
1882 	u_int32_t handle;
1883 	int iok;
1884 
1885 	/*
1886 	 * We've decided this command is dead. Make sure we're not trying
1887 	 * to kill a command that's already dead by getting its handle and
1888 	 * seeing whether it's still alive.
1889 	 */
1890 	ISP_LOCK(isp);
1891 	iok = isp->isp_osinfo.intsok;
1892 	isp->isp_osinfo.intsok = 0;
1893 	handle = isp_find_handle(isp, xs);
1894 	if (handle) {
1895 		u_int16_t isr, sema, mbox;
1896 
1897 		if (XS_CMD_DONE_P(xs)) {
1898 			isp_prt(isp, ISP_LOGDEBUG1,
1899 			    "watchdog found done cmd (handle 0x%x)", handle);
1900 			ISP_UNLOCK(isp);
1901 			return;
1902 		}
1903 
1904 		if (XS_CMD_WDOG_P(xs)) {
1905 			isp_prt(isp, ISP_LOGDEBUG2,
1906 			    "recursive watchdog (handle 0x%x)", handle);
1907 			ISP_UNLOCK(isp);
1908 			return;
1909 		}
1910 
1911 		XS_CMD_S_WDOG(xs);
1912 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1913 			isp_intr(isp, isr, sema, mbox);
1914 		}
1915 		if (XS_CMD_DONE_P(xs)) {
1916 			isp_prt(isp, ISP_LOGDEBUG2,
1917 			    "watchdog cleanup for handle 0x%x", handle);
1918 			xpt_done((union ccb *) xs);
1919 		} else if (XS_CMD_GRACE_P(xs)) {
1920 			/*
1921 			 * Make sure the command is *really* dead before we
1922 			 * release the handle (and DMA resources) for reuse.
1923 			 */
1924 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1925 
1926 			/*
1927 			 * After this point, the command is really dead.
1928 			 */
1929 			if (XS_XFRLEN(xs)) {
1930 				ISP_DMAFREE(isp, xs, handle);
1931 			}
1932 			isp_destroy_handle(isp, handle);
1933 			xpt_print_path(xs->ccb_h.path);
1934 			isp_prt(isp, ISP_LOGWARN,
1935 			    "watchdog timeout for handle 0x%x", handle);
1936 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1937 			XS_CMD_C_WDOG(xs);
1938 			isp_done(xs);
1939 		} else {
1940 			u_int16_t nxti, optr;
1941 			ispreq_t local, *mp= &local, *qe;
1942 
1943 			XS_CMD_C_WDOG(xs);
1944 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1945 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1946 				ISP_UNLOCK(isp);
1947 				return;
1948 			}
1949 			XS_CMD_S_GRACE(xs);
1950 			MEMZERO((void *) mp, sizeof (*mp));
1951 			mp->req_header.rqs_entry_count = 1;
1952 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1953 			mp->req_modifier = SYNC_ALL;
1954 			mp->req_target = XS_CHANNEL(xs) << 7;
1955 			isp_put_request(isp, mp, qe);
1956 			ISP_ADD_REQUEST(isp, nxti);
1957 		}
1958 	} else {
1959 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1960 	}
1961 	isp->isp_osinfo.intsok = iok;
1962 	ISP_UNLOCK(isp);
1963 }
1964 
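/*
 * Fibre Channel support thread. It is woken (via kthread_cv) whenever
 * loop state may have changed; it then tries to drive the loop to a
 * good state and, once that settles one way or the other, releases any
 * SIMQ freeze that was put in place because the loop was down.
 */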
1965 static void
1966 isp_kthread(void *arg)
1967 {
1968 	struct ispsoftc *isp = arg;
1969 
1970 #ifdef	ISP_SMPLOCK
1971 	mtx_lock(&isp->isp_lock);
1972 #else
1973 	mtx_lock(&Giant);
1974 #endif
1975 	/*
1976 	 * The first loop is for our usage where we have yet to have
1977 	 * gotten good fibre channel state.
1978 	 */
1979 	for (;;) {
1980 		int wasfrozen;
1981 
1982 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1983 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1984 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1985 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1986 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1987 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1988 				    isp->isp_osinfo.ktmature == 0) {
1989 					break;
1990 				}
1991 			}
1992 #ifdef	ISP_SMPLOCK
1993 			msleep(isp_kthread, &isp->isp_lock,
1994 			    PRIBIO, "isp_fcthrd", hz);
1995 #else
1996 			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1997 #endif
1998 		}
1999 
2000 		/*
2001 		 * Even if we didn't get good loop state we may be
2002 		 * unfreezing the SIMQ so that we can kill off
2003 		 * commands (if we've never seen loop before, for example).
2004 		 */
2005 		isp->isp_osinfo.ktmature = 1;
2006 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2007 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2008 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2009 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2010 			ISPLOCK_2_CAMLOCK(isp);
2011 			xpt_release_simq(isp->isp_sim, 1);
2012 			CAMLOCK_2_ISPLOCK(isp);
2013 		}
2014 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
2015 #ifdef	ISP_SMPLOCK
2016 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2017 #else
2018 		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
2019 #endif
2020 	}
2021 }
2022 
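/*
 * Main CAM SIM entry point: dispatch an incoming CCB. SCSI I/O is
 * handed to isp_start; the remaining function codes cover target mode
 * resources, resets, aborts, transfer negotiation and path inquiry.
 */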
2023 static void
2024 isp_action(struct cam_sim *sim, union ccb *ccb)
2025 {
2026 	int bus, tgt, error;
2027 	struct ispsoftc *isp;
2028 	struct ccb_trans_settings *cts;
2029 
2030 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2031 
2032 	isp = (struct ispsoftc *)cam_sim_softc(sim);
2033 	ccb->ccb_h.sim_priv.entries[0].field = 0;
2034 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2035 	if (isp->isp_state != ISP_RUNSTATE &&
2036 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
2037 		CAMLOCK_2_ISPLOCK(isp);
2038 		isp_init(isp);
2039 		if (isp->isp_state != ISP_INITSTATE) {
2040 			ISP_UNLOCK(isp);
2041 			/*
2042 			 * Lie. Say it was a selection timeout.
2043 			 */
2044 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2045 			xpt_freeze_devq(ccb->ccb_h.path, 1);
2046 			xpt_done(ccb);
2047 			return;
2048 		}
2049 		isp->isp_state = ISP_RUNSTATE;
2050 		ISPLOCK_2_CAMLOCK(isp);
2051 	}
2052 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2053 
2054 
2055 	switch (ccb->ccb_h.func_code) {
2056 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2057 		/*
2058 		 * Do a couple of preliminary checks...
2059 		 */
2060 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2061 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2062 				ccb->ccb_h.status = CAM_REQ_INVALID;
2063 				xpt_done(ccb);
2064 				break;
2065 			}
2066 		}
2067 #ifdef	DIAGNOSTIC
2068 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2069 			ccb->ccb_h.status = CAM_PATH_INVALID;
2070 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2071 			ccb->ccb_h.status = CAM_PATH_INVALID;
2072 		}
2073 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2074 			isp_prt(isp, ISP_LOGERR,
2075 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2076 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2077 			xpt_done(ccb);
2078 			break;
2079 		}
2080 #endif
2081 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2082 		CAMLOCK_2_ISPLOCK(isp);
2083 		error = isp_start((XS_T *) ccb);
2084 		switch (error) {
2085 		case CMD_QUEUED:
2086 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
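			/*
			 * Arm the watchdog. The CAM timeout is given in
			 * milliseconds (CAM_TIME_DEFAULT meaning 60
			 * seconds); convert it to ticks, add roughly two
			 * seconds of slack, and clamp the result so it
			 * fits the signed argument of timeout(9).
			 */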
2087 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2088 				u_int64_t ticks = (u_int64_t) hz;
2089 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2090 					ticks = 60 * 1000 * ticks;
2091 				else
2092 					ticks = ccb->ccb_h.timeout * hz;
2093 				ticks = ((ticks + 999) / 1000) + hz + hz;
2094 				if (ticks >= 0x80000000) {
2095 					isp_prt(isp, ISP_LOGERR,
2096 					    "timeout overflow");
2097 					ticks = 0x7fffffff;
2098 				}
2099 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2100 				    (caddr_t)ccb, (int)ticks);
2101 			} else {
2102 				callout_handle_init(&ccb->ccb_h.timeout_ch);
2103 			}
2104 			ISPLOCK_2_CAMLOCK(isp);
2105 			break;
2106 		case CMD_RQLATER:
2107 			/*
2108 			 * This can only happen for Fibre Channel
2109 			 */
2110 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2111 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2112 			    isp->isp_osinfo.ktmature) {
2113 				ISPLOCK_2_CAMLOCK(isp);
2114 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2115 				xpt_done(ccb);
2116 				break;
2117 			}
2118 #ifdef	ISP_SMPLOCK
2119 			cv_signal(&isp->isp_osinfo.kthread_cv);
2120 #else
2121 			wakeup(&isp->isp_osinfo.kthread_cv);
2122 #endif
2123 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2124 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2125 			ISPLOCK_2_CAMLOCK(isp);
2126 			xpt_done(ccb);
2127 			break;
2128 		case CMD_EAGAIN:
2129 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2130 			ISPLOCK_2_CAMLOCK(isp);
2131 			xpt_done(ccb);
2132 			break;
2133 		case CMD_COMPLETE:
2134 			isp_done((struct ccb_scsiio *) ccb);
2135 			ISPLOCK_2_CAMLOCK(isp);
2136 			break;
2137 		default:
2138 			isp_prt(isp, ISP_LOGERR,
2139 			    "What's this? 0x%x at %d in file %s",
2140 			    error, __LINE__, __FILE__);
2141 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2142 			xpt_done(ccb);
2143 			ISPLOCK_2_CAMLOCK(isp);
2144 		}
2145 		break;
2146 
2147 #ifdef	ISP_TARGET_MODE
2148 	case XPT_EN_LUN:		/* Enable LUN as a target */
2149 	{
2150 		int seq, iok, i;
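		/*
		 * Enable (or disable) a lun for target mode. isp_en_lun
		 * kicks off the firmware lun enable work; with
		 * isp_osinfo.intsok cleared we then poll the chip for up
		 * to about thirty seconds, waiting for the sequence
		 * (tracked in leact[seq]) to finish.
		 */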
2151 		CAMLOCK_2_ISPLOCK(isp);
2152 		iok = isp->isp_osinfo.intsok;
2153 		isp->isp_osinfo.intsok = 0;
2154 		seq = isp_en_lun(isp, ccb);
2155 		if (seq < 0) {
2156 			isp->isp_osinfo.intsok = iok;
2157 			ISPLOCK_2_CAMLOCK(isp);
2158 			xpt_done(ccb);
2159 			break;
2160 		}
2161 		for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) {
2162 			u_int16_t isr, sema, mbox;
2163 			if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2164 				isp_intr(isp, isr, sema, mbox);
2165 			}
2166 			DELAY(1000);
2167 		}
2168 		isp->isp_osinfo.intsok = iok;
2169 		ISPLOCK_2_CAMLOCK(isp);
2170 		break;
2171 	}
2172 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2173 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2174 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2175 	{
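		/*
		 * These CCBs hand resources back to the driver: ATIO and
		 * immediate notify CCBs are queued on the per-lun state so
		 * they can be completed when the firmware posts new commands
		 * or events; a notify ACK is simply consumed here.
		 */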
2176 		tstate_t *tptr =
2177 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2178 		if (tptr == NULL) {
2179 			ccb->ccb_h.status = CAM_LUN_INVALID;
2180 			xpt_done(ccb);
2181 			break;
2182 		}
2183 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2184 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2185 		ccb->ccb_h.flags = 0;
2186 
2187 		CAMLOCK_2_ISPLOCK(isp);
2188 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2189 			/*
2190 			 * Note that the command itself may not be done-
2191 			 * it may not even have had the first CTIO sent.
2192 			 */
2193 			tptr->atio_count++;
2194 			isp_prt(isp, ISP_LOGTDEBUG0,
2195 			    "Put FREE ATIO, lun %d, count now %d",
2196 			    ccb->ccb_h.target_lun, tptr->atio_count);
2197 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2198 			    sim_links.sle);
2199 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2200 			tptr->inot_count++;
2201 			isp_prt(isp, ISP_LOGTDEBUG0,
2202 			    "Put FREE INOT, lun %d, count now %d",
2203 			    ccb->ccb_h.target_lun, tptr->inot_count);
2204 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2205 			    sim_links.sle);
2206 		} else {
2207 			isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");
2208 		}
2209 		rls_lun_statep(isp, tptr);
2210 		ccb->ccb_h.status = CAM_REQ_INPROG;
2211 		ISPLOCK_2_CAMLOCK(isp);
2212 		break;
2213 	}
2214 	case XPT_CONT_TARGET_IO:
2215 	{
2216 		CAMLOCK_2_ISPLOCK(isp);
2217 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2218 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2219 			isp_prt(isp, ISP_LOGWARN,
2220 			    "XPT_CONT_TARGET_IO: status 0x%x",
2221 			    ccb->ccb_h.status);
2222 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2223 			ISPLOCK_2_CAMLOCK(isp);
2224 			xpt_done(ccb);
2225 		} else {
2226 			ISPLOCK_2_CAMLOCK(isp);
2227 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2228 		}
2229 		break;
2230 	}
2231 #endif
2232 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2233 
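		/*
		 * isp_control takes a single argument here, so the bus
		 * number is folded into the upper bits of the target id.
		 */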
2234 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2235 		tgt = ccb->ccb_h.target_id;
2236 		tgt |= (bus << 16);
2237 
2238 		CAMLOCK_2_ISPLOCK(isp);
2239 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2240 		ISPLOCK_2_CAMLOCK(isp);
2241 		if (error) {
2242 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2243 		} else {
2244 			ccb->ccb_h.status = CAM_REQ_CMP;
2245 		}
2246 		xpt_done(ccb);
2247 		break;
2248 	case XPT_ABORT:			/* Abort the specified CCB */
2249 	{
2250 		union ccb *accb = ccb->cab.abort_ccb;
2251 		CAMLOCK_2_ISPLOCK(isp);
2252 		switch (accb->ccb_h.func_code) {
2253 #ifdef	ISP_TARGET_MODE
2254 		case XPT_ACCEPT_TARGET_IO:
2255 		case XPT_IMMED_NOTIFY:
2256 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2257 			break;
2258 		case XPT_CONT_TARGET_IO:
2259 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2260 			ccb->ccb_h.status = CAM_UA_ABORT;
2261 			break;
2262 #endif
2263 		case XPT_SCSI_IO:
2264 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2265 			if (error) {
2266 				ccb->ccb_h.status = CAM_UA_ABORT;
2267 			} else {
2268 				ccb->ccb_h.status = CAM_REQ_CMP;
2269 			}
2270 			break;
2271 		default:
2272 			ccb->ccb_h.status = CAM_REQ_INVALID;
2273 			break;
2274 		}
2275 		ISPLOCK_2_CAMLOCK(isp);
2276 		xpt_done(ccb);
2277 		break;
2278 	}
2279 #ifdef	CAM_NEW_TRAN_CODE
2280 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2281 #else
2282 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2283 #endif
2284 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2285 		cts = &ccb->cts;
2286 		if (!IS_CURRENT_SETTINGS(cts)) {
2287 			ccb->ccb_h.status = CAM_REQ_INVALID;
2288 			xpt_done(ccb);
2289 			break;
2290 		}
2291 		tgt = cts->ccb_h.target_id;
2292 		CAMLOCK_2_ISPLOCK(isp);
2293 		if (IS_SCSI(isp)) {
2294 #ifndef	CAM_NEW_TRAN_CODE
2295 			sdparam *sdp = isp->isp_param;
2296 			u_int16_t *dptr;
2297 
2298 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2299 
2300 			sdp += bus;
2301 			/*
2302 			 * We always update (internally) from goal_flags
2303 			 * so any request to change settings just gets
2304 			 * vectored to that location.
2305 			 */
2306 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2307 
2308 			/*
2309 			 * Note that these operations affect the
2310 			 * goal flags (goal_flags), not
2311 			 * the current state flags. Then we mark
2312 			 * things so that the next operation to
2313 			 * this HBA will cause the update to occur.
2314 			 */
2315 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2316 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2317 					*dptr |= DPARM_DISC;
2318 				} else {
2319 					*dptr &= ~DPARM_DISC;
2320 				}
2321 			}
2322 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2323 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2324 					*dptr |= DPARM_TQING;
2325 				} else {
2326 					*dptr &= ~DPARM_TQING;
2327 				}
2328 			}
2329 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2330 				switch (cts->bus_width) {
2331 				case MSG_EXT_WDTR_BUS_16_BIT:
2332 					*dptr |= DPARM_WIDE;
2333 					break;
2334 				default:
2335 					*dptr &= ~DPARM_WIDE;
2336 				}
2337 			}
2338 			/*
2339 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2340 			 * of nonzero will cause us to go to the
2341 			 * selected (from NVRAM) maximum value for
2342 			 * this device. At a later point, we'll
2343 			 * allow finer control.
2344 			 */
2345 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2346 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2347 			    (cts->sync_offset > 0)) {
2348 				*dptr |= DPARM_SYNC;
2349 			} else {
2350 				*dptr &= ~DPARM_SYNC;
2351 			}
2352 			*dptr |= DPARM_SAFE_DFLT;
2353 #else
2354 			struct ccb_trans_settings_scsi *scsi =
2355 			    &cts->proto_specific.scsi;
2356 			struct ccb_trans_settings_spi *spi =
2357 			    &cts->xport_specific.spi;
2358 			sdparam *sdp = isp->isp_param;
2359 			u_int16_t *dptr;
2360 
2361 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2362 			sdp += bus;
2363 			/*
2364 			 * We always update (internally) from goal_flags
2365 			 * so any request to change settings just gets
2366 			 * vectored to that location.
2367 			 */
2368 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2369 
2370 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2371 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2372 					*dptr |= DPARM_DISC;
2373 				else
2374 					*dptr &= ~DPARM_DISC;
2375 			}
2376 
2377 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2378 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2379 					*dptr |= DPARM_TQING;
2380 				else
2381 					*dptr &= ~DPARM_TQING;
2382 			}
2383 
2384 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2385 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2386 					*dptr |= DPARM_WIDE;
2387 				else
2388 					*dptr &= ~DPARM_WIDE;
2389 			}
2390 
2391 			/*
2392 			 * XXX: FIX ME
2393 			 */
2394 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2395 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2396 			    (spi->sync_period && spi->sync_offset)) {
2397 				*dptr |= DPARM_SYNC;
2398 				/*
2399 				 * XXX: CHECK FOR LEGALITY
2400 				 */
2401 				sdp->isp_devparam[tgt].goal_period =
2402 				    spi->sync_period;
2403 				sdp->isp_devparam[tgt].goal_offset =
2404 				    spi->sync_offset;
2405 			} else {
2406 				*dptr &= ~DPARM_SYNC;
2407 			}
2408 #endif
2409 			isp_prt(isp, ISP_LOGDEBUG0,
2410 			    "SET bus %d targ %d to flags %x off %x per %x",
2411 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2412 			    sdp->isp_devparam[tgt].goal_offset,
2413 			    sdp->isp_devparam[tgt].goal_period);
2414 			sdp->isp_devparam[tgt].dev_update = 1;
2415 			isp->isp_update |= (1 << bus);
2416 		}
2417 		ISPLOCK_2_CAMLOCK(isp);
2418 		ccb->ccb_h.status = CAM_REQ_CMP;
2419 		xpt_done(ccb);
2420 		break;
2421 	case XPT_GET_TRAN_SETTINGS:
2422 		cts = &ccb->cts;
2423 		tgt = cts->ccb_h.target_id;
2424 		CAMLOCK_2_ISPLOCK(isp);
2425 		if (IS_FC(isp)) {
2426 #ifndef	CAM_NEW_TRAN_CODE
2427 			/*
2428 			 * a lot of normal SCSI things don't make sense.
2429 			 */
2430 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2431 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2432 			/*
2433 			 * How do you measure the width of a high
2434 			 * speed serial bus? Well, in bytes.
2435 			 *
2436 			 * Offset and period make no sense, though, so we set
2437 			 * (above) a 'base' transfer speed to be gigabit.
2438 			 */
2439 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2440 #else
2441 			fcparam *fcp = isp->isp_param;
2442 			struct ccb_trans_settings_fc *fc =
2443 			    &cts->xport_specific.fc;
2444 
2445 			cts->protocol = PROTO_SCSI;
2446 			cts->protocol_version = SCSI_REV_2;
2447 			cts->transport = XPORT_FC;
2448 			cts->transport_version = 0;
2449 
2450 			fc->valid = CTS_FC_VALID_SPEED;
2451 			if (fcp->isp_gbspeed == 2)
2452 				fc->bitrate = 200000;
2453 			else
2454 				fc->bitrate = 100000;
2455 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2456 				struct lportdb *lp = &fcp->portdb[tgt];
2457 				fc->wwnn = lp->node_wwn;
2458 				fc->wwpn = lp->port_wwn;
2459 				fc->port = lp->portid;
2460 				fc->valid |= CTS_FC_VALID_WWNN |
2461 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2462 			}
2463 #endif
2464 		} else {
2465 #ifdef	CAM_NEW_TRAN_CODE
2466 			struct ccb_trans_settings_scsi *scsi =
2467 			    &cts->proto_specific.scsi;
2468 			struct ccb_trans_settings_spi *spi =
2469 			    &cts->xport_specific.spi;
2470 #endif
2471 			sdparam *sdp = isp->isp_param;
2472 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2473 			u_int16_t dval, pval, oval;
2474 
2475 			sdp += bus;
2476 
2477 			if (IS_CURRENT_SETTINGS(cts)) {
2478 				sdp->isp_devparam[tgt].dev_refresh = 1;
2479 				isp->isp_update |= (1 << bus);
2480 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2481 				    NULL);
2482 				dval = sdp->isp_devparam[tgt].actv_flags;
2483 				oval = sdp->isp_devparam[tgt].actv_offset;
2484 				pval = sdp->isp_devparam[tgt].actv_period;
2485 			} else {
2486 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2487 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2488 				pval = sdp->isp_devparam[tgt].nvrm_period;
2489 			}
2490 
2491 #ifndef	CAM_NEW_TRAN_CODE
2492 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2493 
2494 			if (dval & DPARM_DISC) {
2495 				cts->flags |= CCB_TRANS_DISC_ENB;
2496 			}
2497 			if (dval & DPARM_TQING) {
2498 				cts->flags |= CCB_TRANS_TAG_ENB;
2499 			}
2500 			if (dval & DPARM_WIDE) {
2501 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2502 			} else {
2503 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2504 			}
2505 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2506 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2507 
2508 			if ((dval & DPARM_SYNC) && oval != 0) {
2509 				cts->sync_period = pval;
2510 				cts->sync_offset = oval;
2511 				cts->valid |=
2512 				    CCB_TRANS_SYNC_RATE_VALID |
2513 				    CCB_TRANS_SYNC_OFFSET_VALID;
2514 			}
2515 #else
2516 			cts->protocol = PROTO_SCSI;
2517 			cts->protocol_version = SCSI_REV_2;
2518 			cts->transport = XPORT_SPI;
2519 			cts->transport_version = 2;
2520 
2521 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2522 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2523 			if (dval & DPARM_DISC) {
2524 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2525 			}
2526 			if (dval & DPARM_TQING) {
2527 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2528 			}
2529 			if ((dval & DPARM_SYNC) && oval && pval) {
2530 				spi->sync_offset = oval;
2531 				spi->sync_period = pval;
2532 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2533 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2534 			}
2535 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2536 			if (dval & DPARM_WIDE) {
2537 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2538 			} else {
2539 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2540 			}
2541 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2542 				scsi->valid = CTS_SCSI_VALID_TQ;
2543 				spi->valid |= CTS_SPI_VALID_DISC;
2544 			} else {
2545 				scsi->valid = 0;
2546 			}
2547 #endif
2548 			isp_prt(isp, ISP_LOGDEBUG0,
2549 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2550 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2551 			    bus, tgt, dval, oval, pval);
2552 		}
2553 		ISPLOCK_2_CAMLOCK(isp);
2554 		ccb->ccb_h.status = CAM_REQ_CMP;
2555 		xpt_done(ccb);
2556 		break;
2557 
2558 	case XPT_CALC_GEOMETRY:
2559 	{
2560 		struct ccb_calc_geometry *ccg;
2561 
2562 		ccg = &ccb->ccg;
2563 		if (ccg->block_size == 0) {
2564 			isp_prt(isp, ISP_LOGERR,
2565 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2566 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2567 			ccb->ccb_h.status = CAM_REQ_INVALID;
2568 			xpt_done(ccb);
2569 			break;
2570 		}
2571 		cam_calc_geometry(ccg, /*extended*/1);
2572 		xpt_done(ccb);
2573 		break;
2574 	}
2575 	case XPT_RESET_BUS:		/* Reset the specified bus */
2576 		bus = cam_sim_bus(sim);
2577 		CAMLOCK_2_ISPLOCK(isp);
2578 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2579 		ISPLOCK_2_CAMLOCK(isp);
2580 		if (error)
2581 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2582 		else {
2583 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2584 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2585 			else if (isp->isp_path != NULL)
2586 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2587 			ccb->ccb_h.status = CAM_REQ_CMP;
2588 		}
2589 		xpt_done(ccb);
2590 		break;
2591 
2592 	case XPT_TERM_IO:		/* Terminate the I/O process */
2593 		ccb->ccb_h.status = CAM_REQ_INVALID;
2594 		xpt_done(ccb);
2595 		break;
2596 
2597 	case XPT_PATH_INQ:		/* Path routing inquiry */
2598 	{
2599 		struct ccb_pathinq *cpi = &ccb->cpi;
2600 
2601 		cpi->version_num = 1;
2602 #ifdef	ISP_TARGET_MODE
2603 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2604 #else
2605 		cpi->target_sprt = 0;
2606 #endif
2607 		cpi->hba_eng_cnt = 0;
2608 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2609 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2610 		cpi->bus_id = cam_sim_bus(sim);
2611 		if (IS_FC(isp)) {
2612 			cpi->hba_misc = PIM_NOBUSRESET;
2613 			/*
2614 			 * Because our loop ID can shift from time to time,
2615 			 * make our initiator ID out of range of our bus.
2616 			 */
2617 			cpi->initiator_id = cpi->max_target + 1;
2618 
2619 			/*
2620 			 * Set base transfer capabilities for Fibre Channel.
2621 			 * Technically not correct because we don't know
2622 			 * what media we're running on top of- but we'll
2623 			 * look good if we always say 100MB/s.
2624 			 */
2625 			if (FCPARAM(isp)->isp_gbspeed == 2)
2626 				cpi->base_transfer_speed = 200000;
2627 			else
2628 				cpi->base_transfer_speed = 100000;
2629 			cpi->hba_inquiry = PI_TAG_ABLE;
2630 #ifdef	CAM_NEW_TRAN_CODE
2631 			cpi->transport = XPORT_FC;
2632 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2633 #endif
2634 		} else {
2635 			sdparam *sdp = isp->isp_param;
2636 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2637 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2638 			cpi->hba_misc = 0;
2639 			cpi->initiator_id = sdp->isp_initiator_id;
2640 			cpi->base_transfer_speed = 3300;
2641 #ifdef	CAM_NEW_TRAN_CODE
2642 			cpi->transport = XPORT_SPI;
2643 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2644 #endif
2645 		}
2646 #ifdef	CAM_NEW_TRAN_CODE
2647 		cpi->protocol = PROTO_SCSI;
2648 		cpi->protocol_version = SCSI_REV_2;
2649 #endif
2650 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2651 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2652 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2653 		cpi->unit_number = cam_sim_unit(sim);
2654 		cpi->ccb_h.status = CAM_REQ_CMP;
2655 		xpt_done(ccb);
2656 		break;
2657 	}
2658 	default:
2659 		ccb->ccb_h.status = CAM_REQ_INVALID;
2660 		xpt_done(ccb);
2661 		break;
2662 	}
2663 }
2664 
2665 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
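/*
 * Completion path for initiator mode commands. Translate SCSI status
 * into CAM status, freeze the device queue on any error so the
 * peripheral driver can sequence recovery, and, unless the watchdog is
 * currently working on this command, cancel its timer and hand the CCB
 * back to CAM.
 */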
2666 void
2667 isp_done(struct ccb_scsiio *sccb)
2668 {
2669 	struct ispsoftc *isp = XS_ISP(sccb);
2670 
2671 	if (XS_NOERR(sccb))
2672 		XS_SETERR(sccb, CAM_REQ_CMP);
2673 
2674 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2675 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2676 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2677 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2678 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2679 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2680 		} else {
2681 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2682 		}
2683 	}
2684 
2685 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2686 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2687 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2688 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2689 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2690 			isp_prt(isp, ISP_LOGDEBUG0,
2691 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2692 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2693 			    sccb->ccb_h.status, sccb->scsi_status);
2694 		}
2695 	}
2696 
2697 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2698 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2699 		xpt_print_path(sccb->ccb_h.path);
2700 		isp_prt(isp, ISP_LOGINFO,
2701 		    "cam completion status 0x%x", sccb->ccb_h.status);
2702 	}
2703 
2704 	XS_CMD_S_DONE(sccb);
2705 	if (XS_CMD_WDOG_P(sccb) == 0) {
2706 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2707 		if (XS_CMD_GRACE_P(sccb)) {
2708 			isp_prt(isp, ISP_LOGDEBUG2,
2709 			    "finished command on borrowed time");
2710 		}
2711 		XS_CMD_S_CLEAR(sccb);
2712 		ISPLOCK_2_CAMLOCK(isp);
2713 		xpt_done((union ccb *) sccb);
2714 		CAMLOCK_2_ISPLOCK(isp);
2715 	}
2716 }
2717 
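/*
 * Central handler for asynchronous events posted by the common code:
 * transfer negotiation updates, bus resets, Fibre Channel loop and
 * fabric changes, target mode actions and firmware crashes are all
 * routed through here to CAM or to the support thread.
 */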
2718 int
2719 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2720 {
2721 	int bus, rv = 0;
2722 	switch (cmd) {
2723 	case ISPASYNC_NEW_TGT_PARAMS:
2724 	{
2725 #ifdef	CAM_NEW_TRAN_CODE
2726 		struct ccb_trans_settings_scsi *scsi;
2727 		struct ccb_trans_settings_spi *spi;
2728 #endif
2729 		int flags, tgt;
2730 		sdparam *sdp = isp->isp_param;
2731 		struct ccb_trans_settings cts;
2732 		struct cam_path *tmppath;
2733 
2734 		bzero(&cts, sizeof (struct ccb_trans_settings));
2735 
2736 		tgt = *((int *)arg);
2737 		bus = (tgt >> 16) & 0xffff;
2738 		tgt &= 0xffff;
2739 		sdp += bus;
2740 		ISPLOCK_2_CAMLOCK(isp);
2741 		if (xpt_create_path(&tmppath, NULL,
2742 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2743 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2744 			CAMLOCK_2_ISPLOCK(isp);
2745 			isp_prt(isp, ISP_LOGWARN,
2746 			    "isp_async cannot make temp path for %d.%d",
2747 			    tgt, bus);
2748 			rv = -1;
2749 			break;
2750 		}
2751 		CAMLOCK_2_ISPLOCK(isp);
2752 		flags = sdp->isp_devparam[tgt].actv_flags;
2753 #ifdef	CAM_NEW_TRAN_CODE
2754 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2755 		cts.protocol = PROTO_SCSI;
2756 		cts.transport = XPORT_SPI;
2757 
2758 		scsi = &cts.proto_specific.scsi;
2759 		spi = &cts.xport_specific.spi;
2760 
2761 		if (flags & DPARM_TQING) {
2762 			scsi->valid |= CTS_SCSI_VALID_TQ;
2763 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2764 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2765 		}
2766 
2767 		if (flags & DPARM_DISC) {
2768 			spi->valid |= CTS_SPI_VALID_DISC;
2769 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2770 		}
2771 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2772 		if (flags & DPARM_WIDE) {
2773 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2774 		} else {
2775 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2776 		}
2777 		if (flags & DPARM_SYNC) {
2778 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2779 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2780 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2781 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2782 		}
2783 #else
2784 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2785 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2786 		if (flags & DPARM_DISC) {
2787 			cts.flags |= CCB_TRANS_DISC_ENB;
2788 		}
2789 		if (flags & DPARM_TQING) {
2790 			cts.flags |= CCB_TRANS_TAG_ENB;
2791 		}
2792 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2793 		cts.bus_width = (flags & DPARM_WIDE)?
2794 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2795 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2796 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2797 		if (flags & DPARM_SYNC) {
2798 			cts.valid |=
2799 			    CCB_TRANS_SYNC_RATE_VALID |
2800 			    CCB_TRANS_SYNC_OFFSET_VALID;
2801 		}
2802 #endif
2803 		isp_prt(isp, ISP_LOGDEBUG2,
2804 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2805 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2806 		    sdp->isp_devparam[tgt].actv_offset, flags);
2807 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2808 		ISPLOCK_2_CAMLOCK(isp);
2809 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2810 		xpt_free_path(tmppath);
2811 		CAMLOCK_2_ISPLOCK(isp);
2812 		break;
2813 	}
2814 	case ISPASYNC_BUS_RESET:
2815 		bus = *((int *)arg);
2816 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2817 		    bus);
2818 		if (bus > 0 && isp->isp_path2) {
2819 			ISPLOCK_2_CAMLOCK(isp);
2820 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2821 			CAMLOCK_2_ISPLOCK(isp);
2822 		} else if (isp->isp_path) {
2823 			ISPLOCK_2_CAMLOCK(isp);
2824 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2825 			CAMLOCK_2_ISPLOCK(isp);
2826 		}
2827 		break;
2828 	case ISPASYNC_LIP:
2829 		if (isp->isp_path) {
2830 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2831 		}
2832 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2833 		break;
2834 	case ISPASYNC_LOOP_RESET:
2835 		if (isp->isp_path) {
2836 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2837 		}
2838 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2839 		break;
2840 	case ISPASYNC_LOOP_DOWN:
2841 		if (isp->isp_path) {
2842 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2843 		}
2844 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2845 		break;
2846 	case ISPASYNC_LOOP_UP:
2847 		/*
2848 		 * Now we just note that Loop has come up. We don't
2849 		 * actually do anything because we're waiting for a
2850 		 * Change Notify before activating the FC cleanup
2851 		 * thread to look at the state of the loop again.
2852 		 */
2853 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2854 		break;
2855 	case ISPASYNC_PROMENADE:
2856 	{
2857 		struct cam_path *tmppath;
2858 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2859 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2860 		static const char *roles[4] = {
2861 		    "(none)", "Target", "Initiator", "Target/Initiator"
2862 		};
2863 		fcparam *fcp = isp->isp_param;
2864 		int tgt = *((int *) arg);
2865 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2866 		struct lportdb *lp = &fcp->portdb[tgt];
2867 
2868 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2869 		    roles[lp->roles & 0x3],
2870 		    (lp->valid)? "Arrived" : "Departed",
2871 		    (u_int32_t) (lp->port_wwn >> 32),
2872 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2873 		    (u_int32_t) (lp->node_wwn >> 32),
2874 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2875 
2876 		ISPLOCK_2_CAMLOCK(isp);
2877 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2878 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2879 			CAMLOCK_2_ISPLOCK(isp);
2880                         break;
2881 			break;
2882 		}
2883 		 * Policy: only announce targets.
2884 		 */
2885 		if (lp->roles & is_tgt_mask) {
2886 			if (lp->valid) {
2887 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2888 			} else {
2889 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2890 			}
2891 		}
2892 		xpt_free_path(tmppath);
2893 		CAMLOCK_2_ISPLOCK(isp);
2894 		break;
2895 	}
2896 	case ISPASYNC_CHANGE_NOTIFY:
2897 		if (arg == ISPASYNC_CHANGE_PDB) {
2898 			isp_prt(isp, ISP_LOGINFO,
2899 			    "Port Database Changed");
2900 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2901 			isp_prt(isp, ISP_LOGINFO,
2902 			    "Name Server Database Changed");
2903 		}
2904 #ifdef	ISP_SMPLOCK
2905 		cv_signal(&isp->isp_osinfo.kthread_cv);
2906 #else
2907 		wakeup(&isp->isp_osinfo.kthread_cv);
2908 #endif
2909 		break;
2910 	case ISPASYNC_FABRIC_DEV:
2911 	{
2912 		int target, base, lim;
2913 		fcparam *fcp = isp->isp_param;
2914 		struct lportdb *lp = NULL;
2915 		struct lportdb *clp = (struct lportdb *) arg;
2916 		char *pt;
2917 
2918 		switch (clp->port_type) {
2919 		case 1:
2920 			pt = "   N_Port";
2921 			break;
2922 		case 2:
2923 			pt = "  NL_Port";
2924 			break;
2925 		case 3:
2926 			pt = "F/NL_Port";
2927 			break;
2928 		case 0x7f:
2929 			pt = "  Nx_Port";
2930 			break;
2931 		case 0x81:
2932 			pt = "  F_port";
2933 			break;
2934 		case 0x82:
2935 			pt = "  FL_Port";
2936 			break;
2937 		case 0x84:
2938 			pt = "   E_port";
2939 			break;
2940 		default:
2941 			pt = " ";
2942 			break;
2943 		}
2944 
2945 		isp_prt(isp, ISP_LOGINFO,
2946 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2947 
2948 		/*
2949 		 * If we don't have an initiator role we bail.
2950 		 *
2951 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2952 		 */
2953 
2954 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2955 			break;
2956 		}
2957 
2958 		/*
2959 		 * Is this entry for us? If so, we bail.
2960 		 */
2961 
2962 		if (fcp->isp_portid == clp->portid) {
2963 			break;
2964 		}
2965 
2966 		/*
2967 		 * Else, the default policy is to find room for it in
2968 		 * our local port database. Later, when we execute
2969 		 * the call to isp_pdb_sync either this newly arrived
2970 		 * or already logged in device will be (re)announced.
2971 		 */
2972 
2973 		if (fcp->isp_topo == TOPO_FL_PORT)
2974 			base = FC_SNS_ID+1;
2975 		else
2976 			base = 0;
2977 
2978 		if (fcp->isp_topo == TOPO_N_PORT)
2979 			lim = 1;
2980 		else
2981 			lim = MAX_FC_TARG;
2982 
2983 		/*
2984 		 * Is it already in our list?
2985 		 */
2986 		for (target = base; target < lim; target++) {
2987 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2988 				continue;
2989 			}
2990 			lp = &fcp->portdb[target];
2991 			if (lp->port_wwn == clp->port_wwn &&
2992 			    lp->node_wwn == clp->node_wwn) {
2993 				lp->fabric_dev = 1;
2994 				break;
2995 			}
2996 		}
2997 		if (target < lim) {
2998 			break;
2999 		}
3000 		for (target = base; target < lim; target++) {
3001 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
3002 				continue;
3003 			}
3004 			lp = &fcp->portdb[target];
3005 			if (lp->port_wwn == 0) {
3006 				break;
3007 			}
3008 		}
3009 		if (target == lim) {
3010 			isp_prt(isp, ISP_LOGWARN,
3011 			    "out of space for fabric devices");
3012 			break;
3013 		}
3014 		lp->port_type = clp->port_type;
3015 		lp->fc4_type = clp->fc4_type;
3016 		lp->node_wwn = clp->node_wwn;
3017 		lp->port_wwn = clp->port_wwn;
3018 		lp->portid = clp->portid;
3019 		lp->fabric_dev = 1;
3020 		break;
3021 	}
3022 #ifdef	ISP_TARGET_MODE
3023 	case ISPASYNC_TARGET_MESSAGE:
3024 	{
3025 		tmd_msg_t *mp = arg;
3026 		isp_prt(isp, ISP_LOGALL,
3027 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3028 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3029 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3030 		    mp->nt_msg[0]);
3031 		break;
3032 	}
3033 	case ISPASYNC_TARGET_EVENT:
3034 	{
3035 		tmd_event_t *ep = arg;
3036 		if (ep->ev_event == ASYNC_CTIO_DONE) {
3037 			/*
3038 			 * ACK the interrupt first
3039 			 */
3040 			ISP_WRITE(isp, BIU_SEMA, 0);
3041 			ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
3042 			isp_handle_platform_ctio_fastpost(isp, ep->ev_bus);
3043 			break;
3044 		}
3045 		isp_prt(isp, ISP_LOGALL,
3046 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
3047 		break;
3048 	}
3049 	case ISPASYNC_TARGET_ACTION:
3050 		switch (((isphdr_t *)arg)->rqs_entry_type) {
3051 		default:
3052 			isp_prt(isp, ISP_LOGWARN,
3053 			   "event 0x%x for unhandled target action",
3054 			    ((isphdr_t *)arg)->rqs_entry_type);
3055 			break;
3056 		case RQSTYPE_NOTIFY:
3057 			if (IS_SCSI(isp)) {
3058 				rv = isp_handle_platform_notify_scsi(isp,
3059 				    (in_entry_t *) arg);
3060 			} else {
3061 				rv = isp_handle_platform_notify_fc(isp,
3062 				    (in_fcentry_t *) arg);
3063 			}
3064 			break;
3065 		case RQSTYPE_ATIO:
3066 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3067 			break;
3068 		case RQSTYPE_ATIO2:
3069 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3070 			break;
3071 		case RQSTYPE_CTIO2:
3072 		case RQSTYPE_CTIO:
3073 			rv = isp_handle_platform_ctio(isp, arg);
3074 			break;
3075 		case RQSTYPE_ENABLE_LUN:
3076 		case RQSTYPE_MODIFY_LUN:
3077 			isp_ledone(isp, (lun_entry_t *) arg);
3078 			break;
3079 		}
3080 		break;
3081 #endif
3082 	case ISPASYNC_FW_CRASH:
3083 	{
3084 		u_int16_t mbox1, mbox6;
3085 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3086 		if (IS_DUALBUS(isp)) {
3087 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
3088 		} else {
3089 			mbox6 = 0;
3090 		}
3091 		isp_prt(isp, ISP_LOGERR,
3092 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3093 		    mbox6, mbox1);
3094 #ifdef	ISP_FW_CRASH_DUMP
3095 		/*
3096 		 * XXX: really need a thread to do this right.
3097 		 */
3098 		if (IS_FC(isp)) {
3099 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3100 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3101 			isp_freeze_loopdown(isp, "f/w crash");
3102 			isp_fw_dump(isp);
3103 		}
3104 		isp_reinit(isp);
3105 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3106 #endif
3107 		break;
3108 	}
3109 	case ISPASYNC_UNHANDLED_RESPONSE:
3110 		break;
3111 	default:
3112 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3113 		break;
3114 	}
3115 	return (rv);
3116 }
3117 
3118 
3119 /*
3120  * Locks are held before coming here.
3121  */
3122 void
3123 isp_uninit(struct ispsoftc *isp)
3124 {
3125 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3126 	DISABLE_INTS(isp);
3127 }
3128 
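/*
 * Console logging helper. Messages are suppressed unless the level is
 * ISP_LOGALL or one of its bits is enabled in isp_dblev.
 */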
3129 void
3130 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3131 {
3132 	va_list ap;
3133 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3134 		return;
3135 	}
3136 	printf("%s: ", device_get_nameunit(isp->isp_dev));
3137 	va_start(ap, fmt);
3138 	vprintf(fmt, ap);
3139 	va_end(ap);
3140 	printf("\n");
3141 }
3142