xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
1 /*-
2  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
3  *
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice immediately at the beginning of the file, without modification,
11  *    this list of conditions, and the following disclaimer.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <dev/isp/isp_freebsd.h>
32 #include <sys/unistd.h>
33 #include <sys/kthread.h>
34 #include <machine/stdarg.h>	/* for use by isp_prt below */
35 #include <sys/conf.h>
36 #include <sys/module.h>
37 #include <sys/ioccom.h>
38 #include <dev/isp/isp_ioctl.h>
39 
40 
41 MODULE_VERSION(isp, 1);
42 MODULE_DEPEND(isp, cam, 1, 1, 1);
43 int isp_announced = 0;
44 ispfwfunc *isp_get_firmware_p = NULL;
45 
46 static d_ioctl_t ispioctl;
47 static void isp_intr_enable(void *);
48 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
49 static void isp_poll(struct cam_sim *);
50 static timeout_t isp_watchdog;
51 static void isp_kthread(void *);
52 static void isp_action(struct cam_sim *, union ccb *);
53 
54 
55 static struct cdevsw isp_cdevsw = {
56 	.d_version =	D_VERSION,
57 	.d_flags =	D_NEEDGIANT,
58 	.d_ioctl =	ispioctl,
59 	.d_name =	"isp",
60 };
61 
62 static struct ispsoftc *isplist = NULL;
63 
64 void
65 isp_attach(struct ispsoftc *isp)
66 {
67 	int primary, secondary;
68 	struct ccb_setasync csa;
69 	struct cam_devq *devq;
70 	struct cam_sim *sim;
71 	struct cam_path *path;
72 
73 	/*
74 	 * Establish (in case of 12X0) which bus is the primary.
75 	 */
76 
77 	primary = 0;
78 	secondary = 1;
79 
80 	/*
81 	 * Create the device queue for our SIM(s).
82 	 */
83 	devq = cam_simq_alloc(isp->isp_maxcmds);
84 	if (devq == NULL) {
85 		return;
86 	}
87 
88 	/*
89 	 * Construct our SIM entry.
90 	 */
91 	ISPLOCK_2_CAMLOCK(isp);
92 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
93 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
94 	if (sim == NULL) {
95 		cam_simq_free(devq);
96 		CAMLOCK_2_ISPLOCK(isp);
97 		return;
98 	}
99 	CAMLOCK_2_ISPLOCK(isp);
100 
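	/*
	 * Defer enabling chip interrupts until the boot-time interrupt
	 * hooks run; isp_intr_enable() (below) enables them and then
	 * removes the hook.
	 */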
101 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
102 	isp->isp_osinfo.ehook.ich_arg = isp;
103 	ISPLOCK_2_CAMLOCK(isp);
104 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
105 		cam_sim_free(sim, TRUE);
106 		CAMLOCK_2_ISPLOCK(isp);
107 		isp_prt(isp, ISP_LOGERR,
108 		    "could not establish interrupt enable hook");
109 		return;
110 	}
111 
112 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
113 		cam_sim_free(sim, TRUE);
114 		CAMLOCK_2_ISPLOCK(isp);
115 		return;
116 	}
117 
118 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
119 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
120 		xpt_bus_deregister(cam_sim_path(sim));
121 		cam_sim_free(sim, TRUE);
122 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
123 		CAMLOCK_2_ISPLOCK(isp);
124 		return;
125 	}
126 
127 	xpt_setup_ccb(&csa.ccb_h, path, 5);
128 	csa.ccb_h.func_code = XPT_SASYNC_CB;
129 	csa.event_enable = AC_LOST_DEVICE;
130 	csa.callback = isp_cam_async;
131 	csa.callback_arg = sim;
132 	xpt_action((union ccb *)&csa);
133 	CAMLOCK_2_ISPLOCK(isp);
134 	isp->isp_sim = sim;
135 	isp->isp_path = path;
136 	/*
137 	 * Create a kernel thread for fibre channel instances. We
138 	 * don't have dual channel FC cards.
139 	 */
140 	if (IS_FC(isp)) {
141 		ISPLOCK_2_CAMLOCK(isp);
142 		/* XXX: LOCK VIOLATION */
143 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
144 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
145 		    RFHIGHPID, 0, "%s: fc_thrd",
146 		    device_get_nameunit(isp->isp_dev))) {
147 			xpt_bus_deregister(cam_sim_path(sim));
148 			cam_sim_free(sim, TRUE);
149 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
150 			CAMLOCK_2_ISPLOCK(isp);
151 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
152 			return;
153 		}
154 		CAMLOCK_2_ISPLOCK(isp);
155 	}
156 
157 
158 	/*
159 	 * If we have a second channel, construct SIM entry for that.
160 	 */
161 	if (IS_DUALBUS(isp)) {
162 		ISPLOCK_2_CAMLOCK(isp);
163 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
164 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
165 		if (sim == NULL) {
166 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
167 			xpt_free_path(isp->isp_path);
168 			cam_simq_free(devq);
169 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
170 			return;
171 		}
172 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
173 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
174 			xpt_free_path(isp->isp_path);
175 			cam_sim_free(sim, TRUE);
176 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
177 			CAMLOCK_2_ISPLOCK(isp);
178 			return;
179 		}
180 
181 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
182 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
183 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
184 			xpt_free_path(isp->isp_path);
185 			xpt_bus_deregister(cam_sim_path(sim));
186 			cam_sim_free(sim, TRUE);
187 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
188 			CAMLOCK_2_ISPLOCK(isp);
189 			return;
190 		}
191 
192 		xpt_setup_ccb(&csa.ccb_h, path, 5);
193 		csa.ccb_h.func_code = XPT_SASYNC_CB;
194 		csa.event_enable = AC_LOST_DEVICE;
195 		csa.callback = isp_cam_async;
196 		csa.callback_arg = sim;
197 		xpt_action((union ccb *)&csa);
198 		CAMLOCK_2_ISPLOCK(isp);
199 		isp->isp_sim2 = sim;
200 		isp->isp_path2 = path;
201 	}
202 
203 	/*
204 	 * Create device nodes
205 	 */
206 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
207 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
208 
209 	if (isp->isp_role != ISP_ROLE_NONE) {
210 		isp->isp_state = ISP_RUNSTATE;
211 		ENABLE_INTS(isp);
212 	}
213 	if (isplist == NULL) {
214 		isplist = isp;
215 	} else {
216 		struct ispsoftc *tmp = isplist;
217 		while (tmp->isp_osinfo.next) {
218 			tmp = tmp->isp_osinfo.next;
219 		}
220 		tmp->isp_osinfo.next = isp;
221 	}
222 
223 }
224 
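/*
 * Freeze the SIM queue the first time the loop goes down; if it is
 * already frozen, just note the loopdown reason in simqfrozen.
 */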
225 static INLINE void
226 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
227 {
228 	if (isp->isp_osinfo.simqfrozen == 0) {
229 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
230 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
231 		ISPLOCK_2_CAMLOCK(isp);
232 		xpt_freeze_simq(isp->isp_sim, 1);
233 		CAMLOCK_2_ISPLOCK(isp);
234 	} else {
235 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
236 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
237 	}
238 }
239 
240 static int
241 ispioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
242 {
243 	struct ispsoftc *isp;
244 	int nr, retval = ENOTTY;
245 
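	/* Find the adapter whose unit number matches the device minor. */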
246 	isp = isplist;
247 	while (isp) {
248 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
249 			break;
250 		}
251 		isp = isp->isp_osinfo.next;
252 	}
253 	if (isp == NULL)
254 		return (ENXIO);
255 
256 	switch (cmd) {
257 #ifdef	ISP_FW_CRASH_DUMP
258 	case ISP_GET_FW_CRASH_DUMP:
259 	{
260 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
261 		size_t sz;
262 
263 		retval = 0;
264 		if (IS_2200(isp))
265 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
266 		else
267 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
268 		ISP_LOCK(isp);
269 		if (ptr && *ptr) {
270 			void *uaddr = *((void **) addr);
271 			if (copyout(ptr, uaddr, sz)) {
272 				retval = EFAULT;
273 			} else {
274 				*ptr = 0;
275 			}
276 		} else {
277 			retval = ENXIO;
278 		}
279 		ISP_UNLOCK(isp);
280 		break;
281 	}
282 
283 	case ISP_FORCE_CRASH_DUMP:
284 		ISP_LOCK(isp);
285 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
286 		isp_fw_dump(isp);
287 		isp_reinit(isp);
288 		ISP_UNLOCK(isp);
289 		retval = 0;
290 		break;
291 #endif
292 	case ISP_SDBLEV:
293 	{
294 		int olddblev = isp->isp_dblev;
295 		isp->isp_dblev = *(int *)addr;
296 		*(int *)addr = olddblev;
297 		retval = 0;
298 		break;
299 	}
300 	case ISP_GETROLE:
301 		*(int *)addr = isp->isp_role;
302 		retval = 0;
303 		break;
304 	case ISP_SETROLE:
305 		nr = *(int *)addr;
306 		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
307 			retval = EINVAL;
308 			break;
309 		}
310 		*(int *)addr = isp->isp_role;
311 		isp->isp_role = nr;
312 		/* FALLTHROUGH */
313 	case ISP_RESETHBA:
314 		ISP_LOCK(isp);
315 		isp_reinit(isp);
316 		ISP_UNLOCK(isp);
317 		retval = 0;
318 		break;
319 	case ISP_RESCAN:
320 		if (IS_FC(isp)) {
321 			ISP_LOCK(isp);
322 			if (isp_fc_runstate(isp, 5 * 1000000)) {
323 				retval = EIO;
324 			} else {
325 				retval = 0;
326 			}
327 			ISP_UNLOCK(isp);
328 		}
329 		break;
330 	case ISP_FC_LIP:
331 		if (IS_FC(isp)) {
332 			ISP_LOCK(isp);
333 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
334 				retval = EIO;
335 			} else {
336 				retval = 0;
337 			}
338 			ISP_UNLOCK(isp);
339 		}
340 		break;
341 	case ISP_FC_GETDINFO:
342 	{
343 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
344 		struct lportdb *lp;
345 
346 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
347 			retval = EINVAL;
348 			break;
349 		}
350 		ISP_LOCK(isp);
351 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
352 		if (lp->valid) {
353 			ifc->loopid = lp->loopid;
354 			ifc->portid = lp->portid;
355 			ifc->node_wwn = lp->node_wwn;
356 			ifc->port_wwn = lp->port_wwn;
357 			retval = 0;
358 		} else {
359 			retval = ENODEV;
360 		}
361 		ISP_UNLOCK(isp);
362 		break;
363 	}
364 	case ISP_GET_STATS:
365 	{
366 		isp_stats_t *sp = (isp_stats_t *) addr;
367 
368 		MEMZERO(sp, sizeof (*sp));
369 		sp->isp_stat_version = ISP_STATS_VERSION;
370 		sp->isp_type = isp->isp_type;
371 		sp->isp_revision = isp->isp_revision;
372 		ISP_LOCK(isp);
373 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
374 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
375 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
376 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
377 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
378 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
379 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
380 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
381 		ISP_UNLOCK(isp);
382 		retval = 0;
383 		break;
384 	}
385 	case ISP_CLR_STATS:
386 		ISP_LOCK(isp);
387 		isp->isp_intcnt = 0;
388 		isp->isp_intbogus = 0;
389 		isp->isp_intmboxc = 0;
390 		isp->isp_intoasync = 0;
391 		isp->isp_rsltccmplt = 0;
392 		isp->isp_fphccmplt = 0;
393 		isp->isp_rscchiwater = 0;
394 		isp->isp_fpcchiwater = 0;
395 		ISP_UNLOCK(isp);
396 		retval = 0;
397 		break;
398 	case ISP_FC_GETHINFO:
399 	{
400 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
401 		MEMZERO(hba, sizeof (*hba));
402 		ISP_LOCK(isp);
403 		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
404 		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
405 		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
406 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
407 		hba->fc_scsi_supported = 1;
408 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
409 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
410 		hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
411 		hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
412 		hba->active_node_wwn = ISP_NODEWWN(isp);
413 		hba->active_port_wwn = ISP_PORTWWN(isp);
414 		ISP_UNLOCK(isp);
415 		retval = 0;
416 		break;
417 	}
418 	case ISP_GET_FC_PARAM:
419 	{
420 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
421 
422 		if (!IS_FC(isp)) {
423 			retval = EINVAL;
424 			break;
425 		}
426 		f->parameter = 0;
427 		if (strcmp(f->param_name, "framelength") == 0) {
428 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
429 			retval = 0;
430 			break;
431 		}
432 		if (strcmp(f->param_name, "exec_throttle") == 0) {
433 			f->parameter = FCPARAM(isp)->isp_execthrottle;
434 			retval = 0;
435 			break;
436 		}
437 		if (strcmp(f->param_name, "fullduplex") == 0) {
438 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
439 				f->parameter = 1;
440 			retval = 0;
441 			break;
442 		}
443 		if (strcmp(f->param_name, "loopid") == 0) {
444 			f->parameter = FCPARAM(isp)->isp_loopid;
445 			retval = 0;
446 			break;
447 		}
448 		retval = EINVAL;
449 		break;
450 	}
451 	case ISP_SET_FC_PARAM:
452 	{
453 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
454 		u_int32_t param = f->parameter;
455 
456 		if (!IS_FC(isp)) {
457 			retval = EINVAL;
458 			break;
459 		}
460 		f->parameter = 0;
461 		if (strcmp(f->param_name, "framelength") == 0) {
462 			if (param != 512 && param != 1024 && param != 2048) {
463 				retval = EINVAL;
464 				break;
465 			}
466 			FCPARAM(isp)->isp_maxfrmlen = param;
467 			retval = 0;
468 			break;
469 		}
470 		if (strcmp(f->param_name, "exec_throttle") == 0) {
471 			if (param < 16 || param > 255) {
472 				retval = EINVAL;
473 				break;
474 			}
475 			FCPARAM(isp)->isp_execthrottle = param;
476 			retval = 0;
477 			break;
478 		}
479 		if (strcmp(f->param_name, "fullduplex") == 0) {
480 			if (param != 0 && param != 1) {
481 				retval = EINVAL;
482 				break;
483 			}
484 			if (param) {
485 				FCPARAM(isp)->isp_fwoptions |=
486 				    ICBOPT_FULL_DUPLEX;
487 			} else {
488 				FCPARAM(isp)->isp_fwoptions &=
489 				    ~ICBOPT_FULL_DUPLEX;
490 			}
491 			retval = 0;
492 			break;
493 		}
494 		if (strcmp(f->param_name, "loopid") == 0) {
495 			if (param < 0 || param > 125) {
496 				retval = EINVAL;
497 				break;
498 			}
499 			FCPARAM(isp)->isp_loopid = param;
500 			retval = 0;
501 			break;
502 		}
503 		retval = EINVAL;
504 		break;
505 	}
506 	default:
507 		break;
508 	}
509 	return (retval);
510 }
511 
512 static void
513 isp_intr_enable(void *arg)
514 {
515 	struct ispsoftc *isp = arg;
516 	if (isp->isp_role != ISP_ROLE_NONE) {
517 		ENABLE_INTS(isp);
518 #if	0
519 		isp->isp_osinfo.intsok = 1;
520 #endif
521 	}
522 	/* Release our hook so that the boot can continue. */
523 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
524 }
525 
526 /*
527  * Put the target mode functions here, because some are inlines
528  */
529 
530 #ifdef	ISP_TARGET_MODE
531 
532 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
533 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
534 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
535 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
536 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
537 static cam_status
538 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
539 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
540 static int isp_en_lun(struct ispsoftc *, union ccb *);
541 static void isp_ledone(struct ispsoftc *, lun_entry_t *);
542 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
543 static timeout_t isp_refire_putback_atio;
544 static void isp_complete_ctio(union ccb *);
545 static void isp_target_putback_atio(union ccb *);
546 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
547 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
548 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
549 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
550 static void isp_handle_platform_ctio_fastpost(struct ispsoftc *, u_int32_t);
551 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
552 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
553 
554 static INLINE int
555 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
556 {
557 	tstate_t *tptr;
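	/* Hash on (bus, lun) and walk the chain for an exact match. */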
558 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
559 	if (tptr == NULL) {
560 		return (0);
561 	}
562 	do {
563 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
564 			return (1);
565 		}
566 	} while ((tptr = tptr->next) != NULL);
567 	return (0);
568 }
569 
570 static INLINE int
571 are_any_luns_enabled(struct ispsoftc *isp, int port)
572 {
573 	int lo, hi;
574 	if (IS_DUALBUS(isp)) {
575 		lo = (port * (LUN_HASH_SIZE >> 1));
576 		hi = lo + (LUN_HASH_SIZE >> 1);
577 	} else {
578 		lo = 0;
579 		hi = LUN_HASH_SIZE;
580 	}
581 	for (; lo < hi; lo++) {
582 		if (isp->isp_osinfo.lun_hash[lo]) {
583 			return (1);
584 		}
585 	}
586 	return (0);
587 }
588 
589 static INLINE tstate_t *
590 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
591 {
592 	tstate_t *tptr = NULL;
593 
594 	if (lun == CAM_LUN_WILDCARD) {
595 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
596 			tptr = &isp->isp_osinfo.tsdflt[bus];
597 			tptr->hold++;
598 			return (tptr);
599 		}
600 		return (NULL);
601 	} else {
602 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
603 		if (tptr == NULL) {
604 			return (NULL);
605 		}
606 	}
607 
608 	do {
609 		if (tptr->lun == lun && tptr->bus == bus) {
610 			tptr->hold++;
611 			return (tptr);
612 		}
613 	} while ((tptr = tptr->next) != NULL);
614 	return (tptr);
615 }
616 
617 static INLINE void
618 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
619 {
620 	if (tptr->hold)
621 		tptr->hold--;
622 }
623 
624 static INLINE atio_private_data_t *
625 isp_get_atpd(struct ispsoftc *isp, int tag)
626 {
627 	atio_private_data_t *atp;
628 	for (atp = isp->isp_osinfo.atpdp;
629 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
630 		if (atp->tag == tag)
631 			return (atp);
632 	}
633 	return (NULL);
634 }
635 
636 static cam_status
637 create_lun_state(struct ispsoftc *isp, int bus,
638     struct cam_path *path, tstate_t **rslt)
639 {
640 	cam_status status;
641 	lun_id_t lun;
642 	int hfx;
643 	tstate_t *tptr, *new;
644 
645 	lun = xpt_path_lun_id(path);
646 	if (lun < 0) {
647 		return (CAM_LUN_INVALID);
648 	}
649 	if (is_lun_enabled(isp, bus, lun)) {
650 		return (CAM_LUN_ALRDY_ENA);
651 	}
652 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
653 	if (new == NULL) {
654 		return (CAM_RESRC_UNAVAIL);
655 	}
656 
657 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
658 	    xpt_path_target_id(path), xpt_path_lun_id(path));
659 	if (status != CAM_REQ_CMP) {
660 		free(new, M_DEVBUF);
661 		return (status);
662 	}
663 	new->bus = bus;
664 	new->lun = lun;
665 	SLIST_INIT(&new->atios);
666 	SLIST_INIT(&new->inots);
667 	new->hold = 1;
668 
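	/* Link the new lun state onto the tail of its hash chain. */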
669 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
670 	tptr = isp->isp_osinfo.lun_hash[hfx];
671 	if (tptr == NULL) {
672 		isp->isp_osinfo.lun_hash[hfx] = new;
673 	} else {
674 		while (tptr->next)
675 			tptr = tptr->next;
676 		tptr->next = new;
677 	}
678 	*rslt = new;
679 	return (CAM_REQ_CMP);
680 }
681 
682 static INLINE void
683 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
684 {
685 	int hfx;
686 	tstate_t *lw, *pw;
687 
688 	if (tptr->hold) {
689 		return;
690 	}
691 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
692 	pw = isp->isp_osinfo.lun_hash[hfx];
693 	if (pw == NULL) {
694 		return;
695 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
696 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
697 	} else {
698 		lw = pw;
699 		pw = lw->next;
700 		while (pw) {
701 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
702 				lw->next = pw->next;
703 				break;
704 			}
705 			lw = pw;
706 			pw = pw->next;
707 		}
708 		if (pw == NULL) {
709 			return;
710 		}
711 	}
712 	free(tptr, M_DEVBUF);
713 }
714 
715 /*
716  * Enable luns.
717  */
718 static int
719 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
720 {
721 	struct ccb_en_lun *cel = &ccb->cel;
722 	tstate_t *tptr;
723 	u_int32_t seq;
724 	int bus, cmd, av, wildcard, tm_on;
725 	lun_id_t lun;
726 	target_id_t tgt;
727 
728 	bus = XS_CHANNEL(ccb);
729 	if (bus > 1) {
730 		xpt_print_path(ccb->ccb_h.path);
731 		printf("illegal bus %d\n", bus);
732 		ccb->ccb_h.status = CAM_PATH_INVALID;
733 		return (-1);
734 	}
735 	tgt = ccb->ccb_h.target_id;
736 	lun = ccb->ccb_h.target_lun;
737 
738 	isp_prt(isp, ISP_LOGTDEBUG0,
739 	    "isp_en_lun: %sabling lun 0x%x on channel %d",
740 	    cel->enable? "en" : "dis", lun, bus);
741 
742 
743 	if ((lun != CAM_LUN_WILDCARD) &&
744 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
745 		ccb->ccb_h.status = CAM_LUN_INVALID;
746 		return (-1);
747 	}
748 
749 	if (IS_SCSI(isp)) {
750 		sdparam *sdp = isp->isp_param;
751 		sdp += bus;
752 		if (tgt != CAM_TARGET_WILDCARD &&
753 		    tgt != sdp->isp_initiator_id) {
754 			ccb->ccb_h.status = CAM_TID_INVALID;
755 			return (-1);
756 		}
757 	} else {
758 		/*
759 		 * There's really no point in doing this yet w/o multi-tid
760 		 * capability. Even then, it's problematic.
761 		 */
762 #if	0
763 		if (tgt != CAM_TARGET_WILDCARD &&
764 		    tgt != FCPARAM(isp)->isp_iid) {
765 			ccb->ccb_h.status = CAM_TID_INVALID;
766 			return (-1);
767 		}
768 #endif
769 		/*
770 		 * This is as a good a place as any to check f/w capabilities.
771 		 */
772 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
773 			isp_prt(isp, ISP_LOGERR,
774 			    "firmware does not support target mode");
775 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
776 			return (-1);
777 		}
778 		/*
779 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
780 		 * XXX: dork with our already fragile enable/disable code.
781 		 */
782 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
783 			isp_prt(isp, ISP_LOGERR,
784 			    "firmware not SCCLUN capable");
785 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
786 			return (-1);
787 		}
788 	}
789 
790 	if (tgt == CAM_TARGET_WILDCARD) {
791 		if (lun == CAM_LUN_WILDCARD) {
792 			wildcard = 1;
793 		} else {
794 			ccb->ccb_h.status = CAM_LUN_INVALID;
795 			return (-1);
796 		}
797 	} else {
798 		wildcard = 0;
799 	}
800 
801 	tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0;
802 
803 	/*
804 	 * Next check to see whether this is a target/lun wildcard action.
805 	 *
806 	 * If so, we know that we can accept commands for luns that haven't
807 	 * been enabled yet and send them upstream. Otherwise, we have to
808 	 * handle them locally (if we see them at all).
809 	 */
810 
811 	if (wildcard) {
812 		tptr = &isp->isp_osinfo.tsdflt[bus];
813 		if (cel->enable) {
814 			if (tm_on) {
815 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
816 				return (-1);
817 			}
818 			ccb->ccb_h.status =
819 			    xpt_create_path(&tptr->owner, NULL,
820 			    xpt_path_path_id(ccb->ccb_h.path),
821 			    xpt_path_target_id(ccb->ccb_h.path),
822 			    xpt_path_lun_id(ccb->ccb_h.path));
823 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
824 				return (-1);
825 			}
826 			SLIST_INIT(&tptr->atios);
827 			SLIST_INIT(&tptr->inots);
828 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
829 		} else {
830 			if (tm_on == 0) {
831 				ccb->ccb_h.status = CAM_REQ_CMP;
832 				return (-1);
833 			}
834 			if (tptr->hold) {
835 				ccb->ccb_h.status = CAM_SCSI_BUSY;
836 				return (-1);
837 			}
838 			xpt_free_path(tptr->owner);
839 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
840 		}
841 	}
842 
843 	/*
844 	 * Now check to see whether this bus needs to be
845 	 * enabled/disabled with respect to target mode.
846 	 */
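	/* The channel number rides in the top bit of the ISPCTL_TOGGLE_TMODE arg. */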
847 	av = bus << 31;
848 	if (cel->enable && tm_on == 0) {
849 		av |= ENABLE_TARGET_FLAG;
850 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
851 		if (av) {
852 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
853 			if (wildcard) {
854 				isp->isp_osinfo.tmflags[bus] &=
855 				    ~TM_WILDCARD_ENABLED;
856 				xpt_free_path(tptr->owner);
857 			}
858 			return (-1);
859 		}
860 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
861 		isp_prt(isp, ISP_LOGINFO,
862 		    "Target Mode enabled on channel %d", bus);
863 	} else if (cel->enable == 0 && tm_on && wildcard) {
864 		if (are_any_luns_enabled(isp, bus)) {
865 			ccb->ccb_h.status = CAM_SCSI_BUSY;
866 			return (-1);
867 		}
868 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
869 		if (av) {
870 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
871 			return (-1);
872 		}
873 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
874 		isp_prt(isp, ISP_LOGINFO,
875 		    "Target Mode disabled on channel %d", bus);
876 	}
877 
878 	if (wildcard) {
879 		ccb->ccb_h.status = CAM_REQ_CMP;
880 		return (-1);
881 	}
882 
883 	/*
884 	 * Find an empty slot
885 	 */
886 	for (seq = 0; seq < NLEACT; seq++) {
887 		if (isp->isp_osinfo.leact[seq] == 0) {
888 			break;
889 		}
890 	}
891 	if (seq >= NLEACT) {
892 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
893 		return (-1);
894 
895 	}
896 	isp->isp_osinfo.leact[seq] = ccb;
897 
898 	if (cel->enable) {
899 		ccb->ccb_h.status =
900 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
901 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
902 			isp->isp_osinfo.leact[seq] = 0;
903 			return (-1);
904 		}
905 	} else {
906 		tptr = get_lun_statep(isp, bus, lun);
907 		if (tptr == NULL) {
908 			ccb->ccb_h.status = CAM_LUN_INVALID;
909 			return (-1);
910 		}
911 	}
912 
913 	if (cel->enable) {
914 		int c, n, ulun = lun;
915 
916 		cmd = RQSTYPE_ENABLE_LUN;
917 		c = DFLT_CMND_CNT;
918 		n = DFLT_INOT_CNT;
919 		if (IS_FC(isp) && lun != 0) {
920 			cmd = RQSTYPE_MODIFY_LUN;
921 			n = 0;
922 			/*
923 		 	 * For SCC firmware, we only deal with setting
924 			 * (enabling or modifying) lun 0.
925 			 */
926 			ulun = 0;
927 		}
928 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
929 			rls_lun_statep(isp, tptr);
930 			ccb->ccb_h.status = CAM_REQ_INPROG;
931 			return (seq);
932 		}
933 	} else {
934 		int c, n, ulun = lun;
935 
936 		cmd = -RQSTYPE_MODIFY_LUN;
937 		c = DFLT_CMND_CNT;
938 		n = DFLT_INOT_CNT;
939 		if (IS_FC(isp) && lun != 0) {
940 			n = 0;
941 			/*
942 		 	 * For SCC firmware, we only deal with setting
943 			 * (enabling or modifying) lun 0.
944 			 */
945 			ulun = 0;
946 		}
947 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
948 			rls_lun_statep(isp, tptr);
949 			ccb->ccb_h.status = CAM_REQ_INPROG;
950 			return (seq);
951 		}
952 	}
953 	rls_lun_statep(isp, tptr);
954 	xpt_print_path(ccb->ccb_h.path);
955 	printf("isp_lun_cmd failed\n");
956 	isp->isp_osinfo.leact[seq] = 0;
957 	ccb->ccb_h.status = CAM_REQ_CMP_ERR;
958 	return (-1);
959 }
960 
961 static void
962 isp_ledone(struct ispsoftc *isp, lun_entry_t *lep)
963 {
964 	const char lfmt[] = "lun %d now %sabled for target mode on channel %d";
965 	union ccb *ccb;
966 	u_int32_t seq;
967 	tstate_t *tptr;
968 	int av;
969 	struct ccb_en_lun *cel;
970 
971 	seq = lep->le_reserved - 1;
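	/* le_reserved carries the leact[] slot index plus one (set by isp_en_lun). */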
972 	if (seq >= NLEACT) {
973 		isp_prt(isp, ISP_LOGERR,
974 		    "seq out of range (%u) in isp_ledone", seq);
975 		return;
976 	}
977 	ccb = isp->isp_osinfo.leact[seq];
978 	if (ccb == 0) {
979 		isp_prt(isp, ISP_LOGERR,
980 		    "no ccb for seq %u in isp_ledone", seq);
981 		return;
982 	}
983 	cel = &ccb->cel;
984 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
985 	if (tptr == NULL) {
986 		xpt_print_path(ccb->ccb_h.path);
987 		printf("null tptr in isp_ledone\n");
988 		isp->isp_osinfo.leact[seq] = 0;
989 		return;
990 	}
991 
992 	if (lep->le_status != LUN_OK) {
993 		xpt_print_path(ccb->ccb_h.path);
994 		printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status);
995 err:
996 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
997 		xpt_print_path(ccb->ccb_h.path);
998 		rls_lun_statep(isp, tptr);
999 		isp->isp_osinfo.leact[seq] = 0;
1000 		ISPLOCK_2_CAMLOCK(isp);
1001 		xpt_done(ccb);
1002 		CAMLOCK_2_ISPLOCK(isp);
1003 		return;
1004 	} else {
1005 		isp_prt(isp, ISP_LOGTDEBUG0,
1006 		    "isp_ledone: ENABLE/MODIFY done okay");
1007 	}
1008 
1009 
1010 	if (cel->enable) {
1011 		ccb->ccb_h.status = CAM_REQ_CMP;
1012 		isp_prt(isp, /* ISP_LOGINFO */ ISP_LOGALL, lfmt,
1013 		    XS_LUN(ccb), "en", XS_CHANNEL(ccb));
1014 		rls_lun_statep(isp, tptr);
1015 		isp->isp_osinfo.leact[seq] = 0;
1016 		ISPLOCK_2_CAMLOCK(isp);
1017 		xpt_done(ccb);
1018 		CAMLOCK_2_ISPLOCK(isp);
1019 		return;
1020 	}
1021 
1022 	if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) {
1023 		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb),
1024 		    XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) {
1025 			xpt_print_path(ccb->ccb_h.path);
1026 			printf("isp_ledone: isp_lun_cmd failed\n");
1027 			goto err;
1028 		}
1029 		rls_lun_statep(isp, tptr);
1030 		return;
1031 	}
1032 
1033 	isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb));
1034 	rls_lun_statep(isp, tptr);
1035 	destroy_lun_state(isp, tptr);
1036 	ccb->ccb_h.status = CAM_REQ_CMP;
1037 	isp->isp_osinfo.leact[seq] = 0;
1038 	ISPLOCK_2_CAMLOCK(isp);
1039 	xpt_done(ccb);
1040 	CAMLOCK_2_ISPLOCK(isp);
1041 	if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) {
1042 		int bus = XS_CHANNEL(ccb);
1043 		av = bus << 31;
1044 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1045 		if (av) {
1046 			isp_prt(isp, ISP_LOGWARN,
1047 			    "disable target mode on channel %d failed", bus);
1048 		} else {
1049 			isp_prt(isp, ISP_LOGINFO,
1050 			    "Target Mode disabled on channel %d", bus);
1051 		}
1052 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1053 	}
1054 }
1055 
1056 
1057 static cam_status
1058 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1059 {
1060 	tstate_t *tptr;
1061 	struct ccb_hdr_slist *lp;
1062 	struct ccb_hdr *curelm;
1063 	int found, *ctr;
1064 	union ccb *accb = ccb->cab.abort_ccb;
1065 
1066 	isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb);
1067 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1068 		int badpath = 0;
1069 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1070 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1071 			badpath = 1;
1072 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1073 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1074 			badpath = 1;
1075 		}
1076 		if (badpath) {
1077 			/*
1078 			 * Being restrictive about target ids is really about
1079 			 * making sure we're aborting for the right multi-tid
1080 			 * path. This doesn't really make much sense at present.
1081 			 */
1082 #if	0
1083 			return (CAM_PATH_INVALID);
1084 #endif
1085 		}
1086 	}
1087 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1088 	if (tptr == NULL) {
1089 		isp_prt(isp, ISP_LOGTDEBUG0,
1090 		    "isp_abort_tgt_ccb: can't get statep");
1091 		return (CAM_PATH_INVALID);
1092 	}
1093 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1094 		lp = &tptr->atios;
1095 		ctr = &tptr->atio_count;
1096 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1097 		lp = &tptr->inots;
1098 		ctr = &tptr->inot_count;
1099 	} else {
1100 		rls_lun_statep(isp, tptr);
1101 		isp_prt(isp, ISP_LOGTDEBUG0,
1102 		    "isp_abort_tgt_ccb: bad func %d\n", accb->ccb_h.func_code);
1103 		return (CAM_UA_ABORT);
1104 	}
1105 	curelm = SLIST_FIRST(lp);
1106 	found = 0;
1107 	if (curelm == &accb->ccb_h) {
1108 		found = 1;
1109 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1110 	} else {
1111 		while(curelm != NULL) {
1112 			struct ccb_hdr *nextelm;
1113 
1114 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1115 			if (nextelm == &accb->ccb_h) {
1116 				found = 1;
1117 				SLIST_NEXT(curelm, sim_links.sle) =
1118 				    SLIST_NEXT(nextelm, sim_links.sle);
1119 				break;
1120 			}
1121 			curelm = nextelm;
1122 		}
1123 	}
1124 	rls_lun_statep(isp, tptr);
1125 	if (found) {
1126 		(*ctr)--;
1127 		accb->ccb_h.status = CAM_REQ_ABORTED;
1128 		xpt_done(accb);
1129 		return (CAM_REQ_CMP);
1130 	}
1131 	isp_prt(isp, ISP_LOGTDEBUG0,
1132 	    "isp_abort_tgt_ccb: CCB %p not found\n", ccb);
1133 	return (CAM_PATH_INVALID);
1134 }
1135 
1136 static cam_status
1137 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1138 {
1139 	void *qe;
1140 	struct ccb_scsiio *cso = &ccb->csio;
1141 	u_int16_t *hp, save_handle;
1142 	u_int16_t nxti, optr;
1143 	u_int8_t local[QENTRY_LEN];
1144 
1145 
1146 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1147 		xpt_print_path(ccb->ccb_h.path);
1148 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1149 		return (CAM_RESRC_UNAVAIL);
1150 	}
1151 	bzero(local, QENTRY_LEN);
1152 
1153 	/*
1154 	 * We're either moving data or completing a command here.
1155 	 */
1156 
1157 	if (IS_FC(isp)) {
1158 		atio_private_data_t *atp;
1159 		ct2_entry_t *cto = (ct2_entry_t *) local;
1160 
1161 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1162 		cto->ct_header.rqs_entry_count = 1;
1163 		cto->ct_iid = cso->init_id;
1164 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1165 			cto->ct_lun = ccb->ccb_h.target_lun;
1166 		}
1167 
1168 		atp = isp_get_atpd(isp, cso->tag_id);
1169 		if (atp == NULL) {
1170 			isp_prt(isp, ISP_LOGERR,
1171 			    "cannot find private data adjunct for tag %x",
1172 			    cso->tag_id);
1173 			return (-1);
1174 		}
1175 
1176 		cto->ct_rxid = cso->tag_id;
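		/*
		 * No data to move: use a mode 1 (status/sense only) CTIO2.
		 * Otherwise use mode 0, which moves data and may also carry
		 * final status.
		 */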
1177 		if (cso->dxfer_len == 0) {
1178 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1179 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1180 				cto->ct_flags |= CT2_SENDSTATUS;
1181 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1182 				cto->ct_resid =
1183 				    atp->orig_datalen - atp->bytes_xfered;
1184 				if (cto->ct_resid < 0) {
1185 					cto->rsp.m1.ct_scsi_status |=
1186 					    CT2_DATA_OVER;
1187 				} else if (cto->ct_resid > 0) {
1188 					cto->rsp.m1.ct_scsi_status |=
1189 					    CT2_DATA_UNDER;
1190 				}
1191 			}
1192 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1193 				int m = min(cso->sense_len, MAXRESPLEN);
1194 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1195 				cto->rsp.m1.ct_senselen = m;
1196 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1197 			}
1198 		} else {
1199 			cto->ct_flags |= CT2_FLAG_MODE0;
1200 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1201 				cto->ct_flags |= CT2_DATA_IN;
1202 			} else {
1203 				cto->ct_flags |= CT2_DATA_OUT;
1204 			}
1205 			cto->ct_reloff = atp->bytes_xfered;
1206 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1207 				cto->ct_flags |= CT2_SENDSTATUS;
1208 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1209 				cto->ct_resid =
1210 				    atp->orig_datalen -
1211 				    (atp->bytes_xfered + cso->dxfer_len);
1212 				if (cto->ct_resid < 0) {
1213 					cto->rsp.m0.ct_scsi_status |=
1214 					    CT2_DATA_OVER;
1215 				} else if (cto->ct_resid > 0) {
1216 					cto->rsp.m0.ct_scsi_status |=
1217 					    CT2_DATA_UNDER;
1218 				}
1219 			} else {
1220 				atp->last_xframt = cso->dxfer_len;
1221 			}
1222 			/*
1223 			 * If we're sending data and status back together,
1224 			 * we can't also send back sense data as well.
1225 			 */
1226 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1227 		}
1228 
1229 		if (cto->ct_flags & CT2_SENDSTATUS) {
1230 			isp_prt(isp, ISP_LOGTDEBUG0,
1231 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1232 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1233 			    cso->dxfer_len, cto->ct_resid);
1234 			cto->ct_flags |= CT2_CCINCR;
1235 			atp->state = ATPD_STATE_LAST_CTIO;
1236 		} else
1237 			atp->state = ATPD_STATE_CTIO;
1238 		cto->ct_timeout = 10;
1239 		hp = &cto->ct_syshandle;
1240 	} else {
1241 		ct_entry_t *cto = (ct_entry_t *) local;
1242 
1243 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1244 		cto->ct_header.rqs_entry_count = 1;
1245 		cto->ct_iid = cso->init_id;
1246 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1247 		cto->ct_tgt = ccb->ccb_h.target_id;
1248 		cto->ct_lun = ccb->ccb_h.target_lun;
1249 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1250 		if (AT_HAS_TAG(cso->tag_id)) {
1251 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1252 			cto->ct_flags |= CT_TQAE;
1253 		}
1254 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1255 			cto->ct_flags |= CT_NODISC;
1256 		}
1257 		if (cso->dxfer_len == 0) {
1258 			cto->ct_flags |= CT_NO_DATA;
1259 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1260 			cto->ct_flags |= CT_DATA_IN;
1261 		} else {
1262 			cto->ct_flags |= CT_DATA_OUT;
1263 		}
1264 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1265 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1266 			cto->ct_scsi_status = cso->scsi_status;
1267 			cto->ct_resid = cso->resid;
1268 			isp_prt(isp, ISP_LOGTDEBUG0,
1269 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1270 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1271 			    cso->tag_id);
1272 		}
1273 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1274 		cto->ct_timeout = 10;
1275 		hp = &cto->ct_syshandle;
1276 	}
1277 
1278 	if (isp_save_xs_tgt(isp, ccb, hp)) {
1279 		xpt_print_path(ccb->ccb_h.path);
1280 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1281 		return (CAM_RESRC_UNAVAIL);
1282 	}
1283 
1284 
1285 	/*
1286 	 * Call the dma setup routines for this entry (and any subsequent
1287 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1288 	 * new things to play with. As with isp_start's usage of DMA setup,
1289 	 * any swizzling is done in the machine dependent layer. Because
1290 	 * of this, we put the request onto the queue area first in native
1291 	 * format.
1292 	 */
1293 
1294 	save_handle = *hp;
1295 
1296 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1297 	case CMD_QUEUED:
1298 		ISP_ADD_REQUEST(isp, nxti);
1299 		return (CAM_REQ_INPROG);
1300 
1301 	case CMD_EAGAIN:
1302 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1303 		isp_destroy_tgt_handle(isp, save_handle);
1304 		return (CAM_RESRC_UNAVAIL);
1305 
1306 	default:
1307 		isp_destroy_tgt_handle(isp, save_handle);
1308 		return (XS_ERR(ccb));
1309 	}
1310 }
1311 
1312 static void
1313 isp_refire_putback_atio(void *arg)
1314 {
1315 	int s = splcam();
1316 	isp_target_putback_atio(arg);
1317 	splx(s);
1318 }
1319 
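/*
 * Rebuild the original ATIO/ATIO2 from the CCB and hand it back to the
 * firmware so its ATIO resource is replenished, then complete the CTIO.
 */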
1320 static void
1321 isp_target_putback_atio(union ccb *ccb)
1322 {
1323 	struct ispsoftc *isp;
1324 	struct ccb_scsiio *cso;
1325 	u_int16_t nxti, optr;
1326 	void *qe;
1327 
1328 	isp = XS_ISP(ccb);
1329 
1330 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1331 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1332 		isp_prt(isp, ISP_LOGWARN,
1333 		    "isp_target_putback_atio: Request Queue Overflow");
1334 		return;
1335 	}
1336 	bzero(qe, QENTRY_LEN);
1337 	cso = &ccb->csio;
1338 	if (IS_FC(isp)) {
1339 		at2_entry_t local, *at = &local;
1340 		MEMZERO(at, sizeof (at2_entry_t));
1341 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1342 		at->at_header.rqs_entry_count = 1;
1343 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1344 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1345 		} else {
1346 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1347 		}
1348 		at->at_status = CT_OK;
1349 		at->at_rxid = cso->tag_id;
1350 		at->at_iid = cso->ccb_h.target_id;
1351 		isp_put_atio2(isp, at, qe);
1352 	} else {
1353 		at_entry_t local, *at = &local;
1354 		MEMZERO(at, sizeof (at_entry_t));
1355 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1356 		at->at_header.rqs_entry_count = 1;
1357 		at->at_iid = cso->init_id;
1358 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1359 		at->at_tgt = cso->ccb_h.target_id;
1360 		at->at_lun = cso->ccb_h.target_lun;
1361 		at->at_status = CT_OK;
1362 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1363 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1364 		isp_put_atio(isp, at, qe);
1365 	}
1366 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1367 	ISP_ADD_REQUEST(isp, nxti);
1368 	isp_complete_ctio(ccb);
1369 }
1370 
1371 static void
1372 isp_complete_ctio(union ccb *ccb)
1373 {
1374 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1375 		ccb->ccb_h.status |= CAM_REQ_CMP;
1376 	}
1377 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1378 	xpt_done(ccb);
1379 }
1380 
1381 /*
1382  * Handle ATIO stuff that the generic code can't.
1383  * This means handling CDBs.
1384  */
1385 
1386 static int
1387 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1388 {
1389 	tstate_t *tptr;
1390 	int status, bus, iswildcard;
1391 	struct ccb_accept_tio *atiop;
1392 
1393 	/*
1394 	 * The firmware status (except for the QLTM_SVALID bit)
1395 	 * indicates why this ATIO was sent to us.
1396 	 *
1397 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1398 	 *
1399 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1400 	 * we're still connected on the SCSI bus.
1401 	 */
1402 	status = aep->at_status;
1403 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1404 		/*
1405 		 * Bus Phase Sequence error. We should have sense data
1406 		 * suggested by the f/w. I'm not sure quite yet what
1407 		 * to do about this for CAM.
1408 		 */
1409 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1410 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1411 		return (0);
1412 	}
1413 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1414 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1415 		    status);
1416 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1417 		return (0);
1418 	}
1419 
1420 	bus = GET_BUS_VAL(aep->at_iid);
1421 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1422 	if (tptr == NULL) {
1423 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1424 		if (tptr == NULL) {
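			/*
			 * No wildcard lun either: finish the command with
			 * CHECK CONDITION and ILLEGAL REQUEST (0x5),
			 * LOGICAL UNIT NOT SUPPORTED (0x25) sense data.
			 */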
1425 			isp_endcmd(isp, aep,
1426 			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
1427 			    (0x5 << 12) | (0x25 << 16), 0);
1428 			return (0);
1429 		}
1430 		iswildcard = 1;
1431 	} else {
1432 		iswildcard = 0;
1433 	}
1434 
1435 	if (tptr == NULL) {
1436 		/*
1437 		 * Because we can't autofeed sense data back with
1438 		 * a command for parallel SCSI, we can't give back
1439 		 * a CHECK CONDITION. We'll give back a BUSY status
1440 		 * instead. This works out okay because the only
1441 		 * time we should, in fact, get this, is in the
1442 		 * case that somebody configured us without the
1443 		 * blackhole driver, so they get what they deserve.
1444 		 */
1445 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1446 		return (0);
1447 	}
1448 
1449 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1450 	if (atiop == NULL) {
1451 		/*
1452 		 * Because we can't autofeed sense data back with
1453 		 * a command for parallel SCSI, we can't give back
1454 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1455 		 * instead. This works out okay because the only time we
1456 		 * should, in fact, get this, is in the case that we've
1457 		 * run out of ATIOS.
1458 		 */
1459 		xpt_print_path(tptr->owner);
1460 		isp_prt(isp, ISP_LOGWARN,
1461 		    "no ATIOS for lun %d from initiator %d on channel %d",
1462 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1463 		if (aep->at_flags & AT_TQAE)
1464 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1465 		else
1466 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1467 		rls_lun_statep(isp, tptr);
1468 		return (0);
1469 	}
1470 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1471 	tptr->atio_count--;
1472 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
1473 	    aep->at_lun, tptr->atio_count);
1474 	if (iswildcard) {
1475 		atiop->ccb_h.target_id = aep->at_tgt;
1476 		atiop->ccb_h.target_lun = aep->at_lun;
1477 	}
1478 	if (aep->at_flags & AT_NODISC) {
1479 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1480 	} else {
1481 		atiop->ccb_h.flags = 0;
1482 	}
1483 
1484 	if (status & QLTM_SVALID) {
1485 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1486 		atiop->sense_len = amt;
1487 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1488 	} else {
1489 		atiop->sense_len = 0;
1490 	}
1491 
1492 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1493 	atiop->cdb_len = aep->at_cdblen;
1494 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1495 	atiop->ccb_h.status = CAM_CDB_RECVD;
1496 	/*
1497 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1498 	 * and the handle (which we have to preserve).
1499 	 */
1500 	AT_MAKE_TAGID(atiop->tag_id, 0, aep);
1501 	if (aep->at_flags & AT_TQAE) {
1502 		atiop->tag_action = aep->at_tag_type;
1503 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1504 	}
1505 	xpt_done((union ccb*)atiop);
1506 	isp_prt(isp, ISP_LOGTDEBUG0,
1507 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1508 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1509 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1510 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1511 	    "nondisc" : "disconnecting");
1512 	rls_lun_statep(isp, tptr);
1513 	return (0);
1514 }
1515 
1516 static int
1517 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1518 {
1519 	lun_id_t lun;
1520 	tstate_t *tptr;
1521 	struct ccb_accept_tio *atiop;
1522 	atio_private_data_t *atp;
1523 
1524 	/*
1525 	 * The firmware status (except for the QLTM_SVALID bit)
1526 	 * indicates why this ATIO was sent to us.
1527 	 *
1528 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1529 	 */
1530 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1531 		isp_prt(isp, ISP_LOGWARN,
1532 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1533 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1534 		return (0);
1535 	}
1536 
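	/* SCC-capable firmware reports the lun in the separate 16-bit scclun field. */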
1537 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1538 		lun = aep->at_scclun;
1539 	} else {
1540 		lun = aep->at_lun;
1541 	}
1542 	tptr = get_lun_statep(isp, 0, lun);
1543 	if (tptr == NULL) {
1544 		isp_prt(isp, ISP_LOGTDEBUG0,
1545 		    "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
1546 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1547 		if (tptr == NULL) {
1548 			isp_endcmd(isp, aep,
1549 			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
1550 			    (0x5 << 12) | (0x25 << 16), 0);
1551 			return (0);
1552 		}
1553 	}
1554 
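	/* A tag of zero finds a free (unused) ATIO private data slot. */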
1555 	atp = isp_get_atpd(isp, 0);
1556 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1557 	if (atiop == NULL || atp == NULL) {
1558 
1559 		/*
1560 		 * Because we can't autofeed sense data back with
1561 		 * a command for parallel SCSI, we can't give back
1562 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1563 		 * instead. This works out okay because the only time we
1564 		 * should, in fact, get this, is in the case that we've
1565 		 * run out of ATIOS.
1566 		 */
1567 		xpt_print_path(tptr->owner);
1568 		isp_prt(isp, ISP_LOGWARN,
1569 		    "no %s for lun %d from initiator %d",
1570 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1571 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1572 		rls_lun_statep(isp, tptr);
1573 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1574 		return (0);
1575 	}
1576 	atp->state = ATPD_STATE_ATIO;
1577 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1578 	tptr->atio_count--;
1579 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
1580 	    lun, tptr->atio_count);
1581 
1582 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1583 		atiop->ccb_h.target_id =
1584 		    ((fcparam *)isp->isp_param)->isp_loopid;
1585 		atiop->ccb_h.target_lun = lun;
1586 	}
1587 	/*
1588 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1589 	 */
1590 	atiop->sense_len = 0;
1591 
1592 	atiop->init_id = aep->at_iid;
1593 	atiop->cdb_len = ATIO2_CDBLEN;
1594 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1595 	atiop->ccb_h.status = CAM_CDB_RECVD;
1596 	atiop->tag_id = aep->at_rxid;
1597 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1598 	case ATIO2_TC_ATTR_SIMPLEQ:
1599 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1600 		break;
1601 	case ATIO2_TC_ATTR_HEADOFQ:
1602 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1603 		break;
1604 	case ATIO2_TC_ATTR_ORDERED:
1605 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1606 		break;
1607 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1608 	case ATIO2_TC_ATTR_UNTAGGED:
1609 	default:
1610 		atiop->tag_action = 0;
1611 		break;
1612 	}
1613 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1614 
1615 	atp->tag = atiop->tag_id;
1616 	atp->lun = lun;
1617 	atp->orig_datalen = aep->at_datalen;
1618 	atp->last_xframt = 0;
1619 	atp->bytes_xfered = 0;
1620 	atp->state = ATPD_STATE_CAM;
1621 	ISPLOCK_2_CAMLOCK(isp);
1622 	xpt_done((union ccb*)atiop);
1623 
1624 	isp_prt(isp, ISP_LOGTDEBUG0,
1625 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1626 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1627 	    lun, aep->at_taskflags, aep->at_datalen);
1628 	rls_lun_statep(isp, tptr);
1629 	return (0);
1630 }
1631 
1632 static int
1633 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1634 {
1635 	union ccb *ccb;
1636 	int sentstatus, ok, notify_cam, resid = 0;
1637 	u_int16_t tval;
1638 
1639 	/*
1640 	 * CTIO and CTIO2 are close enough....
1641 	 */
1642 
1643 	ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
1644 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1645 	isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1646 
1647 	if (IS_FC(isp)) {
1648 		ct2_entry_t *ct = arg;
1649 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1650 		if (atp == NULL) {
1651 			isp_prt(isp, ISP_LOGERR,
1652 			    "cannot find adjunct for %x after I/O",
1653 			    ct->ct_rxid);
1654 			return (0);
1655 		}
1656 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1657 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1658 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1659 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1660 		}
1661 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
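		/*
		 * Credit the data moved by this CTIO: the amount requested in
		 * the last transfer minus the residual the firmware reports.
		 */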
1662 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1663 			resid = ct->ct_resid;
1664 			atp->bytes_xfered += (atp->last_xframt - resid);
1665 			atp->last_xframt = 0;
1666 		}
1667 		if (sentstatus || !ok) {
1668 			atp->tag = 0;
1669 		}
1670 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1671 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1672 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1673 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1674 		    resid, sentstatus? "FIN" : "MID");
1675 		tval = ct->ct_rxid;
1676 
1677 		/* XXX: should really come after isp_complete_ctio */
1678 		atp->state = ATPD_STATE_PDON;
1679 	} else {
1680 		ct_entry_t *ct = arg;
1681 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1682 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1683 		/*
1684 		 * We *ought* to be able to get back to the original ATIO
1685 		 * here, but for some reason this gets lost. It's just as
1686 		 * well because it's squirrelled away as part of periph
1687 		 * private data.
1688 		 *
1689 		 * We can live without it as long as we continue to use
1690 		 * the auto-replenish feature for CTIOs.
1691 		 */
1692 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1693 		if (ct->ct_status & QLTM_SVALID) {
1694 			char *sp = (char *)ct;
1695 			sp += CTIO_SENSE_OFFSET;
1696 			ccb->csio.sense_len =
1697 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1698 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1699 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1700 		}
1701 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1702 			resid = ct->ct_resid;
1703 		}
1704 		isp_prt(isp, ISP_LOGTDEBUG0,
1705 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1706 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1707 		    ct->ct_status, ct->ct_flags, resid,
1708 		    sentstatus? "FIN" : "MID");
1709 		tval = ct->ct_fwhandle;
1710 	}
1711 	ccb->csio.resid += resid;
1712 
1713 	/*
1714 	 * We're here either because intermediate data transfers are done
1715 	 * and/or the final status CTIO (which may have joined with a
1716 	 * Data Transfer) is done.
1717 	 *
1718 	 * In any case, for this platform, the upper layers figure out
1719 	 * what to do next, so all we do here is collect status and
1720 	 * pass information along. Any DMA handles have already been
1721 	 * freed.
1722 	 */
1723 	if (notify_cam == 0) {
1724 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1725 		return (0);
1726 	}
1727 
1728 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1729 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1730 
1731 	if (!ok) {
1732 		isp_target_putback_atio(ccb);
1733 	} else {
1734 		isp_complete_ctio(ccb);
1735 
1736 	}
1737 	return (0);
1738 }
1739 
1740 static void
1741 isp_handle_platform_ctio_fastpost(struct ispsoftc *isp, u_int32_t token)
1742 {
1743 	union ccb *ccb;
1744 	ccb = isp_find_xs_tgt(isp, token & 0xffff);
1745 	KASSERT((ccb != NULL),
1746 	    ("null ccb in isp_handle_platform_ctio_fastpost"));
1747 	isp_destroy_tgt_handle(isp, token & 0xffff);
1748 	isp_prt(isp, ISP_LOGTDEBUG1, "CTIOx[%x] fastpost complete",
1749 	    token & 0xffff);
1750 	isp_complete_ctio(ccb);
1751 }
1752 
1753 static int
1754 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1755 {
1756 	return (0);	/* XXXX */
1757 }
1758 
1759 static int
1760 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1761 {
1762 
1763 	switch (inp->in_status) {
1764 	case IN_PORT_LOGOUT:
1765 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1766 		   inp->in_iid);
1767 		break;
1768 	case IN_PORT_CHANGED:
1769 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1770 		   inp->in_iid);
1771 		break;
1772 	case IN_GLOBAL_LOGO:
1773 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1774 		break;
1775 	case IN_ABORT_TASK:
1776 	{
1777 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1778 		struct ccb_immed_notify *inot = NULL;
1779 
1780 		if (atp) {
1781 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1782 			if (tptr) {
1783 				inot = (struct ccb_immed_notify *)
1784 				    SLIST_FIRST(&tptr->inots);
1785 				if (inot) {
1786 					tptr->inot_count--;
1787 					SLIST_REMOVE_HEAD(&tptr->inots,
1788 					    sim_links.sle);
1789 					isp_prt(isp, ISP_LOGTDEBUG0,
1790 					    "Take FREE INOT count now %d",
1791 					    tptr->inot_count);
1792 				}
1793 			}
1794 			isp_prt(isp, ISP_LOGWARN,
1795 			   "abort task RX_ID %x IID %d state %d",
1796 			   inp->in_seqid, inp->in_iid, atp->state);
1797 		} else {
1798 			isp_prt(isp, ISP_LOGWARN,
1799 			   "abort task RX_ID %x from iid %d, state unknown",
1800 			   inp->in_seqid, inp->in_iid);
1801 		}
1802 		if (inot) {
1803 			inot->initiator_id = inp->in_iid;
1804 			inot->sense_len = 0;
1805 			inot->message_args[0] = MSG_ABORT_TAG;
1806 			inot->message_args[1] = inp->in_seqid & 0xff;
1807 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1808 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1809 			xpt_done((union ccb *)inot);
1810 		}
1811 		break;
1812 	}
1813 	default:
1814 		break;
1815 	}
1816 	return (0);
1817 }
1818 #endif
1819 
1820 static void
1821 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1822 {
1823 	struct cam_sim *sim;
1824 	struct ispsoftc *isp;
1825 
1826 	sim = (struct cam_sim *)cbarg;
1827 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1828 	switch (code) {
1829 	case AC_LOST_DEVICE:
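		/*
		 * For parallel SCSI, temporarily push default (safe)
		 * negotiation goals for the departed target so any
		 * replacement device renegotiates, then restore the
		 * previous goals.
		 */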
1830 		if (IS_SCSI(isp)) {
1831 			u_int16_t oflags, nflags;
1832 			sdparam *sdp = isp->isp_param;
1833 			int tgt;
1834 
1835 			tgt = xpt_path_target_id(path);
1836 			if (tgt >= 0) {
1837 				sdp += cam_sim_bus(sim);
1838 				ISP_LOCK(isp);
1839 				nflags = sdp->isp_devparam[tgt].nvrm_flags;
1840 #ifndef	ISP_TARGET_MODE
1841 				nflags &= DPARM_SAFE_DFLT;
1842 				if (isp->isp_loaded_fw) {
1843 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1844 				}
1845 #else
1846 				nflags = DPARM_DEFAULT;
1847 #endif
1848 				oflags = sdp->isp_devparam[tgt].goal_flags;
1849 				sdp->isp_devparam[tgt].goal_flags = nflags;
1850 				sdp->isp_devparam[tgt].dev_update = 1;
1851 				isp->isp_update |= (1 << cam_sim_bus(sim));
1852 				(void) isp_control(isp,
1853 				    ISPCTL_UPDATE_PARAMS, NULL);
1854 				sdp->isp_devparam[tgt].goal_flags = oflags;
1855 				ISP_UNLOCK(isp);
1856 			}
1857 		}
1858 		break;
1859 	default:
1860 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1861 		break;
1862 	}
1863 }
1864 
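/*
 * CAM poll entry point: check the interrupt status registers once and,
 * if anything is pending, run the normal interrupt handler.
 */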
1865 static void
1866 isp_poll(struct cam_sim *sim)
1867 {
1868 	struct ispsoftc *isp = cam_sim_softc(sim);
1869 	u_int16_t isr, sema, mbox;
1870 
1871 	ISP_LOCK(isp);
1872 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1873 		isp_intr(isp, isr, sema, mbox);
1874 	}
1875 	ISP_UNLOCK(isp);
1876 }
1877 
1878 
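/*
 * Per-command watchdog. If the command still has a handle, first give the
 * interrupt handler a chance to complete it; failing that, either grant it
 * one more grace period (pushing a marker through the request queue), or,
 * if the grace period has already expired, abort it and complete it back
 * to CAM with CAM_CMD_TIMEOUT.
 */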
1879 static void
1880 isp_watchdog(void *arg)
1881 {
1882 	XS_T *xs = arg;
1883 	struct ispsoftc *isp = XS_ISP(xs);
1884 	u_int32_t handle;
1885 	int iok;
1886 
1887 	/*
1888 	 * We've decided this command is dead. Make sure we're not trying
1889 	 * to kill a command that's already dead by getting its handle and
1890 	 * seeing whether it's still alive.
1891 	 */
1892 	ISP_LOCK(isp);
1893 	iok = isp->isp_osinfo.intsok;
1894 	isp->isp_osinfo.intsok = 0;
1895 	handle = isp_find_handle(isp, xs);
1896 	if (handle) {
1897 		u_int16_t isr, sema, mbox;
1898 
1899 		if (XS_CMD_DONE_P(xs)) {
1900 			isp_prt(isp, ISP_LOGDEBUG1,
1901 			    "watchdog found done cmd (handle 0x%x)", handle);
1902 			ISP_UNLOCK(isp);
1903 			return;
1904 		}
1905 
1906 		if (XS_CMD_WDOG_P(xs)) {
1907 			isp_prt(isp, ISP_LOGDEBUG2,
1908 			    "recursive watchdog (handle 0x%x)", handle);
1909 			ISP_UNLOCK(isp);
1910 			return;
1911 		}
1912 
1913 		XS_CMD_S_WDOG(xs);
1914 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1915 			isp_intr(isp, isr, sema, mbox);
1916 		}
1917 		if (XS_CMD_DONE_P(xs)) {
1918 			isp_prt(isp, ISP_LOGDEBUG2,
1919 			    "watchdog cleanup for handle 0x%x", handle);
1920 			xpt_done((union ccb *) xs);
1921 		} else if (XS_CMD_GRACE_P(xs)) {
1922 			/*
1923 			 * Make sure the command is *really* dead before we
1924 			 * release the handle (and DMA resources) for reuse.
1925 			 */
1926 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1927 
1928 			/*
1929 			 * After this point, the comamnd is really dead.
1930 			 * After this point, the command is really dead.
1931 			if (XS_XFRLEN(xs)) {
1932 				ISP_DMAFREE(isp, xs, handle);
1933 			}
1934 			isp_destroy_handle(isp, handle);
1935 			xpt_print_path(xs->ccb_h.path);
1936 			isp_prt(isp, ISP_LOGWARN,
1937 			    "watchdog timeout for handle 0x%x", handle);
1938 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1939 			XS_CMD_C_WDOG(xs);
1940 			isp_done(xs);
1941 		} else {
1942 			u_int16_t nxti, optr;
1943 			ispreq_t local, *mp = &local, *qe;
1944 
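			/*
			 * First pass: clear the watchdog flag, re-arm the
			 * timer for one more second and, if a request queue
			 * entry is available, mark the command as being on
			 * grace time while a SYNC_ALL marker is pushed
			 * through the request queue, presumably to flush any
			 * straggling completion back to us.
			 */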
1945 			XS_CMD_C_WDOG(xs);
1946 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1947 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1948 				ISP_UNLOCK(isp);
1949 				return;
1950 			}
1951 			XS_CMD_S_GRACE(xs);
1952 			MEMZERO((void *) mp, sizeof (*mp));
1953 			mp->req_header.rqs_entry_count = 1;
1954 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1955 			mp->req_modifier = SYNC_ALL;
1956 			mp->req_target = XS_CHANNEL(xs) << 7;
1957 			isp_put_request(isp, mp, qe);
1958 			ISP_ADD_REQUEST(isp, nxti);
1959 		}
1960 	} else {
1961 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1962 	}
1963 	isp->isp_osinfo.intsok = iok;
1964 	ISP_UNLOCK(isp);
1965 }
1966 
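/*
 * Fibre Channel support thread: loop waiting for good loop/fabric state,
 * release the SIMQ if it was frozen for loop-down handling, then sleep
 * until a change notification (or an RQLATER requeue) wakes us up again.
 */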
1967 static void
1968 isp_kthread(void *arg)
1969 {
1970 	struct ispsoftc *isp = arg;
1971 
1972 #ifdef	ISP_SMPLOCK
1973 	mtx_lock(&isp->isp_lock);
1974 #else
1975 	mtx_lock(&Giant);
1976 #endif
1977 	/*
1978 	 * The first loop is for the case where we have yet to get
1979 	 * good fibre channel state.
1980 	 */
1981 	for (;;) {
1982 		int wasfrozen;
1983 
1984 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1985 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1986 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1987 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1988 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1989 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1990 				    isp->isp_osinfo.ktmature == 0) {
1991 					break;
1992 				}
1993 			}
1994 #ifdef	ISP_SMPLOCK
1995 			msleep(isp_kthread, &isp->isp_lock,
1996 			    PRIBIO, "isp_fcthrd", hz);
1997 #else
1998 			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1999 #endif
2000 		}
2001 
2002 		/*
2003 		 * Even if we didn't get good loop state we may be
2004 		 * unfreezing the SIMQ so that we can kill off
2005 		 * commands (if we've never seen loop before, for example).
2006 		 */
2007 		isp->isp_osinfo.ktmature = 1;
2008 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2009 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2010 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2011 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2012 			ISPLOCK_2_CAMLOCK(isp);
2013 			xpt_release_simq(isp->isp_sim, 1);
2014 			CAMLOCK_2_ISPLOCK(isp);
2015 		}
2016 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
2017 #ifdef	ISP_SMPLOCK
2018 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2019 #else
2020 		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
2021 #endif
2022 	}
2023 }
2024 
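/*
 * Main CAM action routine: dispatch on the CCB function code. Initiator
 * I/O is handed to isp_start(); target mode resources and control
 * operations are handled inline.
 */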
2025 static void
2026 isp_action(struct cam_sim *sim, union ccb *ccb)
2027 {
2028 	int bus, tgt, error;
2029 	struct ispsoftc *isp;
2030 	struct ccb_trans_settings *cts;
2031 
2032 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2033 
2034 	isp = (struct ispsoftc *)cam_sim_softc(sim);
2035 	ccb->ccb_h.sim_priv.entries[0].field = 0;
2036 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2037 	if (isp->isp_state != ISP_RUNSTATE &&
2038 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
2039 		CAMLOCK_2_ISPLOCK(isp);
2040 		isp_init(isp);
2041 		if (isp->isp_state != ISP_INITSTATE) {
2042 			ISP_UNLOCK(isp);
2043 			/*
2044 			 * Lie. Say it was a selection timeout.
2045 			 */
2046 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2047 			xpt_freeze_devq(ccb->ccb_h.path, 1);
2048 			xpt_done(ccb);
2049 			return;
2050 		}
2051 		isp->isp_state = ISP_RUNSTATE;
2052 		ISPLOCK_2_CAMLOCK(isp);
2053 	}
2054 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2055 
2057 	switch (ccb->ccb_h.func_code) {
2058 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2059 		/*
2060 		 * Do a couple of preliminary checks...
2061 		 */
2062 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2063 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2064 				ccb->ccb_h.status = CAM_REQ_INVALID;
2065 				xpt_done(ccb);
2066 				break;
2067 			}
2068 		}
2069 #ifdef	DIAGNOSTIC
2070 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2071 			ccb->ccb_h.status = CAM_PATH_INVALID;
2072 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2073 			ccb->ccb_h.status = CAM_PATH_INVALID;
2074 		}
2075 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2076 			isp_prt(isp, ISP_LOGERR,
2077 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2078 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2079 			xpt_done(ccb);
2080 			break;
2081 		}
2082 #endif
2083 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2084 		CAMLOCK_2_ISPLOCK(isp);
2085 		error = isp_start((XS_T *) ccb);
2086 		switch (error) {
2087 		case CMD_QUEUED:
2088 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
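			/*
			 * Arm a watchdog for this command. CAM timeouts are
			 * in milliseconds (CAM_TIME_DEFAULT maps to 60
			 * seconds here); convert to ticks, rounding up and
			 * padding with a couple of seconds of slack, and
			 * clamp the result so the callout argument cannot
			 * overflow.
			 */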
2089 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2090 				u_int64_t ticks = (u_int64_t) hz;
2091 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2092 					ticks = 60 * 1000 * ticks;
2093 				else
2094 					ticks = ccb->ccb_h.timeout * hz;
2095 				ticks = ((ticks + 999) / 1000) + hz + hz;
2096 				if (ticks >= 0x80000000) {
2097 					isp_prt(isp, ISP_LOGERR,
2098 					    "timeout overflow");
2099 					ticks = 0x7fffffff;
2100 				}
2101 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2102 				    (caddr_t)ccb, (int)ticks);
2103 			} else {
2104 				callout_handle_init(&ccb->ccb_h.timeout_ch);
2105 			}
2106 			ISPLOCK_2_CAMLOCK(isp);
2107 			break;
2108 		case CMD_RQLATER:
2109 			/*
2110 			 * This can only happen for Fibre Channel
2111 			 */
2112 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
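			/*
			 * If the loop has never come up and the FC thread
			 * has already matured, there is no point in
			 * requeueing; fail the command as a selection
			 * timeout. Otherwise wake the FC thread, freeze the
			 * SIMQ for loop-down handling and ask CAM to requeue
			 * the command.
			 */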
2113 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2114 			    isp->isp_osinfo.ktmature) {
2115 				ISPLOCK_2_CAMLOCK(isp);
2116 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2117 				xpt_done(ccb);
2118 				break;
2119 			}
2120 #ifdef	ISP_SMPLOCK
2121 			cv_signal(&isp->isp_osinfo.kthread_cv);
2122 #else
2123 			wakeup(&isp->isp_osinfo.kthread_cv);
2124 #endif
2125 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2126 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2127 			ISPLOCK_2_CAMLOCK(isp);
2128 			xpt_done(ccb);
2129 			break;
2130 		case CMD_EAGAIN:
2131 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2132 			ISPLOCK_2_CAMLOCK(isp);
2133 			xpt_done(ccb);
2134 			break;
2135 		case CMD_COMPLETE:
2136 			isp_done((struct ccb_scsiio *) ccb);
2137 			ISPLOCK_2_CAMLOCK(isp);
2138 			break;
2139 		default:
2140 			isp_prt(isp, ISP_LOGERR,
2141 			    "What's this? 0x%x at %d in file %s",
2142 			    error, __LINE__, __FILE__);
2143 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2144 			xpt_done(ccb);
2145 			ISPLOCK_2_CAMLOCK(isp);
2146 		}
2147 		break;
2148 
2149 #ifdef	ISP_TARGET_MODE
2150 	case XPT_EN_LUN:		/* Enable LUN as a target */
2151 	{
2152 		int seq, iok, i;
2153 		CAMLOCK_2_ISPLOCK(isp);
2154 		iok = isp->isp_osinfo.intsok;
2155 		isp->isp_osinfo.intsok = 0;
2156 		seq = isp_en_lun(isp, ccb);
2157 		if (seq < 0) {
2158 			isp->isp_osinfo.intsok = iok;
2159 			ISPLOCK_2_CAMLOCK(isp);
2160 			xpt_done(ccb);
2161 			break;
2162 		}
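		/*
		 * isp_en_lun() has started the lun enable sequence; poll the
		 * interrupt status for up to roughly 30 seconds until the
		 * leact entry for this sequence clears.
		 */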
2163 		for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) {
2164 			u_int16_t isr, sema, mbox;
2165 			if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2166 				isp_intr(isp, isr, sema, mbox);
2167 			}
2168 			DELAY(1000);
2169 		}
2170 		isp->isp_osinfo.intsok = iok;
2171 		ISPLOCK_2_CAMLOCK(isp);
2172 		break;
2173 	}
2174 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2175 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2176 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2177 	{
2178 		tstate_t *tptr =
2179 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2180 		if (tptr == NULL) {
2181 			ccb->ccb_h.status = CAM_LUN_INVALID;
2182 			xpt_done(ccb);
2183 			break;
2184 		}
2185 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2186 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2187 		ccb->ccb_h.flags = 0;
2188 
2189 		CAMLOCK_2_ISPLOCK(isp);
2190 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2191 			/*
2192 			 * Note that the command itself may not be done yet;
2193 			 * it may not even have had the first CTIO sent.
2194 			 */
2195 			tptr->atio_count++;
2196 			isp_prt(isp, ISP_LOGTDEBUG0,
2197 			    "Put FREE ATIO, lun %d, count now %d",
2198 			    ccb->ccb_h.target_lun, tptr->atio_count);
2199 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2200 			    sim_links.sle);
2201 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2202 			tptr->inot_count++;
2203 			isp_prt(isp, ISP_LOGTDEBUG0,
2204 			    "Put FREE INOT, lun %d, count now %d",
2205 			    ccb->ccb_h.target_lun, tptr->inot_count);
2206 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2207 			    sim_links.sle);
2208 		} else {
2209 			isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");
2210 		}
2211 		rls_lun_statep(isp, tptr);
2212 		ccb->ccb_h.status = CAM_REQ_INPROG;
2213 		ISPLOCK_2_CAMLOCK(isp);
2214 		break;
2215 	}
2216 	case XPT_CONT_TARGET_IO:
2217 	{
2218 		CAMLOCK_2_ISPLOCK(isp);
2219 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2220 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2221 			isp_prt(isp, ISP_LOGWARN,
2222 			    "XPT_CONT_TARGET_IO: status 0x%x",
2223 			    ccb->ccb_h.status);
2224 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2225 			ISPLOCK_2_CAMLOCK(isp);
2226 			xpt_done(ccb);
2227 		} else {
2228 			ISPLOCK_2_CAMLOCK(isp);
2229 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2230 		}
2231 		break;
2232 	}
2233 #endif
2234 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2235 
2236 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2237 		tgt = ccb->ccb_h.target_id;
2238 		tgt |= (bus << 16);
2239 
2240 		CAMLOCK_2_ISPLOCK(isp);
2241 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2242 		ISPLOCK_2_CAMLOCK(isp);
2243 		if (error) {
2244 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2245 		} else {
2246 			ccb->ccb_h.status = CAM_REQ_CMP;
2247 		}
2248 		xpt_done(ccb);
2249 		break;
2250 	case XPT_ABORT:			/* Abort the specified CCB */
2251 	{
2252 		union ccb *accb = ccb->cab.abort_ccb;
2253 		CAMLOCK_2_ISPLOCK(isp);
2254 		switch (accb->ccb_h.func_code) {
2255 #ifdef	ISP_TARGET_MODE
2256 		case XPT_ACCEPT_TARGET_IO:
2257 		case XPT_IMMED_NOTIFY:
2258 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2259 			break;
2260 		case XPT_CONT_TARGET_IO:
2261 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2262 			ccb->ccb_h.status = CAM_UA_ABORT;
2263 			break;
2264 #endif
2265 		case XPT_SCSI_IO:
2266 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2267 			if (error) {
2268 				ccb->ccb_h.status = CAM_UA_ABORT;
2269 			} else {
2270 				ccb->ccb_h.status = CAM_REQ_CMP;
2271 			}
2272 			break;
2273 		default:
2274 			ccb->ccb_h.status = CAM_REQ_INVALID;
2275 			break;
2276 		}
2277 		ISPLOCK_2_CAMLOCK(isp);
2278 		xpt_done(ccb);
2279 		break;
2280 	}
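/*
 * IS_CURRENT_SETTINGS() hides the difference between the old and new CAM
 * transport negotiation structures: with CAM_NEW_TRAN_CODE the distinction
 * is carried in cts->type, otherwise in cts->flags.
 */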
2281 #ifdef	CAM_NEW_TRAN_CODE
2282 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2283 #else
2284 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2285 #endif
2286 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2287 		cts = &ccb->cts;
2288 		if (!IS_CURRENT_SETTINGS(cts)) {
2289 			ccb->ccb_h.status = CAM_REQ_INVALID;
2290 			xpt_done(ccb);
2291 			break;
2292 		}
2293 		tgt = cts->ccb_h.target_id;
2294 		CAMLOCK_2_ISPLOCK(isp);
2295 		if (IS_SCSI(isp)) {
2296 #ifndef	CAM_NEW_TRAN_CODE
2297 			sdparam *sdp = isp->isp_param;
2298 			u_int16_t *dptr;
2299 
2300 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2301 
2302 			sdp += bus;
2303 			/*
2304 			 * We always update (internally) from goal_flags
2305 			 * so any request to change settings just gets
2306 			 * vectored to that location.
2307 			 */
2308 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2309 
2310 			/*
2311 			 * Note that these operations affect the
2312 			 * goal flags (goal_flags), not
2313 			 * the current state flags. Then we mark
2314 			 * things so that the next operation to
2315 			 * this HBA will cause the update to occur.
2316 			 */
2317 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2318 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2319 					*dptr |= DPARM_DISC;
2320 				} else {
2321 					*dptr &= ~DPARM_DISC;
2322 				}
2323 			}
2324 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2325 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2326 					*dptr |= DPARM_TQING;
2327 				} else {
2328 					*dptr &= ~DPARM_TQING;
2329 				}
2330 			}
2331 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2332 				switch (cts->bus_width) {
2333 				case MSG_EXT_WDTR_BUS_16_BIT:
2334 					*dptr |= DPARM_WIDE;
2335 					break;
2336 				default:
2337 					*dptr &= ~DPARM_WIDE;
2338 				}
2339 			}
2340 			/*
2341 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2342 			 * of nonzero will cause us to go to the
2343 			 * selected (from NVRAM) maximum value for
2344 			 * this device. At a later point, we'll
2345 			 * allow finer control.
2346 			 */
2347 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2348 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2349 			    (cts->sync_offset > 0)) {
2350 				*dptr |= DPARM_SYNC;
2351 			} else {
2352 				*dptr &= ~DPARM_SYNC;
2353 			}
2354 			*dptr |= DPARM_SAFE_DFLT;
2355 #else
2356 			struct ccb_trans_settings_scsi *scsi =
2357 			    &cts->proto_specific.scsi;
2358 			struct ccb_trans_settings_spi *spi =
2359 			    &cts->xport_specific.spi;
2360 			sdparam *sdp = isp->isp_param;
2361 			u_int16_t *dptr;
2362 
2363 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2364 			sdp += bus;
2365 			/*
2366 			 * We always update (internally) from goal_flags
2367 			 * so any request to change settings just gets
2368 			 * vectored to that location.
2369 			 */
2370 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2371 
2372 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2373 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2374 					*dptr |= DPARM_DISC;
2375 				else
2376 					*dptr &= ~DPARM_DISC;
2377 			}
2378 
2379 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2380 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2381 					*dptr |= DPARM_TQING;
2382 				else
2383 					*dptr &= ~DPARM_TQING;
2384 			}
2385 
2386 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2387 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2388 					*dptr |= DPARM_WIDE;
2389 				else
2390 					*dptr &= ~DPARM_WIDE;
2391 			}
2392 
2393 			/*
2394 			 * XXX: FIX ME
2395 			 */
2396 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2397 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2398 			    (spi->sync_period && spi->sync_offset)) {
2399 				*dptr |= DPARM_SYNC;
2400 				/*
2401 				 * XXX: CHECK FOR LEGALITY
2402 				 */
2403 				sdp->isp_devparam[tgt].goal_period =
2404 				    spi->sync_period;
2405 				sdp->isp_devparam[tgt].goal_offset =
2406 				    spi->sync_offset;
2407 			} else {
2408 				*dptr &= ~DPARM_SYNC;
2409 			}
2410 #endif
2411 			isp_prt(isp, ISP_LOGDEBUG0,
2412 			    "SET bus %d targ %d to flags %x off %x per %x",
2413 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2414 			    sdp->isp_devparam[tgt].goal_offset,
2415 			    sdp->isp_devparam[tgt].goal_period);
2416 			sdp->isp_devparam[tgt].dev_update = 1;
2417 			isp->isp_update |= (1 << bus);
2418 		}
2419 		ISPLOCK_2_CAMLOCK(isp);
2420 		ccb->ccb_h.status = CAM_REQ_CMP;
2421 		xpt_done(ccb);
2422 		break;
2423 	case XPT_GET_TRAN_SETTINGS:
2424 		cts = &ccb->cts;
2425 		tgt = cts->ccb_h.target_id;
2426 		CAMLOCK_2_ISPLOCK(isp);
2427 		if (IS_FC(isp)) {
2428 #ifndef	CAM_NEW_TRAN_CODE
2429 			/*
2430 			 * a lot of normal SCSI things don't make sense.
2431 			 */
2432 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2433 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2434 			/*
2435 			 * How do you measure the width of a high
2436 			 * speed serial bus? Well, in bytes.
2437 			 *
2438 			 * Offset and period make no sense, though, so we set
2439 			 * (above) a 'base' transfer speed to be gigabit.
2440 			 */
2441 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2442 #else
2443 			fcparam *fcp = isp->isp_param;
2444 			struct ccb_trans_settings_fc *fc =
2445 			    &cts->xport_specific.fc;
2446 
2447 			cts->protocol = PROTO_SCSI;
2448 			cts->protocol_version = SCSI_REV_2;
2449 			cts->transport = XPORT_FC;
2450 			cts->transport_version = 0;
2451 
2452 			fc->valid = CTS_FC_VALID_SPEED;
2453 			if (fcp->isp_gbspeed == 2)
2454 				fc->bitrate = 200000;
2455 			else
2456 				fc->bitrate = 100000;
2457 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2458 				struct lportdb *lp = &fcp->portdb[tgt];
2459 				fc->wwnn = lp->node_wwn;
2460 				fc->wwpn = lp->port_wwn;
2461 				fc->port = lp->portid;
2462 				fc->valid |= CTS_FC_VALID_WWNN |
2463 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2464 			}
2465 #endif
2466 		} else {
2467 #ifdef	CAM_NEW_TRAN_CODE
2468 			struct ccb_trans_settings_scsi *scsi =
2469 			    &cts->proto_specific.scsi;
2470 			struct ccb_trans_settings_spi *spi =
2471 			    &cts->xport_specific.spi;
2472 #endif
2473 			sdparam *sdp = isp->isp_param;
2474 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2475 			u_int16_t dval, pval, oval;
2476 
2477 			sdp += bus;
2478 
2479 			if (IS_CURRENT_SETTINGS(cts)) {
2480 				sdp->isp_devparam[tgt].dev_refresh = 1;
2481 				isp->isp_update |= (1 << bus);
2482 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2483 				    NULL);
2484 				dval = sdp->isp_devparam[tgt].actv_flags;
2485 				oval = sdp->isp_devparam[tgt].actv_offset;
2486 				pval = sdp->isp_devparam[tgt].actv_period;
2487 			} else {
2488 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2489 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2490 				pval = sdp->isp_devparam[tgt].nvrm_period;
2491 			}
2492 
2493 #ifndef	CAM_NEW_TRAN_CODE
2494 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2495 
2496 			if (dval & DPARM_DISC) {
2497 				cts->flags |= CCB_TRANS_DISC_ENB;
2498 			}
2499 			if (dval & DPARM_TQING) {
2500 				cts->flags |= CCB_TRANS_TAG_ENB;
2501 			}
2502 			if (dval & DPARM_WIDE) {
2503 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2504 			} else {
2505 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2506 			}
2507 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2508 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2509 
2510 			if ((dval & DPARM_SYNC) && oval != 0) {
2511 				cts->sync_period = pval;
2512 				cts->sync_offset = oval;
2513 				cts->valid |=
2514 				    CCB_TRANS_SYNC_RATE_VALID |
2515 				    CCB_TRANS_SYNC_OFFSET_VALID;
2516 			}
2517 #else
2518 			cts->protocol = PROTO_SCSI;
2519 			cts->protocol_version = SCSI_REV_2;
2520 			cts->transport = XPORT_SPI;
2521 			cts->transport_version = 2;
2522 
2523 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2524 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2525 			if (dval & DPARM_DISC) {
2526 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2527 			}
2528 			if (dval & DPARM_TQING) {
2529 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2530 			}
2531 			if ((dval & DPARM_SYNC) && oval && pval) {
2532 				spi->sync_offset = oval;
2533 				spi->sync_period = pval;
2534 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2535 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2536 			}
2537 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2538 			if (dval & DPARM_WIDE) {
2539 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2540 			} else {
2541 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2542 			}
2543 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2544 				scsi->valid = CTS_SCSI_VALID_TQ;
2545 				spi->valid |= CTS_SPI_VALID_DISC;
2546 			} else {
2547 				scsi->valid = 0;
2548 			}
2549 #endif
2550 			isp_prt(isp, ISP_LOGDEBUG0,
2551 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2552 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2553 			    bus, tgt, dval, oval, pval);
2554 		}
2555 		ISPLOCK_2_CAMLOCK(isp);
2556 		ccb->ccb_h.status = CAM_REQ_CMP;
2557 		xpt_done(ccb);
2558 		break;
2559 
2560 	case XPT_CALC_GEOMETRY:
2561 	{
2562 		struct ccb_calc_geometry *ccg;
2563 
2564 		ccg = &ccb->ccg;
2565 		if (ccg->block_size == 0) {
2566 			isp_prt(isp, ISP_LOGERR,
2567 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2568 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2569 			ccb->ccb_h.status = CAM_REQ_INVALID;
2570 			xpt_done(ccb);
2571 			break;
2572 		}
2573 		cam_calc_geometry(ccg, /*extended*/1);
2574 		xpt_done(ccb);
2575 		break;
2576 	}
2577 	case XPT_RESET_BUS:		/* Reset the specified bus */
2578 		bus = cam_sim_bus(sim);
2579 		CAMLOCK_2_ISPLOCK(isp);
2580 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2581 		ISPLOCK_2_CAMLOCK(isp);
2582 		if (error)
2583 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2584 		else {
2585 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2586 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2587 			else if (isp->isp_path != NULL)
2588 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2589 			ccb->ccb_h.status = CAM_REQ_CMP;
2590 		}
2591 		xpt_done(ccb);
2592 		break;
2593 
2594 	case XPT_TERM_IO:		/* Terminate the I/O process */
2595 		ccb->ccb_h.status = CAM_REQ_INVALID;
2596 		xpt_done(ccb);
2597 		break;
2598 
2599 	case XPT_PATH_INQ:		/* Path routing inquiry */
2600 	{
2601 		struct ccb_pathinq *cpi = &ccb->cpi;
2602 
2603 		cpi->version_num = 1;
2604 #ifdef	ISP_TARGET_MODE
2605 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2606 #else
2607 		cpi->target_sprt = 0;
2608 #endif
2609 		cpi->hba_eng_cnt = 0;
2610 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2611 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2612 		cpi->bus_id = cam_sim_bus(sim);
2613 		if (IS_FC(isp)) {
2614 			cpi->hba_misc = PIM_NOBUSRESET;
2615 			/*
2616 			 * Because our loop ID can shift from time to time,
2617 			 * make our initiator ID out of range of our bus.
2618 			 */
2619 			cpi->initiator_id = cpi->max_target + 1;
2620 
2621 			/*
2622 			 * Set base transfer capabilities for Fibre Channel.
2623 			 * Technically not correct because we don't know
2624 			 * what media we're running on top of, but we'll
2625 			 * look good if we always say 100MB/s.
2626 			 */
2627 			if (FCPARAM(isp)->isp_gbspeed == 2)
2628 				cpi->base_transfer_speed = 200000;
2629 			else
2630 				cpi->base_transfer_speed = 100000;
2631 			cpi->hba_inquiry = PI_TAG_ABLE;
2632 #ifdef	CAM_NEW_TRAN_CODE
2633 			cpi->transport = XPORT_FC;
2634 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2635 #endif
2636 		} else {
2637 			sdparam *sdp = isp->isp_param;
2638 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2639 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2640 			cpi->hba_misc = 0;
2641 			cpi->initiator_id = sdp->isp_initiator_id;
2642 			cpi->base_transfer_speed = 3300;
2643 #ifdef	CAM_NEW_TRAN_CODE
2644 			cpi->transport = XPORT_SPI;
2645 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2646 #endif
2647 		}
2648 #ifdef	CAM_NEW_TRAN_CODE
2649 		cpi->protocol = PROTO_SCSI;
2650 		cpi->protocol_version = SCSI_REV_2;
2651 #endif
2652 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2653 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2654 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2655 		cpi->unit_number = cam_sim_unit(sim);
2656 		cpi->ccb_h.status = CAM_REQ_CMP;
2657 		xpt_done(ccb);
2658 		break;
2659 	}
2660 	default:
2661 		ccb->ccb_h.status = CAM_REQ_INVALID;
2662 		xpt_done(ccb);
2663 		break;
2664 	}
2665 }
2666 
2667 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
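/*
 * Command completion from the core driver back to CAM: translate the SCSI
 * status into a CAM status if need be, freeze the device queue on any
 * error, and (unless the watchdog itself is running the command down)
 * cancel the watchdog and hand the CCB back via xpt_done().
 */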
2668 void
2669 isp_done(struct ccb_scsiio *sccb)
2670 {
2671 	struct ispsoftc *isp = XS_ISP(sccb);
2672 
2673 	if (XS_NOERR(sccb))
2674 		XS_SETERR(sccb, CAM_REQ_CMP);
2675 
2676 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2677 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2678 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2679 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2680 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2681 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2682 		} else {
2683 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2684 		}
2685 	}
2686 
2687 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2688 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2689 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2690 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2691 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2692 			isp_prt(isp, ISP_LOGDEBUG0,
2693 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2694 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2695 			    sccb->ccb_h.status, sccb->scsi_status);
2696 		}
2697 	}
2698 
2699 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2700 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2701 		xpt_print_path(sccb->ccb_h.path);
2702 		isp_prt(isp, ISP_LOGINFO,
2703 		    "cam completion status 0x%x", sccb->ccb_h.status);
2704 	}
2705 
2706 	XS_CMD_S_DONE(sccb);
2707 	if (XS_CMD_WDOG_P(sccb) == 0) {
2708 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2709 		if (XS_CMD_GRACE_P(sccb)) {
2710 			isp_prt(isp, ISP_LOGDEBUG2,
2711 			    "finished command on borrowed time");
2712 		}
2713 		XS_CMD_S_CLEAR(sccb);
2714 		ISPLOCK_2_CAMLOCK(isp);
2715 		xpt_done((union ccb *) sccb);
2716 		CAMLOCK_2_ISPLOCK(isp);
2717 	}
2718 }
2719 
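/*
 * Platform handler for asynchronous events posted by the core driver:
 * transfer negotiation updates, bus resets, FC loop and fabric changes,
 * target mode actions and firmware crashes all funnel through here.
 */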
2720 int
2721 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2722 {
2723 	int bus, rv = 0;
2724 	switch (cmd) {
2725 	case ISPASYNC_NEW_TGT_PARAMS:
2726 	{
2727 #ifdef	CAM_NEW_TRAN_CODE
2728 		struct ccb_trans_settings_scsi *scsi;
2729 		struct ccb_trans_settings_spi *spi;
2730 #endif
2731 		int flags, tgt;
2732 		sdparam *sdp = isp->isp_param;
2733 		struct ccb_trans_settings cts;
2734 		struct cam_path *tmppath;
2735 
2736 		bzero(&cts, sizeof (struct ccb_trans_settings));
2737 
2738 		tgt = *((int *)arg);
2739 		bus = (tgt >> 16) & 0xffff;
2740 		tgt &= 0xffff;
2741 		sdp += bus;
2742 		ISPLOCK_2_CAMLOCK(isp);
2743 		if (xpt_create_path(&tmppath, NULL,
2744 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2745 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2746 			CAMLOCK_2_ISPLOCK(isp);
2747 			isp_prt(isp, ISP_LOGWARN,
2748 			    "isp_async cannot make temp path for %d.%d",
2749 			    tgt, bus);
2750 			rv = -1;
2751 			break;
2752 		}
2753 		CAMLOCK_2_ISPLOCK(isp);
2754 		flags = sdp->isp_devparam[tgt].actv_flags;
2755 #ifdef	CAM_NEW_TRAN_CODE
2756 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2757 		cts.protocol = PROTO_SCSI;
2758 		cts.transport = XPORT_SPI;
2759 
2760 		scsi = &cts.proto_specific.scsi;
2761 		spi = &cts.xport_specific.spi;
2762 
2763 		if (flags & DPARM_TQING) {
2764 			scsi->valid |= CTS_SCSI_VALID_TQ;
2765 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2766 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2767 		}
2768 
2769 		if (flags & DPARM_DISC) {
2770 			spi->valid |= CTS_SPI_VALID_DISC;
2771 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2772 		}
2773 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2774 		if (flags & DPARM_WIDE) {
2775 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2776 		} else {
2777 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2778 		}
2779 		if (flags & DPARM_SYNC) {
2780 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2781 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2782 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2783 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2784 		}
2785 #else
2786 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2787 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2788 		if (flags & DPARM_DISC) {
2789 			cts.flags |= CCB_TRANS_DISC_ENB;
2790 		}
2791 		if (flags & DPARM_TQING) {
2792 			cts.flags |= CCB_TRANS_TAG_ENB;
2793 		}
2794 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2795 		cts.bus_width = (flags & DPARM_WIDE)?
2796 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2797 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2798 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2799 		if (flags & DPARM_SYNC) {
2800 			cts.valid |=
2801 			    CCB_TRANS_SYNC_RATE_VALID |
2802 			    CCB_TRANS_SYNC_OFFSET_VALID;
2803 		}
2804 #endif
2805 		isp_prt(isp, ISP_LOGDEBUG2,
2806 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2807 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2808 		    sdp->isp_devparam[tgt].actv_offset, flags);
2809 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2810 		ISPLOCK_2_CAMLOCK(isp);
2811 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2812 		xpt_free_path(tmppath);
2813 		CAMLOCK_2_ISPLOCK(isp);
2814 		break;
2815 	}
2816 	case ISPASYNC_BUS_RESET:
2817 		bus = *((int *)arg);
2818 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2819 		    bus);
2820 		if (bus > 0 && isp->isp_path2) {
2821 			ISPLOCK_2_CAMLOCK(isp);
2822 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2823 			CAMLOCK_2_ISPLOCK(isp);
2824 		} else if (isp->isp_path) {
2825 			ISPLOCK_2_CAMLOCK(isp);
2826 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2827 			CAMLOCK_2_ISPLOCK(isp);
2828 		}
2829 		break;
2830 	case ISPASYNC_LIP:
2831 		if (isp->isp_path) {
2832 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2833 		}
2834 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2835 		break;
2836 	case ISPASYNC_LOOP_RESET:
2837 		if (isp->isp_path) {
2838 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2839 		}
2840 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2841 		break;
2842 	case ISPASYNC_LOOP_DOWN:
2843 		if (isp->isp_path) {
2844 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2845 		}
2846 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2847 		break;
2848 	case ISPASYNC_LOOP_UP:
2849 		/*
2850 		 * Now we just note that Loop has come up. We don't
2851 		 * actually do anything because we're waiting for a
2852 		 * Change Notify before activating the FC cleanup
2853 		 * thread to look at the state of the loop again.
2854 		 */
2855 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2856 		break;
2857 	case ISPASYNC_PROMENADE:
2858 	{
2859 		struct cam_path *tmppath;
2860 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2861 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2862 		static const char *roles[4] = {
2863 		    "(none)", "Target", "Initiator", "Target/Initiator"
2864 		};
2865 		fcparam *fcp = isp->isp_param;
2866 		int tgt = *((int *) arg);
2867 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2868 		struct lportdb *lp = &fcp->portdb[tgt];
2869 
2870 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2871 		    roles[lp->roles & 0x3],
2872 		    (lp->valid)? "Arrived" : "Departed",
2873 		    (u_int32_t) (lp->port_wwn >> 32),
2874 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2875 		    (u_int32_t) (lp->node_wwn >> 32),
2876 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2877 
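		/*
		 * Build a wildcard-lun path to this target so CAM can be
		 * told whether the device arrived or departed.
		 */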
2878 		ISPLOCK_2_CAMLOCK(isp);
2879 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2880 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2881 			CAMLOCK_2_ISPLOCK(isp);
2882 			break;
2883 		}
2884 		/*
2885 		 * Policy: only announce targets.
2886 		 */
2887 		if (lp->roles & is_tgt_mask) {
2888 			if (lp->valid) {
2889 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2890 			} else {
2891 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2892 			}
2893 		}
2894 		xpt_free_path(tmppath);
2895 		CAMLOCK_2_ISPLOCK(isp);
2896 		break;
2897 	}
2898 	case ISPASYNC_CHANGE_NOTIFY:
2899 		if (arg == ISPASYNC_CHANGE_PDB) {
2900 			isp_prt(isp, ISP_LOGINFO,
2901 			    "Port Database Changed");
2902 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2903 			isp_prt(isp, ISP_LOGINFO,
2904 			    "Name Server Database Changed");
2905 		}
2906 #ifdef	ISP_SMPLOCK
2907 		cv_signal(&isp->isp_osinfo.kthread_cv);
2908 #else
2909 		wakeup(&isp->isp_osinfo.kthread_cv);
2910 #endif
2911 		break;
2912 	case ISPASYNC_FABRIC_DEV:
2913 	{
2914 		int target, base, lim;
2915 		fcparam *fcp = isp->isp_param;
2916 		struct lportdb *lp = NULL;
2917 		struct lportdb *clp = (struct lportdb *) arg;
2918 		char *pt;
2919 
2920 		switch (clp->port_type) {
2921 		case 1:
2922 			pt = "   N_Port";
2923 			break;
2924 		case 2:
2925 			pt = "  NL_Port";
2926 			break;
2927 		case 3:
2928 			pt = "F/NL_Port";
2929 			break;
2930 		case 0x7f:
2931 			pt = "  Nx_Port";
2932 			break;
2933 		case 0x81:
2934 			pt = "  F_port";
2935 			break;
2936 		case 0x82:
2937 			pt = "  FL_Port";
2938 			break;
2939 		case 0x84:
2940 			pt = "   E_port";
2941 			break;
2942 		default:
2943 			pt = " ";
2944 			break;
2945 		}
2946 
2947 		isp_prt(isp, ISP_LOGINFO,
2948 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2949 
2950 		/*
2951 		 * If we don't have an initiator role we bail.
2952 		 *
2953 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2954 		 */
2955 
2956 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2957 			break;
2958 		}
2959 
2960 		/*
2961 		 * Is this entry for us? If so, we bail.
2962 		 */
2963 
2964 		if (fcp->isp_portid == clp->portid) {
2965 			break;
2966 		}
2967 
2968 		/*
2969 		 * Else, the default policy is to find room for it in
2970 		 * our local port database. Later, when we execute
2971 		 * the call to isp_pdb_sync, either this newly arrived
2972 		 * or already logged-in device will be (re)announced.
2973 		 */
2974 
2975 		if (fcp->isp_topo == TOPO_FL_PORT)
2976 			base = FC_SNS_ID+1;
2977 		else
2978 			base = 0;
2979 
2980 		if (fcp->isp_topo == TOPO_N_PORT)
2981 			lim = 1;
2982 		else
2983 			lim = MAX_FC_TARG;
2984 
2985 		/*
2986 		 * Is it already in our list?
2987 		 */
2988 		for (target = base; target < lim; target++) {
2989 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2990 				continue;
2991 			}
2992 			lp = &fcp->portdb[target];
2993 			if (lp->port_wwn == clp->port_wwn &&
2994 			    lp->node_wwn == clp->node_wwn) {
2995 				lp->fabric_dev = 1;
2996 				break;
2997 			}
2998 		}
2999 		if (target < lim) {
3000 			break;
3001 		}
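		/*
		 * Not found: look for a free slot (port_wwn == 0) in the
		 * local port database.
		 */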
3002 		for (target = base; target < lim; target++) {
3003 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
3004 				continue;
3005 			}
3006 			lp = &fcp->portdb[target];
3007 			if (lp->port_wwn == 0) {
3008 				break;
3009 			}
3010 		}
3011 		if (target == lim) {
3012 			isp_prt(isp, ISP_LOGWARN,
3013 			    "out of space for fabric devices");
3014 			break;
3015 		}
3016 		lp->port_type = clp->port_type;
3017 		lp->fc4_type = clp->fc4_type;
3018 		lp->node_wwn = clp->node_wwn;
3019 		lp->port_wwn = clp->port_wwn;
3020 		lp->portid = clp->portid;
3021 		lp->fabric_dev = 1;
3022 		break;
3023 	}
3024 #ifdef	ISP_TARGET_MODE
3025 	case ISPASYNC_TARGET_MESSAGE:
3026 	{
3027 		tmd_msg_t *mp = arg;
3028 		isp_prt(isp, ISP_LOGALL,
3029 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3030 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3031 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3032 		    mp->nt_msg[0]);
3033 		break;
3034 	}
3035 	case ISPASYNC_TARGET_EVENT:
3036 	{
3037 		tmd_event_t *ep = arg;
3038 		if (ep->ev_event == ASYNC_CTIO_DONE) {
3039 			/*
3040 			 * ACK the interrupt first
3041 			 */
3042 			ISP_WRITE(isp, BIU_SEMA, 0);
3043 			ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
3044 			isp_handle_platform_ctio_fastpost(isp, ep->ev_bus);
3045 			break;
3046 		}
3047 		isp_prt(isp, ISP_LOGALL,
3048 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
3049 		break;
3050 	}
3051 	case ISPASYNC_TARGET_ACTION:
3052 		switch (((isphdr_t *)arg)->rqs_entry_type) {
3053 		default:
3054 			isp_prt(isp, ISP_LOGWARN,
3055 			   "event 0x%x for unhandled target action",
3056 			    ((isphdr_t *)arg)->rqs_entry_type);
3057 			break;
3058 		case RQSTYPE_NOTIFY:
3059 			if (IS_SCSI(isp)) {
3060 				rv = isp_handle_platform_notify_scsi(isp,
3061 				    (in_entry_t *) arg);
3062 			} else {
3063 				rv = isp_handle_platform_notify_fc(isp,
3064 				    (in_fcentry_t *) arg);
3065 			}
3066 			break;
3067 		case RQSTYPE_ATIO:
3068 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3069 			break;
3070 		case RQSTYPE_ATIO2:
3071 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3072 			break;
3073 		case RQSTYPE_CTIO2:
3074 		case RQSTYPE_CTIO:
3075 			rv = isp_handle_platform_ctio(isp, arg);
3076 			break;
3077 		case RQSTYPE_ENABLE_LUN:
3078 		case RQSTYPE_MODIFY_LUN:
3079 			isp_ledone(isp, (lun_entry_t *) arg);
3080 			break;
3081 		}
3082 		break;
3083 #endif
3084 	case ISPASYNC_FW_CRASH:
3085 	{
3086 		u_int16_t mbox1, mbox6;
3087 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3088 		if (IS_DUALBUS(isp)) {
3089 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
3090 		} else {
3091 			mbox6 = 0;
3092 		}
3093 		isp_prt(isp, ISP_LOGERR,
3094 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3095 		    mbox6, mbox1);
3096 #ifdef	ISP_FW_CRASH_DUMP
3097 		/*
3098 		 * XXX: really need a thread to do this right.
3099 		 */
3100 		if (IS_FC(isp)) {
3101 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3102 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3103 			isp_freeze_loopdown(isp, "f/w crash");
3104 			isp_fw_dump(isp);
3105 		}
3106 		isp_reinit(isp);
3107 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3108 #endif
3109 		break;
3110 	}
3111 	case ISPASYNC_UNHANDLED_RESPONSE:
3112 		break;
3113 	default:
3114 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3115 		break;
3116 	}
3117 	return (rv);
3118 }
3119 
3120 
3121 /*
3122  * Locks are held before coming here.
3123  */
3124 void
3125 isp_uninit(struct ispsoftc *isp)
3126 {
3127 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3128 	DISABLE_INTS(isp);
3129 }
3130 
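/*
 * Driver printf: messages are prefixed with the device name and only
 * emitted when the requested level is enabled in isp_dblev (ISP_LOGALL
 * always prints).
 */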
3131 void
3132 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3133 {
3134 	va_list ap;
3135 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3136 		return;
3137 	}
3138 	printf("%s: ", device_get_nameunit(isp->isp_dev));
3139 	va_start(ap, fmt);
3140 	vprintf(fmt, ap);
3141 	va_end(ap);
3142 	printf("\n");
3143 }
3144