xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 71fe318b852b8dfb3e799cb12ef184750f7f8eac)
1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
31 #include <machine/stdarg.h>	/* for use by isp_prt below */
32 #include <sys/conf.h>
33 #include <sys/module.h>
34 #include <sys/ioccom.h>
35 #include <dev/isp/isp_ioctl.h>
36 
37 
38 MODULE_VERSION(isp, 1);
39 int isp_announced = 0;
40 ispfwfunc *isp_get_firmware_p = NULL;
41 
42 static d_ioctl_t ispioctl;
43 static void isp_intr_enable(void *);
44 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
45 static void isp_poll(struct cam_sim *);
46 static timeout_t isp_watchdog;
47 static void isp_kthread(void *);
48 static void isp_action(struct cam_sim *, union ccb *);
49 
50 
51 #define ISP_CDEV_MAJOR	248
52 static struct cdevsw isp_cdevsw = {
53 	/* open */	nullopen,
54 	/* close */	nullclose,
55 	/* read */	noread,
56 	/* write */	nowrite,
57 	/* ioctl */	ispioctl,
58 	/* poll */	nopoll,
59 	/* mmap */	nommap,
60 	/* strategy */	nostrategy,
61 	/* name */	"isp",
62 	/* maj */	ISP_CDEV_MAJOR,
63 	/* dump */	nodump,
64 	/* psize */	nopsize,
65 	/* flags */	D_TAPE,
66 };
67 
68 static struct ispsoftc *isplist = NULL;
69 
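/*
 * Platform attach: allocate the CAM device queue and SIM(s), register the
 * bus(es) with CAM, hook the interrupt-enable callback, start the fibre
 * channel support thread where appropriate and create the control device.
 */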
70 void
71 isp_attach(struct ispsoftc *isp)
72 {
73 	int primary, secondary;
74 	struct ccb_setasync csa;
75 	struct cam_devq *devq;
76 	struct cam_sim *sim;
77 	struct cam_path *path;
78 
79 	/*
80 	 * Establish (in case of 12X0) which bus is the primary.
81 	 */
82 
83 	primary = 0;
84 	secondary = 1;
85 
86 	/*
87 	 * Create the device queue for our SIM(s).
88 	 */
89 	devq = cam_simq_alloc(isp->isp_maxcmds);
90 	if (devq == NULL) {
91 		return;
92 	}
93 
94 	/*
95 	 * Construct our SIM entry.
96 	 */
97 	ISPLOCK_2_CAMLOCK(isp);
98 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
99 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
100 	if (sim == NULL) {
101 		cam_simq_free(devq);
102 		CAMLOCK_2_ISPLOCK(isp);
103 		return;
104 	}
105 	CAMLOCK_2_ISPLOCK(isp);
106 
107 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
108 	isp->isp_osinfo.ehook.ich_arg = isp;
109 	ISPLOCK_2_CAMLOCK(isp);
110 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
111 		cam_sim_free(sim, TRUE);
112 		CAMLOCK_2_ISPLOCK(isp);
113 		isp_prt(isp, ISP_LOGERR,
114 		    "could not establish interrupt enable hook");
115 		return;
116 	}
117 
118 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
119 		cam_sim_free(sim, TRUE);
120 		CAMLOCK_2_ISPLOCK(isp);
121 		return;
122 	}
123 
124 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
125 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
126 		xpt_bus_deregister(cam_sim_path(sim));
127 		cam_sim_free(sim, TRUE);
128 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
129 		CAMLOCK_2_ISPLOCK(isp);
130 		return;
131 	}
132 
133 	xpt_setup_ccb(&csa.ccb_h, path, 5);
134 	csa.ccb_h.func_code = XPT_SASYNC_CB;
135 	csa.event_enable = AC_LOST_DEVICE;
136 	csa.callback = isp_cam_async;
137 	csa.callback_arg = sim;
138 	xpt_action((union ccb *)&csa);
139 	CAMLOCK_2_ISPLOCK(isp);
140 	isp->isp_sim = sim;
141 	isp->isp_path = path;
142 	/*
143 	 * Create a kernel thread for fibre channel instances. We
144 	 * don't have dual channel FC cards.
145 	 */
146 	if (IS_FC(isp)) {
147 		ISPLOCK_2_CAMLOCK(isp);
148 		/* XXX: LOCK VIOLATION */
149 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
150 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
151 		    RFHIGHPID, 0, "%s: fc_thrd",
152 		    device_get_nameunit(isp->isp_dev))) {
153 			xpt_bus_deregister(cam_sim_path(sim));
154 			cam_sim_free(sim, TRUE);
155 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
156 			CAMLOCK_2_ISPLOCK(isp);
157 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
158 			return;
159 		}
160 		CAMLOCK_2_ISPLOCK(isp);
161 	}
162 
163 
164 	/*
165 	 * If we have a second channel, construct SIM entry for that.
166 	 */
167 	if (IS_DUALBUS(isp)) {
168 		ISPLOCK_2_CAMLOCK(isp);
169 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
170 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
171 		if (sim == NULL) {
172 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
173 			xpt_free_path(isp->isp_path);
174 			cam_simq_free(devq);
175 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
176 			return;
177 		}
178 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
179 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
180 			xpt_free_path(isp->isp_path);
181 			cam_sim_free(sim, TRUE);
182 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
183 			CAMLOCK_2_ISPLOCK(isp);
184 			return;
185 		}
186 
187 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
188 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
189 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
190 			xpt_free_path(isp->isp_path);
191 			xpt_bus_deregister(cam_sim_path(sim));
192 			cam_sim_free(sim, TRUE);
193 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
194 			CAMLOCK_2_ISPLOCK(isp);
195 			return;
196 		}
197 
198 		xpt_setup_ccb(&csa.ccb_h, path, 5);
199 		csa.ccb_h.func_code = XPT_SASYNC_CB;
200 		csa.event_enable = AC_LOST_DEVICE;
201 		csa.callback = isp_cam_async;
202 		csa.callback_arg = sim;
203 		xpt_action((union ccb *)&csa);
204 		CAMLOCK_2_ISPLOCK(isp);
205 		isp->isp_sim2 = sim;
206 		isp->isp_path2 = path;
207 	}
208 
209 #ifdef	ISP_TARGET_MODE
210 	cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
211 	cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
212 	cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
213 	cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
214 #endif
215 	/*
216 	 * Create device nodes
217 	 */
218 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
219 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
220 
221 	if (isp->isp_role != ISP_ROLE_NONE) {
222 		isp->isp_state = ISP_RUNSTATE;
223 		ENABLE_INTS(isp);
224 	}
225 	if (isplist == NULL) {
226 		isplist = isp;
227 	} else {
228 		struct ispsoftc *tmp = isplist;
229 		while (tmp->isp_osinfo.next) {
230 			tmp = tmp->isp_osinfo.next;
231 		}
232 		tmp->isp_osinfo.next = isp;
233 	}
234 
235 }
236 
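/*
 * Freeze the SIM queue while the (fibre channel) loop is down. Only the
 * first caller actually freezes the queue; later callers just record the
 * SIMQFRZ_LOOPDOWN reason.
 */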
237 static INLINE void
238 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
239 {
240 	if (isp->isp_osinfo.simqfrozen == 0) {
241 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
242 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
243 		ISPLOCK_2_CAMLOCK(isp);
244 		xpt_freeze_simq(isp->isp_sim, 1);
245 		CAMLOCK_2_ISPLOCK(isp);
246 	} else {
247 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
248 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
249 	}
250 }
251 
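/*
 * Control device ioctl handler: find the instance whose unit number matches
 * the device minor and dispatch the ISP_* commands from isp_ioctl.h.
 */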
252 static int
253 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
254 {
255 	struct ispsoftc *isp;
256 	int retval = ENOTTY;
257 
258 	isp = isplist;
259 	while (isp) {
260 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
261 			break;
262 		}
263 		isp = isp->isp_osinfo.next;
264 	}
265 	if (isp == NULL)
266 		return (ENXIO);
267 
268 	switch (cmd) {
269 #ifdef	ISP_FW_CRASH_DUMP
270 	case ISP_GET_FW_CRASH_DUMP:
271 	{
272 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
273 		size_t sz;
274 
275 		retval = 0;
276 		if (IS_2200(isp))
277 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
278 		else
279 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
280 		ISP_LOCK(isp);
281 		if (ptr && *ptr) {
282 			void *uaddr = *((void **) addr);
283 			if (copyout(ptr, uaddr, sz)) {
284 				retval = EFAULT;
285 			} else {
286 				*ptr = 0;
287 			}
288 		} else {
289 			retval = ENXIO;
290 		}
291 		ISP_UNLOCK(isp);
292 		break;
293 	}
294 
295 	case ISP_FORCE_CRASH_DUMP:
296 		ISP_LOCK(isp);
297 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
298 		isp_fw_dump(isp);
299 		isp_reinit(isp);
300 		ISP_UNLOCK(isp);
301 		retval = 0;
302 		break;
303 #endif
304 	case ISP_SDBLEV:
305 	{
306 		int olddblev = isp->isp_dblev;
307 		isp->isp_dblev = *(int *)addr;
308 		*(int *)addr = olddblev;
309 		retval = 0;
310 		break;
311 	}
312 	case ISP_RESETHBA:
313 		ISP_LOCK(isp);
314 		isp_reinit(isp);
315 		ISP_UNLOCK(isp);
316 		retval = 0;
317 		break;
318 	case ISP_RESCAN:
319 		if (IS_FC(isp)) {
320 			ISP_LOCK(isp);
321 			if (isp_fc_runstate(isp, 5 * 1000000)) {
322 				retval = EIO;
323 			} else {
324 				retval = 0;
325 			}
326 			ISP_UNLOCK(isp);
327 		}
328 		break;
329 	case ISP_FC_LIP:
330 		if (IS_FC(isp)) {
331 			ISP_LOCK(isp);
332 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
333 				retval = EIO;
334 			} else {
335 				retval = 0;
336 			}
337 			ISP_UNLOCK(isp);
338 		}
339 		break;
340 	case ISP_FC_GETDINFO:
341 	{
342 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
343 		struct lportdb *lp;
344 
345 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
346 			retval = EINVAL;
347 			break;
348 		}
349 		ISP_LOCK(isp);
350 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
351 		if (lp->valid) {
352 			ifc->loopid = lp->loopid;
353 			ifc->portid = lp->portid;
354 			ifc->node_wwn = lp->node_wwn;
355 			ifc->port_wwn = lp->port_wwn;
356 			retval = 0;
357 		} else {
358 			retval = ENODEV;
359 		}
360 		ISP_UNLOCK(isp);
361 		break;
362 	}
363 	case ISP_GET_STATS:
364 	{
365 		isp_stats_t *sp = (isp_stats_t *) addr;
366 
367 		MEMZERO(sp, sizeof (*sp));
368 		sp->isp_stat_version = ISP_STATS_VERSION;
369 		sp->isp_type = isp->isp_type;
370 		sp->isp_revision = isp->isp_revision;
371 		ISP_LOCK(isp);
372 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
373 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
374 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
375 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
376 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
377 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
378 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
379 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
380 		ISP_UNLOCK(isp);
381 		retval = 0;
382 		break;
383 	}
384 	case ISP_CLR_STATS:
385 		ISP_LOCK(isp);
386 		isp->isp_intcnt = 0;
387 		isp->isp_intbogus = 0;
388 		isp->isp_intmboxc = 0;
389 		isp->isp_intoasync = 0;
390 		isp->isp_rsltccmplt = 0;
391 		isp->isp_fphccmplt = 0;
392 		isp->isp_rscchiwater = 0;
393 		isp->isp_fpcchiwater = 0;
394 		ISP_UNLOCK(isp);
395 		retval = 0;
396 		break;
397 	case ISP_FC_GETHINFO:
398 	{
399 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
400 		MEMZERO(hba, sizeof (*hba));
401 		ISP_LOCK(isp);
402 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
403 		hba->fc_scsi_supported = 1;
404 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
405 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
406 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
407 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
408 		ISP_UNLOCK(isp);
409 		retval = 0;
410 		break;
411 	}
412 	case ISP_GET_FC_PARAM:
413 	{
414 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
415 
416 		if (!IS_FC(isp)) {
417 			retval = EINVAL;
418 			break;
419 		}
420 		f->parameter = 0;
421 		if (strcmp(f->param_name, "framelength") == 0) {
422 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
423 			retval = 0;
424 			break;
425 		}
426 		if (strcmp(f->param_name, "exec_throttle") == 0) {
427 			f->parameter = FCPARAM(isp)->isp_execthrottle;
428 			retval = 0;
429 			break;
430 		}
431 		if (strcmp(f->param_name, "fullduplex") == 0) {
432 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
433 				f->parameter = 1;
434 			retval = 0;
435 			break;
436 		}
437 		if (strcmp(f->param_name, "loopid") == 0) {
438 			f->parameter = FCPARAM(isp)->isp_loopid;
439 			retval = 0;
440 			break;
441 		}
442 		retval = EINVAL;
443 		break;
444 	}
445 	case ISP_SET_FC_PARAM:
446 	{
447 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
448 		u_int32_t param = f->parameter;
449 
450 		if (!IS_FC(isp)) {
451 			retval = EINVAL;
452 			break;
453 		}
454 		f->parameter = 0;
455 		if (strcmp(f->param_name, "framelength") == 0) {
456 			if (param != 512 && param != 1024 && param != 2048) {
457 				retval = EINVAL;
458 				break;
459 			}
460 			FCPARAM(isp)->isp_maxfrmlen = param;
461 			retval = 0;
462 			break;
463 		}
464 		if (strcmp(f->param_name, "exec_throttle") == 0) {
465 			if (param < 16 || param > 255) {
466 				retval = EINVAL;
467 				break;
468 			}
469 			FCPARAM(isp)->isp_execthrottle = param;
470 			retval = 0;
471 			break;
472 		}
473 		if (strcmp(f->param_name, "fullduplex") == 0) {
474 			if (param != 0 && param != 1) {
475 				retval = EINVAL;
476 				break;
477 			}
478 			if (param) {
479 				FCPARAM(isp)->isp_fwoptions |=
480 				    ICBOPT_FULL_DUPLEX;
481 			} else {
482 				FCPARAM(isp)->isp_fwoptions &=
483 				    ~ICBOPT_FULL_DUPLEX;
484 			}
485 			retval = 0;
486 			break;
487 		}
488 		if (strcmp(f->param_name, "loopid") == 0) {
489 			if (param < 0 || param > 125) {
490 				retval = EINVAL;
491 				break;
492 			}
493 			FCPARAM(isp)->isp_loopid = param;
494 			retval = 0;
495 			break;
496 		}
497 		retval = EINVAL;
498 		break;
499 	}
500 	default:
501 		break;
502 	}
503 	return (retval);
504 }
505 
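/*
 * Interrupt-enable hook, run once interrupt delivery is safe: turn on chip
 * interrupts (unless we have no role) and release the hook so boot continues.
 */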
506 static void
507 isp_intr_enable(void *arg)
508 {
509 	struct ispsoftc *isp = arg;
510 	if (isp->isp_role != ISP_ROLE_NONE) {
511 		ENABLE_INTS(isp);
512 		isp->isp_osinfo.intsok = 1;
513 	}
514 	/* Release our hook so that the boot can continue. */
515 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
516 }
517 
518 /*
519  * Put the target mode functions here, because some are inlines
520  */
521 
522 #ifdef	ISP_TARGET_MODE
523 
524 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
525 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
526 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
527 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
528 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
529 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
530 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
531 static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
532 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
533 static cam_status
534 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
535 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
536 static void isp_en_lun(struct ispsoftc *, union ccb *);
537 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
538 static timeout_t isp_refire_putback_atio;
539 static void isp_complete_ctio(union ccb *);
540 static void isp_target_putback_atio(union ccb *);
541 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
542 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
543 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
544 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
545 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
546 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
547 
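/*
 * Return nonzero if target mode is already enabled for (bus, lun), by
 * walking the lun hash chain.
 */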
548 static INLINE int
549 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
550 {
551 	tstate_t *tptr;
552 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
553 	if (tptr == NULL) {
554 		return (0);
555 	}
556 	do {
557 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
558 			return (1);
559 		}
560 	} while ((tptr = tptr->next) != NULL);
561 	return (0);
562 }
563 
564 static INLINE int
565 are_any_luns_enabled(struct ispsoftc *isp, int port)
566 {
567 	int lo, hi;
568 	if (IS_DUALBUS(isp)) {
569 		lo = (port * (LUN_HASH_SIZE >> 1));
570 		hi = lo + (LUN_HASH_SIZE >> 1);
571 	} else {
572 		lo = 0;
573 		hi = LUN_HASH_SIZE;
574 	}
575 	for (; lo < hi; lo++) {
576 		if (isp->isp_osinfo.lun_hash[lo]) {
577 			return (1);
578 		}
579 	}
580 	return (0);
581 }
582 
583 static INLINE tstate_t *
584 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
585 {
586 	tstate_t *tptr = NULL;
587 
588 	if (lun == CAM_LUN_WILDCARD) {
589 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
590 			tptr = &isp->isp_osinfo.tsdflt[bus];
591 			tptr->hold++;
592 			return (tptr);
593 		}
594 	} else {
595 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
596 		if (tptr == NULL) {
597 			return (NULL);
598 		}
599 	}
600 
601 	do {
602 		if (tptr->lun == lun && tptr->bus == bus) {
603 			tptr->hold++;
604 			return (tptr);
605 		}
606 	} while ((tptr = tptr->next) != NULL);
607 	return (tptr);
608 }
609 
610 static INLINE void
611 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
612 {
613 	if (tptr->hold)
614 		tptr->hold--;
615 }
616 
617 static INLINE int
618 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
619 {
620 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
621 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
622 #ifdef	ISP_SMPLOCK
623 		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
624 			return (-1);
625 		}
626 #else
627 		if (tsleep(&isp->isp_osinfo.tgtcv0[bus], PZERO, "cv_isp", 0)) {
628 			return (-1);
629 		}
630 #endif
631 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
632 	}
633 	return (0);
634 }
635 
636 static INLINE int
637 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
638 {
639 #ifdef	ISP_SMPLOCK
640 	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
641 		return (-1);
642 	}
643 #else
644 	if (tsleep(&isp->isp_osinfo.tgtcv1[bus], PZERO, "cv_isp1", 0)) {
645 		return (-1);
646 	}
647 #endif
648 	return (0);
649 }
650 
651 static INLINE void
652 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
653 {
654 	isp->isp_osinfo.rstatus[bus] = status;
655 #ifdef	ISP_SMPLOCK
656 	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
657 #else
658 	wakeup(&isp->isp_osinfo.tgtcv1[bus]);
659 #endif
660 }
661 
662 static INLINE void
663 isp_vsema_rqe(struct ispsoftc *isp, int bus)
664 {
665 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
666 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
667 #ifdef	ISP_SMPLOCK
668 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
669 #else
670 		wakeup(&isp->isp_osinfo.tgtcv0[bus]);
671 #endif
672 	}
673 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
674 }
675 
676 static INLINE atio_private_data_t *
677 isp_get_atpd(struct ispsoftc *isp, int tag)
678 {
679 	atio_private_data_t *atp;
680 	for (atp = isp->isp_osinfo.atpdp;
681 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
682 		if (atp->tag == tag)
683 			return (atp);
684 	}
685 	return (NULL);
686 }
687 
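/*
 * Allocate a tstate_t for (bus, lun), give it its own path and link it onto
 * the lun hash chain. The new state is returned with one hold on it.
 */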
688 static cam_status
689 create_lun_state(struct ispsoftc *isp, int bus,
690     struct cam_path *path, tstate_t **rslt)
691 {
692 	cam_status status;
693 	lun_id_t lun;
694 	int hfx;
695 	tstate_t *tptr, *new;
696 
697 	lun = xpt_path_lun_id(path);
698 	if (lun < 0) {
699 		return (CAM_LUN_INVALID);
700 	}
701 	if (is_lun_enabled(isp, bus, lun)) {
702 		return (CAM_LUN_ALRDY_ENA);
703 	}
704 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
705 	if (new == NULL) {
706 		return (CAM_RESRC_UNAVAIL);
707 	}
708 
709 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
710 	    xpt_path_target_id(path), xpt_path_lun_id(path));
711 	if (status != CAM_REQ_CMP) {
712 		free(new, M_DEVBUF);
713 		return (status);
714 	}
715 	new->bus = bus;
716 	new->lun = lun;
717 	SLIST_INIT(&new->atios);
718 	SLIST_INIT(&new->inots);
719 	new->hold = 1;
720 
721 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
722 	tptr = isp->isp_osinfo.lun_hash[hfx];
723 	if (tptr == NULL) {
724 		isp->isp_osinfo.lun_hash[hfx] = new;
725 	} else {
726 		while (tptr->next)
727 			tptr = tptr->next;
728 		tptr->next = new;
729 	}
730 	*rslt = new;
731 	return (CAM_REQ_CMP);
732 }
733 
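/*
 * Unhook a tstate_t from the lun hash and free it, but only if nothing
 * still holds a reference to it.
 */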
734 static INLINE void
735 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
736 {
737 	int hfx;
738 	tstate_t *lw, *pw;
739 
740 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
741 	if (tptr->hold) {
742 		return;
743 	}
744 	pw = isp->isp_osinfo.lun_hash[hfx];
745 	if (pw == NULL) {
746 		return;
747 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
748 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
749 	} else {
750 		lw = pw;
751 		pw = lw->next;
752 		while (pw) {
753 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
754 				lw->next = pw->next;
755 				break;
756 			}
757 			lw = pw;
758 			pw = pw->next;
759 		}
760 		if (pw == NULL) {
761 			return;
762 		}
763 	}
764 	free(tptr, M_DEVBUF);
765 }
766 
767 /*
768  * We enter with our locks held. Enable or disable a lun for target mode.
769  */
770 static void
771 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
772 {
773 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
774 	struct ccb_en_lun *cel = &ccb->cel;
775 	tstate_t *tptr;
776 	u_int16_t rstat;
777 	int bus, cmd, av, wildcard;
778 	lun_id_t lun;
779 	target_id_t tgt;
780 
781 
782 	bus = XS_CHANNEL(ccb) & 0x1;
783 	tgt = ccb->ccb_h.target_id;
784 	lun = ccb->ccb_h.target_lun;
785 
786 	/*
787 	 * Do some sanity checking first.
788 	 */
789 
790 	if ((lun != CAM_LUN_WILDCARD) &&
791 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
792 		ccb->ccb_h.status = CAM_LUN_INVALID;
793 		return;
794 	}
795 
796 	if (IS_SCSI(isp)) {
797 		sdparam *sdp = isp->isp_param;
798 		sdp += bus;
799 		if (tgt != CAM_TARGET_WILDCARD &&
800 		    tgt != sdp->isp_initiator_id) {
801 			ccb->ccb_h.status = CAM_TID_INVALID;
802 			return;
803 		}
804 	} else {
805 		if (tgt != CAM_TARGET_WILDCARD &&
806 		    tgt != FCPARAM(isp)->isp_iid) {
807 			ccb->ccb_h.status = CAM_TID_INVALID;
808 			return;
809 		}
810 		/*
811 		 * This is as good a place as any to check f/w capabilities.
812 		 */
813 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
814 			isp_prt(isp, ISP_LOGERR,
815 			    "firmware does not support target mode");
816 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
817 			return;
818 		}
819 		/*
820 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
821 		 * XXX: dork with our already fragile enable/disable code.
822 		 */
823 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
824 			isp_prt(isp, ISP_LOGERR,
825 			    "firmware not SCCLUN capable");
826 		}
827 	}
828 
829 	if (tgt == CAM_TARGET_WILDCARD) {
830 		if (lun == CAM_LUN_WILDCARD) {
831 			wildcard = 1;
832 		} else {
833 			ccb->ccb_h.status = CAM_LUN_INVALID;
834 			return;
835 		}
836 	} else {
837 		wildcard = 0;
838 	}
839 
840 	/*
841 	 * Next check to see whether this is a target/lun wildcard action.
842 	 *
843 	 * If so, we know that we can accept commands for luns that haven't
844 	 * been enabled yet and send them upstream. Otherwise, we have to
845 	 * handle them locally (if we see them at all).
846 	 */
847 
848 	if (wildcard) {
849 		tptr = &isp->isp_osinfo.tsdflt[bus];
850 		if (cel->enable) {
851 			if (isp->isp_osinfo.tmflags[bus] &
852 			    TM_WILDCARD_ENABLED) {
853 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
854 				return;
855 			}
856 			ccb->ccb_h.status =
857 			    xpt_create_path(&tptr->owner, NULL,
858 			    xpt_path_path_id(ccb->ccb_h.path),
859 			    xpt_path_target_id(ccb->ccb_h.path),
860 			    xpt_path_lun_id(ccb->ccb_h.path));
861 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
862 				return;
863 			}
864 			SLIST_INIT(&tptr->atios);
865 			SLIST_INIT(&tptr->inots);
866 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
867 		} else {
868 			if ((isp->isp_osinfo.tmflags[bus] &
869 			    TM_WILDCARD_ENABLED) == 0) {
870 				ccb->ccb_h.status = CAM_REQ_CMP;
871 				return;
872 			}
873 			if (tptr->hold) {
874 				ccb->ccb_h.status = CAM_SCSI_BUSY;
875 				return;
876 			}
877 			xpt_free_path(tptr->owner);
878 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
879 		}
880 	}
881 
882 	/*
883 	 * Now check to see whether this bus needs to be
884 	 * enabled/disabled with respect to target mode.
885 	 */
886 	av = bus << 31;
887 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
888 		av |= ENABLE_TARGET_FLAG;
889 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
890 		if (av) {
891 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
892 			if (wildcard) {
893 				isp->isp_osinfo.tmflags[bus] &=
894 				    ~TM_WILDCARD_ENABLED;
895 				xpt_free_path(tptr->owner);
896 			}
897 			return;
898 		}
899 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
900 		isp_prt(isp, ISP_LOGINFO,
901 		    "Target Mode enabled on channel %d", bus);
902 	} else if (cel->enable == 0 &&
903 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
904 		if (are_any_luns_enabled(isp, bus)) {
905 			ccb->ccb_h.status = CAM_SCSI_BUSY;
906 			return;
907 		}
908 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
909 		if (av) {
910 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
911 			return;
912 		}
913 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
914 		isp_prt(isp, ISP_LOGINFO,
915 		    "Target Mode disabled on channel %d", bus);
916 	}
917 
918 	if (wildcard) {
919 		ccb->ccb_h.status = CAM_REQ_CMP;
920 		return;
921 	}
922 
923 	if (cel->enable) {
924 		ccb->ccb_h.status =
925 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
926 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
927 			return;
928 		}
929 	} else {
930 		tptr = get_lun_statep(isp, bus, lun);
931 		if (tptr == NULL) {
932 			ccb->ccb_h.status = CAM_LUN_INVALID;
933 			return;
934 		}
935 	}
936 
937 	if (isp_psema_sig_rqe(isp, bus)) {
938 		rls_lun_statep(isp, tptr);
939 		if (cel->enable)
940 			destroy_lun_state(isp, tptr);
941 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
942 		return;
943 	}
944 
945 	if (cel->enable) {
946 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
947 		int c, n, ulun = lun;
948 
949 		cmd = RQSTYPE_ENABLE_LUN;
950 		c = DFLT_CMND_CNT;
951 		n = DFLT_INOT_CNT;
952 		if (IS_FC(isp) && lun != 0) {
953 			cmd = RQSTYPE_MODIFY_LUN;
954 			n = 0;
955 			/*
956 		 	 * For SCC firmware, we only deal with setting
957 			 * (enabling or modifying) lun 0.
958 			 */
959 			ulun = 0;
960 		}
961 		rstat = LUN_ERR;
962 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
963 			xpt_print_path(ccb->ccb_h.path);
964 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
965 			goto out;
966 		}
967 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
968 			xpt_print_path(ccb->ccb_h.path);
969 			isp_prt(isp, ISP_LOGERR,
970 			    "wait for ENABLE/MODIFY LUN timed out");
971 			goto out;
972 		}
973 		rstat = isp->isp_osinfo.rstatus[bus];
974 		if (rstat != LUN_OK) {
975 			xpt_print_path(ccb->ccb_h.path);
976 			isp_prt(isp, ISP_LOGERR,
977 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
978 			goto out;
979 		}
980 	} else {
981 		int c, n, ulun = lun;
982 		u_int32_t seq;
983 
984 		rstat = LUN_ERR;
985 		seq = isp->isp_osinfo.rollinfo++;
986 		cmd = -RQSTYPE_MODIFY_LUN;
987 
988 		c = DFLT_CMND_CNT;
989 		n = DFLT_INOT_CNT;
990 		if (IS_FC(isp) && lun != 0) {
991 			n = 0;
992 			/*
993 		 	 * For SCC firmware, we only deal with setting
994 			 * (enabling or modifying) lun 0.
995 			 */
996 			ulun = 0;
997 		}
998 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
999 			xpt_print_path(ccb->ccb_h.path);
1000 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1001 			goto out;
1002 		}
1003 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1004 			xpt_print_path(ccb->ccb_h.path);
1005 			isp_prt(isp, ISP_LOGERR,
1006 			    "wait for MODIFY LUN timed out");
1007 			goto out;
1008 		}
1009 		rstat = isp->isp_osinfo.rstatus[bus];
1010 		if (rstat != LUN_OK) {
1011 			xpt_print_path(ccb->ccb_h.path);
1012 			isp_prt(isp, ISP_LOGERR,
1013 			    "MODIFY LUN returned 0x%x", rstat);
1014 			goto out;
1015 		}
1016 		if (IS_FC(isp) && lun) {
1017 			goto out;
1018 		}
1019 
1020 		seq = isp->isp_osinfo.rollinfo++;
1021 
1022 		rstat = LUN_ERR;
1023 		cmd = -RQSTYPE_ENABLE_LUN;
1024 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
1025 			xpt_print_path(ccb->ccb_h.path);
1026 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1027 			goto out;
1028 		}
1029 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1030 			xpt_print_path(ccb->ccb_h.path);
1031 			isp_prt(isp, ISP_LOGERR,
1032 			     "wait for DISABLE LUN timed out");
1033 			goto out;
1034 		}
1035 		rstat = isp->isp_osinfo.rstatus[bus];
1036 		if (rstat != LUN_OK) {
1037 			xpt_print_path(ccb->ccb_h.path);
1038 			isp_prt(isp, ISP_LOGWARN,
1039 			    "DISABLE LUN returned 0x%x", rstat);
1040 			goto out;
1041 		}
1042 		if (are_any_luns_enabled(isp, bus) == 0) {
1043 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1044 			if (av) {
1045 				isp_prt(isp, ISP_LOGWARN,
1046 				    "disable target mode on channel %d failed",
1047 				    bus);
1048 				goto out;
1049 			}
1050 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1051 			xpt_print_path(ccb->ccb_h.path);
1052 			isp_prt(isp, ISP_LOGINFO,
1053 			    "Target Mode disabled on channel %d", bus);
1054 		}
1055 	}
1056 
1057 out:
1058 	isp_vsema_rqe(isp, bus);
1059 
1060 	if (rstat != LUN_OK) {
1061 		xpt_print_path(ccb->ccb_h.path);
1062 		isp_prt(isp, ISP_LOGWARN,
1063 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
1064 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1065 		rls_lun_statep(isp, tptr);
1066 		if (cel->enable)
1067 			destroy_lun_state(isp, tptr);
1068 	} else {
1069 		xpt_print_path(ccb->ccb_h.path);
1070 		isp_prt(isp, ISP_LOGINFO, lfmt,
1071 		    (cel->enable) ? "en" : "dis", bus);
1072 		rls_lun_statep(isp, tptr);
1073 		if (cel->enable == 0) {
1074 			destroy_lun_state(isp, tptr);
1075 		}
1076 		ccb->ccb_h.status = CAM_REQ_CMP;
1077 	}
1078 }
1079 
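/*
 * Abort a queued ATIO or immediate notify CCB by removing it from the
 * per-lun list and marking it CAM_REQ_ABORTED.
 */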
1080 static cam_status
1081 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1082 {
1083 	tstate_t *tptr;
1084 	struct ccb_hdr_slist *lp;
1085 	struct ccb_hdr *curelm;
1086 	int found;
1087 	union ccb *accb = ccb->cab.abort_ccb;
1088 
1089 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1090 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1091 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1092 			return (CAM_PATH_INVALID);
1093 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1094 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1095 			return (CAM_PATH_INVALID);
1096 		}
1097 	}
1098 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1099 	if (tptr == NULL) {
1100 		return (CAM_PATH_INVALID);
1101 	}
1102 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1103 		lp = &tptr->atios;
1104 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1105 		lp = &tptr->inots;
1106 	} else {
1107 		rls_lun_statep(isp, tptr);
1108 		return (CAM_UA_ABORT);
1109 	}
1110 	curelm = SLIST_FIRST(lp);
1111 	found = 0;
1112 	if (curelm == &accb->ccb_h) {
1113 		found = 1;
1114 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1115 	} else {
1116 		while(curelm != NULL) {
1117 			struct ccb_hdr *nextelm;
1118 
1119 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1120 			if (nextelm == &accb->ccb_h) {
1121 				found = 1;
1122 				SLIST_NEXT(curelm, sim_links.sle) =
1123 				    SLIST_NEXT(nextelm, sim_links.sle);
1124 				break;
1125 			}
1126 			curelm = nextelm;
1127 		}
1128 	}
1129 	rls_lun_statep(isp, tptr);
1130 	if (found) {
1131 		accb->ccb_h.status = CAM_REQ_ABORTED;
1132 		return (CAM_REQ_CMP);
1133 	}
1134 	return(CAM_PATH_INVALID);
1135 }
1136 
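/*
 * Turn a continue-target-I/O CCB into a CTIO (parallel SCSI) or CTIO2
 * (fibre channel) request: reserve a handle, run DMA setup if there is
 * data to move and queue the entry to the firmware.
 */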
1137 static cam_status
1138 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1139 {
1140 	void *qe;
1141 	struct ccb_scsiio *cso = &ccb->csio;
1142 	u_int16_t *hp, save_handle;
1143 	u_int16_t nxti, optr;
1144 	u_int8_t local[QENTRY_LEN];
1145 
1146 
1147 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1148 		xpt_print_path(ccb->ccb_h.path);
1149 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1150 		return (CAM_RESRC_UNAVAIL);
1151 	}
1152 	bzero(local, QENTRY_LEN);
1153 
1154 	/*
1155 	 * We're either moving data or completing a command here.
1156 	 */
1157 
1158 	if (IS_FC(isp)) {
1159 		atio_private_data_t *atp;
1160 		ct2_entry_t *cto = (ct2_entry_t *) local;
1161 
1162 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1163 		cto->ct_header.rqs_entry_count = 1;
1164 		cto->ct_iid = cso->init_id;
1165 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1166 			cto->ct_lun = ccb->ccb_h.target_lun;
1167 		}
1168 
1169 		atp = isp_get_atpd(isp, cso->tag_id);
1170 		if (atp == NULL) {
1171 			isp_prt(isp, ISP_LOGERR,
1172 			    "cannot find private data adjunct for tag %x",
1173 			    cso->tag_id);
1174 			return (-1);
1175 		}
1176 
1177 		cto->ct_rxid = cso->tag_id;
1178 		if (cso->dxfer_len == 0) {
1179 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1180 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1181 				cto->ct_flags |= CT2_SENDSTATUS;
1182 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1183 				cto->ct_resid =
1184 				    atp->orig_datalen - atp->bytes_xfered;
1185 				if (cto->ct_resid < 0) {
1186 					cto->rsp.m1.ct_scsi_status |=
1187 					    CT2_DATA_OVER;
1188 				} else if (cto->ct_resid > 0) {
1189 					cto->rsp.m1.ct_scsi_status |=
1190 					    CT2_DATA_UNDER;
1191 				}
1192 			}
1193 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1194 				int m = min(cso->sense_len, MAXRESPLEN);
1195 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1196 				cto->rsp.m1.ct_senselen = m;
1197 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1198 			}
1199 		} else {
1200 			cto->ct_flags |= CT2_FLAG_MODE0;
1201 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1202 				cto->ct_flags |= CT2_DATA_IN;
1203 			} else {
1204 				cto->ct_flags |= CT2_DATA_OUT;
1205 			}
1206 			cto->ct_reloff = atp->bytes_xfered;
1207 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1208 				cto->ct_flags |= CT2_SENDSTATUS;
1209 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1210 				cto->ct_resid =
1211 				    atp->orig_datalen -
1212 				    (atp->bytes_xfered + cso->dxfer_len);
1213 				if (cto->ct_resid < 0) {
1214 					cto->rsp.m0.ct_scsi_status |=
1215 					    CT2_DATA_OVER;
1216 				} else if (cto->ct_resid > 0) {
1217 					cto->rsp.m0.ct_scsi_status |=
1218 					    CT2_DATA_UNDER;
1219 				}
1220 			} else {
1221 				atp->last_xframt = cso->dxfer_len;
1222 			}
1223 			/*
1224 			 * If we're sending data and status back together,
1225 			 * we can't also send back sense data as well.
1226 			 */
1227 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1228 		}
1229 
1230 		if (cto->ct_flags & CT2_SENDSTATUS) {
1231 			isp_prt(isp, ISP_LOGTDEBUG0,
1232 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1233 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1234 			    cso->dxfer_len, cto->ct_resid);
1235 			cto->ct_flags |= CT2_CCINCR;
1236 			atp->state = ATPD_STATE_LAST_CTIO;
1237 		} else
1238 			atp->state = ATPD_STATE_CTIO;
1239 		cto->ct_timeout = 10;
1240 		hp = &cto->ct_syshandle;
1241 	} else {
1242 		ct_entry_t *cto = (ct_entry_t *) local;
1243 
1244 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1245 		cto->ct_header.rqs_entry_count = 1;
1246 		cto->ct_iid = cso->init_id;
1247 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1248 		cto->ct_tgt = ccb->ccb_h.target_id;
1249 		cto->ct_lun = ccb->ccb_h.target_lun;
1250 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1251 		if (AT_HAS_TAG(cso->tag_id)) {
1252 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1253 			cto->ct_flags |= CT_TQAE;
1254 		}
1255 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1256 			cto->ct_flags |= CT_NODISC;
1257 		}
1258 		if (cso->dxfer_len == 0) {
1259 			cto->ct_flags |= CT_NO_DATA;
1260 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1261 			cto->ct_flags |= CT_DATA_IN;
1262 		} else {
1263 			cto->ct_flags |= CT_DATA_OUT;
1264 		}
1265 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1266 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1267 			cto->ct_scsi_status = cso->scsi_status;
1268 			cto->ct_resid = cso->resid;
1269 			isp_prt(isp, ISP_LOGTDEBUG0,
1270 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1271 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1272 			    cso->tag_id);
1273 		}
1274 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1275 		cto->ct_timeout = 10;
1276 		hp = &cto->ct_syshandle;
1277 	}
1278 
1279 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1280 		xpt_print_path(ccb->ccb_h.path);
1281 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1282 		return (CAM_RESRC_UNAVAIL);
1283 	}
1284 
1285 
1286 	/*
1287 	 * Call the dma setup routines for this entry (and any subsequent
1288 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1289 	 * new things to play with. As with isp_start's usage of DMA setup,
1290 	 * any swizzling is done in the machine dependent layer. Because
1291 	 * of this, we put the request onto the queue area first in native
1292 	 * format.
1293 	 */
1294 
1295 	save_handle = *hp;
1296 
1297 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1298 	case CMD_QUEUED:
1299 		ISP_ADD_REQUEST(isp, nxti);
1300 		return (CAM_REQ_INPROG);
1301 
1302 	case CMD_EAGAIN:
1303 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1304 		isp_destroy_handle(isp, save_handle);
1305 		return (CAM_RESRC_UNAVAIL);
1306 
1307 	default:
1308 		isp_destroy_handle(isp, save_handle);
1309 		return (XS_ERR(ccb));
1310 	}
1311 }
1312 
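/*
 * Hand a completed ATIO back to the firmware so the command slot can be
 * reused, then complete the CTIO CCB. If the request queue is full the
 * operation is retried later via isp_refire_putback_atio.
 */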
1313 static void
1314 isp_refire_putback_atio(void *arg)
1315 {
1316 	int s = splcam();
1317 	isp_target_putback_atio(arg);
1318 	splx(s);
1319 }
1320 
1321 static void
1322 isp_target_putback_atio(union ccb *ccb)
1323 {
1324 	struct ispsoftc *isp;
1325 	struct ccb_scsiio *cso;
1326 	u_int16_t nxti, optr;
1327 	void *qe;
1328 
1329 	isp = XS_ISP(ccb);
1330 
1331 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1332 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1333 		isp_prt(isp, ISP_LOGWARN,
1334 		    "isp_target_putback_atio: Request Queue Overflow");
1335 		return;
1336 	}
1337 	bzero(qe, QENTRY_LEN);
1338 	cso = &ccb->csio;
1339 	if (IS_FC(isp)) {
1340 		at2_entry_t local, *at = &local;
1341 		MEMZERO(at, sizeof (at2_entry_t));
1342 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1343 		at->at_header.rqs_entry_count = 1;
1344 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1345 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1346 		} else {
1347 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1348 		}
1349 		at->at_status = CT_OK;
1350 		at->at_rxid = cso->tag_id;
1351 		at->at_iid = cso->ccb_h.target_id;
1352 		isp_put_atio2(isp, at, qe);
1353 	} else {
1354 		at_entry_t local, *at = &local;
1355 		MEMZERO(at, sizeof (at_entry_t));
1356 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1357 		at->at_header.rqs_entry_count = 1;
1358 		at->at_iid = cso->init_id;
1359 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1360 		at->at_tgt = cso->ccb_h.target_id;
1361 		at->at_lun = cso->ccb_h.target_lun;
1362 		at->at_status = CT_OK;
1363 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1364 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1365 		isp_put_atio(isp, at, qe);
1366 	}
1367 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1368 	ISP_ADD_REQUEST(isp, nxti);
1369 	isp_complete_ctio(ccb);
1370 }
1371 
1372 static void
1373 isp_complete_ctio(union ccb *ccb)
1374 {
1375 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1376 		ccb->ccb_h.status |= CAM_REQ_CMP;
1377 	}
1378 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1379 	xpt_done(ccb);
1380 }
1381 
1382 /*
1383  * Handle ATIO stuff that the generic code can't.
1384  * This means handling CDBs.
1385  */
1386 
1387 static int
1388 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1389 {
1390 	tstate_t *tptr;
1391 	int status, bus, iswildcard;
1392 	struct ccb_accept_tio *atiop;
1393 
1394 	/*
1395 	 * The firmware status (except for the QLTM_SVALID bit)
1396 	 * indicates why this ATIO was sent to us.
1397 	 *
1398 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1399 	 *
1400 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1401 	 * we're still connected on the SCSI bus.
1402 	 */
1403 	status = aep->at_status;
1404 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1405 		/*
1406 		 * Bus Phase Sequence error. We should have sense data
1407 		 * suggested by the f/w. I'm not sure quite yet what
1408 		 * to do about this for CAM.
1409 		 */
1410 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1411 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1412 		return (0);
1413 	}
1414 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1415 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1416 		    status);
1417 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1418 		return (0);
1419 	}
1420 
1421 	bus = GET_BUS_VAL(aep->at_iid);
1422 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1423 	if (tptr == NULL) {
1424 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1425 		iswildcard = 1;
1426 	} else {
1427 		iswildcard = 0;
1428 	}
1429 
1430 	if (tptr == NULL) {
1431 		/*
1432 		 * Because we can't autofeed sense data back with
1433 		 * a command for parallel SCSI, we can't give back
1434 		 * a CHECK CONDITION. We'll give back a BUSY status
1435 		 * instead. This works out okay because the only
1436 		 * time we should, in fact, get this, is in the
1437 		 * case that somebody configured us without the
1438 		 * blackhole driver, so they get what they deserve.
1439 		 */
1440 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1441 		return (0);
1442 	}
1443 
1444 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1445 	if (atiop == NULL) {
1446 		/*
1447 		 * Because we can't autofeed sense data back with
1448 		 * a command for parallel SCSI, we can't give back
1449 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1450 		 * instead. This works out okay because the only time we
1451 		 * should, in fact, get this, is in the case that we've
1452 		 * run out of ATIOS.
1453 		 */
1454 		xpt_print_path(tptr->owner);
1455 		isp_prt(isp, ISP_LOGWARN,
1456 		    "no ATIOS for lun %d from initiator %d on channel %d",
1457 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1458 		if (aep->at_flags & AT_TQAE)
1459 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1460 		else
1461 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1462 		rls_lun_statep(isp, tptr);
1463 		return (0);
1464 	}
1465 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1466 	if (iswildcard) {
1467 		atiop->ccb_h.target_id = aep->at_tgt;
1468 		atiop->ccb_h.target_lun = aep->at_lun;
1469 	}
1470 	if (aep->at_flags & AT_NODISC) {
1471 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1472 	} else {
1473 		atiop->ccb_h.flags = 0;
1474 	}
1475 
1476 	if (status & QLTM_SVALID) {
1477 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1478 		atiop->sense_len = amt;
1479 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1480 	} else {
1481 		atiop->sense_len = 0;
1482 	}
1483 
1484 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1485 	atiop->cdb_len = aep->at_cdblen;
1486 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1487 	atiop->ccb_h.status = CAM_CDB_RECVD;
1488 	/*
1489 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1490 	 * and the handle (which we have to preserve).
1491 	 */
1492 	AT_MAKE_TAGID(atiop->tag_id, aep);
1493 	if (aep->at_flags & AT_TQAE) {
1494 		atiop->tag_action = aep->at_tag_type;
1495 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1496 	}
1497 	xpt_done((union ccb*)atiop);
1498 	isp_prt(isp, ISP_LOGTDEBUG0,
1499 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1500 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1501 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1502 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1503 	    "nondisc" : "disconnecting");
1504 	rls_lun_statep(isp, tptr);
1505 	return (0);
1506 }
1507 
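/*
 * Fibre channel flavor of ATIO handling: validate the firmware status,
 * find (or fall back to the wildcard) lun state, pair the command with an
 * atio_private_data_t adjunct and pass the CDB up to CAM.
 */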
1508 static int
1509 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1510 {
1511 	lun_id_t lun;
1512 	tstate_t *tptr;
1513 	struct ccb_accept_tio *atiop;
1514 	atio_private_data_t *atp;
1515 
1516 	/*
1517 	 * The firmware status (except for the QLTM_SVALID bit)
1518 	 * indicates why this ATIO was sent to us.
1519 	 *
1520 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1521 	 */
1522 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1523 		isp_prt(isp, ISP_LOGWARN,
1524 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1525 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1526 		return (0);
1527 	}
1528 
1529 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1530 		lun = aep->at_scclun;
1531 	} else {
1532 		lun = aep->at_lun;
1533 	}
1534 	tptr = get_lun_statep(isp, 0, lun);
1535 	if (tptr == NULL) {
1536 		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1537 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1538 	}
1539 
1540 	if (tptr == NULL) {
1541 		/*
1542 		 * What we'd like to know is whether or not we have a listener
1543 		 * upstream that really hasn't configured yet. If we do, then
1544 		 * we can give a more sensible reply here. If not, then we can
1545 		 * reject this out of hand.
1546 		 *
1547 		 * Choices for what to send were
1548 		 *
1549 		 *	Not Ready, Unit Not Self-Configured Yet
1550 		 *	(0x2,0x3e,0x00)
1551 		 *
1552 		 * for the former and
1553 		 *
1554 		 *	Illegal Request, Logical Unit Not Supported
1555 		 *	(0x5,0x25,0x00)
1556 		 *
1557 		 * for the latter.
1558 		 *
1559 		 * We used to decide whether there was at least one listener
1560 		 * based upon whether the black hole driver was configured.
1561 		 * However, recent config(8) changes have made this hard to do
1562 		 * at this time.
1563 		 *
1564 		 */
1565 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1566 		return (0);
1567 	}
1568 
1569 	atp = isp_get_atpd(isp, 0);
1570 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1571 	if (atiop == NULL || atp == NULL) {
1572 		/*
1573 		 * Because we can't autofeed sense data back with
1574 		 * a command for parallel SCSI, we can't give back
1575 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1576 		 * instead. This works out okay because the only time we
1577 		 * should, in fact, get this, is in the case that we've
1578 		 * run out of ATIOS.
1579 		 */
1580 		xpt_print_path(tptr->owner);
1581 		isp_prt(isp, ISP_LOGWARN,
1582 		    "no %s for lun %d from initiator %d",
1583 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1584 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1585 		rls_lun_statep(isp, tptr);
1586 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1587 		return (0);
1588 	}
1589 	atp->state = ATPD_STATE_ATIO;
1590 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1591 	tptr->atio_count--;
1592 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1593 	    lun, tptr->atio_count);
1594 
1595 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1596 		atiop->ccb_h.target_id =
1597 		    ((fcparam *)isp->isp_param)->isp_loopid;
1598 		atiop->ccb_h.target_lun = lun;
1599 	}
1600 	/*
1601 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1602 	 */
1603 	atiop->sense_len = 0;
1604 
1605 	atiop->init_id = aep->at_iid;
1606 	atiop->cdb_len = ATIO2_CDBLEN;
1607 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1608 	atiop->ccb_h.status = CAM_CDB_RECVD;
1609 	atiop->tag_id = aep->at_rxid;
1610 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1611 	case ATIO2_TC_ATTR_SIMPLEQ:
1612 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1613 		break;
1614 	case ATIO2_TC_ATTR_HEADOFQ:
1615 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1616 		break;
1617 	case ATIO2_TC_ATTR_ORDERED:
1618 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1619 		break;
1620 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1621 	case ATIO2_TC_ATTR_UNTAGGED:
1622 	default:
1623 		atiop->tag_action = 0;
1624 		break;
1625 	}
1626 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1627 
1628 	atp->tag = atiop->tag_id;
1629 	atp->lun = lun;
1630 	atp->orig_datalen = aep->at_datalen;
1631 	atp->last_xframt = 0;
1632 	atp->bytes_xfered = 0;
1633 	atp->state = ATPD_STATE_CAM;
1634 	xpt_done((union ccb*)atiop);
1635 
1636 	isp_prt(isp, ISP_LOGTDEBUG0,
1637 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1638 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1639 	    lun, aep->at_taskflags, aep->at_datalen);
1640 	rls_lun_statep(isp, tptr);
1641 	return (0);
1642 }
1643 
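/*
 * A CTIO or CTIO2 has completed: collect status and residual. Intermediate
 * transfers just record progress; for the final one we either put the ATIO
 * back to the firmware (on error) or complete the CCB back to CAM.
 */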
1644 static int
1645 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1646 {
1647 	union ccb *ccb;
1648 	int sentstatus, ok, notify_cam, resid = 0;
1649 	u_int16_t tval;
1650 
1651 	/*
1652 	 * CTIO and CTIO2 are close enough....
1653 	 */
1654 
1655 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1656 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1657 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1658 
1659 	if (IS_FC(isp)) {
1660 		ct2_entry_t *ct = arg;
1661 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1662 		if (atp == NULL) {
1663 			isp_prt(isp, ISP_LOGERR,
1664 			    "cannot find adjunct for %x after I/O",
1665 			    ct->ct_rxid);
1666 			return (0);
1667 		}
1668 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1669 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1670 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1671 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1672 		}
1673 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1674 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1675 			resid = ct->ct_resid;
1676 			atp->bytes_xfered += (atp->last_xframt - resid);
1677 			atp->last_xframt = 0;
1678 		}
1679 		if (sentstatus || !ok) {
1680 			atp->tag = 0;
1681 		}
1682 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1683 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1684 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1685 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1686 		    resid, sentstatus? "FIN" : "MID");
1687 		tval = ct->ct_rxid;
1688 
1689 		/* XXX: should really come after isp_complete_ctio */
1690 		atp->state = ATPD_STATE_PDON;
1691 	} else {
1692 		ct_entry_t *ct = arg;
1693 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1694 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1695 		/*
1696 		 * We *ought* to be able to get back to the original ATIO
1697 		 * here, but for some reason this gets lost. It's just as
1698 		 * well because it's squirrelled away as part of periph
1699 		 * private data.
1700 		 *
1701 		 * We can live without it as long as we continue to use
1702 		 * the auto-replenish feature for CTIOs.
1703 		 */
1704 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1705 		if (ct->ct_status & QLTM_SVALID) {
1706 			char *sp = (char *)ct;
1707 			sp += CTIO_SENSE_OFFSET;
1708 			ccb->csio.sense_len =
1709 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1710 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1711 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1712 		}
1713 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1714 			resid = ct->ct_resid;
1715 		}
1716 		isp_prt(isp, ISP_LOGTDEBUG0,
1717 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1718 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1719 		    ct->ct_status, ct->ct_flags, resid,
1720 		    sentstatus? "FIN" : "MID");
1721 		tval = ct->ct_fwhandle;
1722 	}
1723 	ccb->csio.resid += resid;
1724 
1725 	/*
1726 	 * We're here either because intermediate data transfers are done
1727 	 * and/or the final status CTIO (which may have joined with a
1728 	 * Data Transfer) is done.
1729 	 *
1730 	 * In any case, for this platform, the upper layers figure out
1731 	 * what to do next, so all we do here is collect status and
1732 	 * pass information along. Any DMA handles have already been
1733 	 * freed.
1734 	 */
1735 	if (notify_cam == 0) {
1736 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1737 		return (0);
1738 	}
1739 
1740 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1741 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1742 
1743 	if (!ok) {
1744 		isp_target_putback_atio(ccb);
1745 	} else {
1746 		isp_complete_ctio(ccb);
1747 
1748 	}
1749 	return (0);
1750 }
1751 
1752 static int
1753 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1754 {
1755 	return (0);	/* XXXX */
1756 }
1757 
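/*
 * Handle immediate notify entries from the fibre channel firmware: port
 * logouts, port changes, global logouts and aborted tasks.
 */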
1758 static int
1759 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1760 {
1761 
1762 	switch (inp->in_status) {
1763 	case IN_PORT_LOGOUT:
1764 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1765 		   inp->in_iid);
1766 		break;
1767 	case IN_PORT_CHANGED:
1768 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1769 		   inp->in_iid);
1770 		break;
1771 	case IN_GLOBAL_LOGO:
1772 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1773 		break;
1774 	case IN_ABORT_TASK:
1775 	{
1776 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1777 		struct ccb_immed_notify *inot = NULL;
1778 
1779 		if (atp) {
1780 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1781 			if (tptr) {
1782 				inot = (struct ccb_immed_notify *)
1783 				    SLIST_FIRST(&tptr->inots);
1784 				if (inot) {
1785 					SLIST_REMOVE_HEAD(&tptr->inots,
1786 					    sim_links.sle);
1787 				}
1788 			}
1789 			isp_prt(isp, ISP_LOGWARN,
1790 			   "abort task RX_ID %x IID %d state %d",
1791 			   inp->in_seqid, inp->in_iid, atp->state);
1792 		} else {
1793 			isp_prt(isp, ISP_LOGWARN,
1794 			   "abort task RX_ID %x from iid %d, state unknown",
1795 			   inp->in_seqid, inp->in_iid);
1796 		}
1797 		if (inot) {
1798 			inot->initiator_id = inp->in_iid;
1799 			inot->sense_len = 0;
1800 			inot->message_args[0] = MSG_ABORT_TAG;
1801 			inot->message_args[1] = inp->in_seqid & 0xff;
1802 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1803 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1804 			xpt_done((union ccb *)inot);
1805 		}
1806 		break;
1807 	}
1808 	default:
1809 		break;
1810 	}
1811 	return (0);
1812 }
1813 #endif
1814 
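/*
 * CAM asynchronous event callback. On AC_LOST_DEVICE for parallel SCSI
 * controllers, push the target's goal parameters back to default negotiation
 * settings and schedule an update.
 */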
1815 static void
1816 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1817 {
1818 	struct cam_sim *sim;
1819 	struct ispsoftc *isp;
1820 
1821 	sim = (struct cam_sim *)cbarg;
1822 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1823 	switch (code) {
1824 	case AC_LOST_DEVICE:
1825 		if (IS_SCSI(isp)) {
1826 			u_int16_t oflags, nflags;
1827 			sdparam *sdp = isp->isp_param;
1828 			int tgt;
1829 
1830 			tgt = xpt_path_target_id(path);
1831 			if (tgt >= 0) {
1832 				sdp += cam_sim_bus(sim);
1833 				ISP_LOCK(isp);
1834 				nflags = sdp->isp_devparam[tgt].nvrm_flags;
1835 #ifndef	ISP_TARGET_MODE
1836 				nflags &= DPARM_SAFE_DFLT;
1837 				if (isp->isp_loaded_fw) {
1838 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1839 				}
1840 #else
1841 				nflags = DPARM_DEFAULT;
1842 #endif
1843 				oflags = sdp->isp_devparam[tgt].goal_flags;
1844 				sdp->isp_devparam[tgt].goal_flags = nflags;
1845 				sdp->isp_devparam[tgt].dev_update = 1;
1846 				isp->isp_update |= (1 << cam_sim_bus(sim));
1847 				(void) isp_control(isp,
1848 				    ISPCTL_UPDATE_PARAMS, NULL);
1849 				sdp->isp_devparam[tgt].goal_flags = oflags;
1850 				ISP_UNLOCK(isp);
1851 			}
1852 		}
1853 		break;
1854 	default:
1855 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1856 		break;
1857 	}
1858 }
1859 
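/*
 * CAM polling entry point: read the interrupt status registers and, if
 * anything is pending, run the normal interrupt handler by hand. CAM
 * uses this when it cannot rely on interrupts being delivered.
 */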
1860 static void
1861 isp_poll(struct cam_sim *sim)
1862 {
1863 	struct ispsoftc *isp = cam_sim_softc(sim);
1864 	u_int16_t isr, sema, mbox;
1865 
1866 	ISP_LOCK(isp);
1867 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1868 		isp_intr(isp, isr, sema, mbox);
1869 	}
1870 	ISP_UNLOCK(isp);
1871 }
1872 
1873 
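/*
 * Per-command watchdog, armed from isp_action when a command is queued.
 * On the first expiry the command is put on a short grace period; if it
 * is still outstanding when the watchdog fires again, it is aborted and
 * completed with CAM_CMD_TIMEOUT.
 */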
1874 static void
1875 isp_watchdog(void *arg)
1876 {
1877 	XS_T *xs = arg;
1878 	struct ispsoftc *isp = XS_ISP(xs);
1879 	u_int32_t handle;
1880 	int iok;
1881 
1882 	/*
1883 	 * We've decided this command is dead. Make sure we're not trying
1884 	 * to kill a command that's already dead by getting its handle and
1885 	 * seeing whether it's still alive.
1886 	 */
1887 	ISP_LOCK(isp);
1888 	iok = isp->isp_osinfo.intsok;
1889 	isp->isp_osinfo.intsok = 0;
1890 	handle = isp_find_handle(isp, xs);
1891 	if (handle) {
1892 		u_int16_t isr, sema, mbox;
1893 
1894 		if (XS_CMD_DONE_P(xs)) {
1895 			isp_prt(isp, ISP_LOGDEBUG1,
1896 			    "watchdog found done cmd (handle 0x%x)", handle);
1897 			ISP_UNLOCK(isp);
1898 			return;
1899 		}
1900 
1901 		if (XS_CMD_WDOG_P(xs)) {
1902 			isp_prt(isp, ISP_LOGDEBUG2,
1903 			    "recursive watchdog (handle 0x%x)", handle);
1904 			ISP_UNLOCK(isp);
1905 			return;
1906 		}
1907 
1908 		XS_CMD_S_WDOG(xs);
1909 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1910 			isp_intr(isp, isr, sema, mbox);
1911 		}
1912 		if (XS_CMD_DONE_P(xs)) {
1913 			isp_prt(isp, ISP_LOGDEBUG2,
1914 			    "watchdog cleanup for handle 0x%x", handle);
1915 			xpt_done((union ccb *) xs);
1916 		} else if (XS_CMD_GRACE_P(xs)) {
1917 			/*
1918 			 * Make sure the command is *really* dead before we
1919 			 * release the handle (and DMA resources) for reuse.
1920 			 */
1921 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1922 
1923 			/*
1924 			 * After this point, the command is really dead.
1925 			 */
1926 			if (XS_XFRLEN(xs)) {
1927 				ISP_DMAFREE(isp, xs, handle);
1928 			}
1929 			isp_destroy_handle(isp, handle);
1930 			xpt_print_path(xs->ccb_h.path);
1931 			isp_prt(isp, ISP_LOGWARN,
1932 			    "watchdog timeout for handle 0x%x", handle);
1933 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1934 			XS_CMD_C_WDOG(xs);
1935 			isp_done(xs);
1936 		} else {
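			/*
			 * First expiry for this command: give it a one
			 * second grace period. Re-arm the watchdog, mark
			 * the command as being on borrowed time, and queue
			 * a SYNC_ALL marker for this channel, which should
			 * shake loose any completion the firmware is still
			 * holding before the next expiry declares it dead.
			 */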
1937 			u_int16_t nxti, optr;
1938 			ispreq_t local, *mp= &local, *qe;
1939 
1940 			XS_CMD_C_WDOG(xs);
1941 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1942 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1943 				ISP_UNLOCK(isp);
1944 				return;
1945 			}
1946 			XS_CMD_S_GRACE(xs);
1947 			MEMZERO((void *) mp, sizeof (*mp));
1948 			mp->req_header.rqs_entry_count = 1;
1949 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1950 			mp->req_modifier = SYNC_ALL;
1951 			mp->req_target = XS_CHANNEL(xs) << 7;
1952 			isp_put_request(isp, mp, qe);
1953 			ISP_ADD_REQUEST(isp, nxti);
1954 		}
1955 	} else {
1956 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1957 	}
1958 	isp->isp_osinfo.intsok = iok;
1959 	ISP_UNLOCK(isp);
1960 }
1961 
1962 static void
1963 isp_kthread(void *arg)
1964 {
1965 	struct ispsoftc *isp = arg;
1966 
1967 #ifdef	ISP_SMPLOCK
1968 	mtx_lock(&isp->isp_lock);
1969 #else
1970 	mtx_lock(&Giant);
1971 #endif
1972 	/*
1973 	 * The first loop is for our usage where we have yet to have
1974 	 * gotten good fibre channel state.
1975 	 */
1976 	for (;;) {
1977 		int wasfrozen;
1978 
1979 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1980 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1981 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
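			/*
			 * If the firmware isn't ready (or the loop state is
			 * still short of PDB received) and we have either
			 * never seen the loop up or this thread hasn't been
			 * matured yet, stop retrying here so the code below
			 * can release the SIMQ and let queued commands be
			 * dealt with.
			 */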
1982 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1983 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1984 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1985 				    isp->isp_osinfo.ktmature == 0) {
1986 					break;
1987 				}
1988 			}
1989 #ifdef	ISP_SMPLOCK
1990 			msleep(isp_kthread, &isp->isp_lock,
1991 			    PRIBIO, "isp_fcthrd", hz);
1992 #else
1993 			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1994 #endif
1995 		}
1996 
1997 		/*
1998 		 * Even if we didn't get good loop state we may be
1999 		 * unfreezing the SIMQ so that we can kill off
2000 		 * commands (if we've never seen loop before, for example).
2001 		 */
2002 		isp->isp_osinfo.ktmature = 1;
2003 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2004 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2005 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2006 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2007 			ISPLOCK_2_CAMLOCK(isp);
2008 			xpt_release_simq(isp->isp_sim, 1);
2009 			CAMLOCK_2_ISPLOCK(isp);
2010 		}
2011 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
2012 #ifdef	ISP_SMPLOCK
2013 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2014 #else
2015 		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
2016 #endif
2017 	}
2018 }
2019 
2020 static void
2021 isp_action(struct cam_sim *sim, union ccb *ccb)
2022 {
2023 	int bus, tgt, error;
2024 	struct ispsoftc *isp;
2025 	struct ccb_trans_settings *cts;
2026 
2027 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2028 
2029 	isp = (struct ispsoftc *)cam_sim_softc(sim);
2030 	ccb->ccb_h.sim_priv.entries[0].field = 0;
2031 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2032 	if (isp->isp_state != ISP_RUNSTATE &&
2033 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
2034 		CAMLOCK_2_ISPLOCK(isp);
2035 		isp_init(isp);
2036 		if (isp->isp_state != ISP_INITSTATE) {
2037 			ISP_UNLOCK(isp);
2038 			/*
2039 			 * Lie. Say it was a selection timeout.
2040 			 */
2041 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2042 			xpt_freeze_devq(ccb->ccb_h.path, 1);
2043 			xpt_done(ccb);
2044 			return;
2045 		}
2046 		isp->isp_state = ISP_RUNSTATE;
2047 		ISPLOCK_2_CAMLOCK(isp);
2048 	}
2049 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2050 
2051 
2052 	switch (ccb->ccb_h.func_code) {
2053 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2054 		/*
2055 		 * Do a couple of preliminary checks...
2056 		 */
2057 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2058 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2059 				ccb->ccb_h.status = CAM_REQ_INVALID;
2060 				xpt_done(ccb);
2061 				break;
2062 			}
2063 		}
2064 #ifdef	DIAGNOSTIC
2065 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2066 			ccb->ccb_h.status = CAM_PATH_INVALID;
2067 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2068 			ccb->ccb_h.status = CAM_PATH_INVALID;
2069 		}
2070 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2071 			isp_prt(isp, ISP_LOGERR,
2072 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2073 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2074 			xpt_done(ccb);
2075 			break;
2076 		}
2077 #endif
2078 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2079 		CAMLOCK_2_ISPLOCK(isp);
2080 		error = isp_start((XS_T *) ccb);
2081 		switch (error) {
2082 		case CMD_QUEUED:
2083 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2084 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
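				/*
				 * Convert the CAM timeout (milliseconds,
				 * with CAM_TIME_DEFAULT meaning 60 seconds)
				 * into clock ticks, rounding up and padding
				 * with two extra seconds of slack before
				 * arming the watchdog.
				 */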
2085 				u_int64_t ticks = (u_int64_t) hz;
2086 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2087 					ticks = 60 * 1000 * ticks;
2088 				else
2089 					ticks = ccb->ccb_h.timeout * hz;
2090 				ticks = ((ticks + 999) / 1000) + hz + hz;
2091 				if (ticks >= 0x80000000) {
2092 					isp_prt(isp, ISP_LOGERR,
2093 					    "timeout overflow");
2094 					ticks = 0x7fffffff;
2095 				}
2096 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2097 				    (caddr_t)ccb, (int)ticks);
2098 			} else {
2099 				callout_handle_init(&ccb->ccb_h.timeout_ch);
2100 			}
2101 			ISPLOCK_2_CAMLOCK(isp);
2102 			break;
2103 		case CMD_RQLATER:
2104 			/*
2105 			 * This can only happen for Fibre Channel
2106 			 */
2107 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2108 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2109 			    isp->isp_osinfo.ktmature) {
2110 				ISPLOCK_2_CAMLOCK(isp);
2111 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2112 				xpt_done(ccb);
2113 				break;
2114 			}
2115 #ifdef	ISP_SMPLOCK
2116 			cv_signal(&isp->isp_osinfo.kthread_cv);
2117 #else
2118 			wakeup(&isp->isp_osinfo.kthread_cv);
2119 #endif
2120 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2121 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2122 			ISPLOCK_2_CAMLOCK(isp);
2123 			xpt_done(ccb);
2124 			break;
2125 		case CMD_EAGAIN:
2126 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2127 			ISPLOCK_2_CAMLOCK(isp);
2128 			xpt_done(ccb);
2129 			break;
2130 		case CMD_COMPLETE:
2131 			isp_done((struct ccb_scsiio *) ccb);
2132 			ISPLOCK_2_CAMLOCK(isp);
2133 			break;
2134 		default:
2135 			isp_prt(isp, ISP_LOGERR,
2136 			    "What's this? 0x%x at %d in file %s",
2137 			    error, __LINE__, __FILE__);
2138 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2139 			xpt_done(ccb);
2140 			ISPLOCK_2_CAMLOCK(isp);
2141 		}
2142 		break;
2143 
2144 #ifdef	ISP_TARGET_MODE
2145 	case XPT_EN_LUN:		/* Enable LUN as a target */
2146 	{
2147 		int iok;
2148 		CAMLOCK_2_ISPLOCK(isp);
2149 		iok = isp->isp_osinfo.intsok;
2150 		isp->isp_osinfo.intsok = 0;
2151 		isp_en_lun(isp, ccb);
2152 		isp->isp_osinfo.intsok = iok;
2153 		ISPLOCK_2_CAMLOCK(isp);
2154 		xpt_done(ccb);
2155 		break;
2156 	}
2157 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2158 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2159 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2160 	{
2161 		tstate_t *tptr =
2162 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2163 		if (tptr == NULL) {
2164 			ccb->ccb_h.status = CAM_LUN_INVALID;
2165 			xpt_done(ccb);
2166 			break;
2167 		}
2168 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2169 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2170 		ccb->ccb_h.flags = 0;
2171 
2172 		CAMLOCK_2_ISPLOCK(isp);
2173 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2174 			/*
2175 			 * Note that the command itself may not be done yet;
2176 			 * it may not even have had the first CTIO sent.
2177 			 */
2178 			tptr->atio_count++;
2179 			isp_prt(isp, ISP_LOGTDEBUG0,
2180 			    "Put FREE ATIO2, lun %d, count now %d",
2181 			    ccb->ccb_h.target_lun, tptr->atio_count);
2182 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2183 			    sim_links.sle);
2184 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2185 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2186 			    sim_links.sle);
2187 		} else {
2188 			;
2189 		}
2190 		rls_lun_statep(isp, tptr);
2191 		ccb->ccb_h.status = CAM_REQ_INPROG;
2192 		ISPLOCK_2_CAMLOCK(isp);
2193 		break;
2194 	}
2195 	case XPT_CONT_TARGET_IO:
2196 	{
2197 		CAMLOCK_2_ISPLOCK(isp);
2198 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2199 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2200 			isp_prt(isp, ISP_LOGWARN,
2201 			    "XPT_CONT_TARGET_IO: status 0x%x",
2202 			    ccb->ccb_h.status);
2203 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2204 			ISPLOCK_2_CAMLOCK(isp);
2205 			xpt_done(ccb);
2206 		} else {
2207 			ISPLOCK_2_CAMLOCK(isp);
2208 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2209 		}
2210 		break;
2211 	}
2212 #endif
2213 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2214 
2215 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2216 		tgt = ccb->ccb_h.target_id;
2217 		tgt |= (bus << 16);
2218 
2219 		CAMLOCK_2_ISPLOCK(isp);
2220 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2221 		ISPLOCK_2_CAMLOCK(isp);
2222 		if (error) {
2223 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2224 		} else {
2225 			ccb->ccb_h.status = CAM_REQ_CMP;
2226 		}
2227 		xpt_done(ccb);
2228 		break;
2229 	case XPT_ABORT:			/* Abort the specified CCB */
2230 	{
2231 		union ccb *accb = ccb->cab.abort_ccb;
2232 		CAMLOCK_2_ISPLOCK(isp);
2233 		switch (accb->ccb_h.func_code) {
2234 #ifdef	ISP_TARGET_MODE
2235 		case XPT_ACCEPT_TARGET_IO:
2236 		case XPT_IMMED_NOTIFY:
2237         		ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2238 			break;
2239 		case XPT_CONT_TARGET_IO:
2240 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2241 			ccb->ccb_h.status = CAM_UA_ABORT;
2242 			break;
2243 #endif
2244 		case XPT_SCSI_IO:
2245 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2246 			if (error) {
2247 				ccb->ccb_h.status = CAM_UA_ABORT;
2248 			} else {
2249 				ccb->ccb_h.status = CAM_REQ_CMP;
2250 			}
2251 			break;
2252 		default:
2253 			ccb->ccb_h.status = CAM_REQ_INVALID;
2254 			break;
2255 		}
2256 		ISPLOCK_2_CAMLOCK(isp);
2257 		xpt_done(ccb);
2258 		break;
2259 	}
2260 #ifdef	CAM_NEW_TRAN_CODE
2261 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2262 #else
2263 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2264 #endif
2265 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2266 		cts = &ccb->cts;
2267 		if (!IS_CURRENT_SETTINGS(cts)) {
2268 			ccb->ccb_h.status = CAM_REQ_INVALID;
2269 			xpt_done(ccb);
2270 			break;
2271 		}
2272 		tgt = cts->ccb_h.target_id;
2273 		CAMLOCK_2_ISPLOCK(isp);
2274 		if (IS_SCSI(isp)) {
2275 #ifndef	CAM_NEW_TRAN_CODE
2276 			sdparam *sdp = isp->isp_param;
2277 			u_int16_t *dptr;
2278 
2279 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2280 
2281 			sdp += bus;
2282 			/*
2283 			 * We always update (internally) from goal_flags
2284 			 * so any request to change settings just gets
2285 			 * vectored to that location.
2286 			 */
2287 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2288 
2289 			/*
2290 			 * Note that these operations affect the
2291 			 * goal flags (goal_flags), not
2292 			 * the current state flags. Then we mark
2293 			 * things so that the next operation to
2294 			 * this HBA will cause the update to occur.
2295 			 */
2296 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2297 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2298 					*dptr |= DPARM_DISC;
2299 				} else {
2300 					*dptr &= ~DPARM_DISC;
2301 				}
2302 			}
2303 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2304 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2305 					*dptr |= DPARM_TQING;
2306 				} else {
2307 					*dptr &= ~DPARM_TQING;
2308 				}
2309 			}
2310 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2311 				switch (cts->bus_width) {
2312 				case MSG_EXT_WDTR_BUS_16_BIT:
2313 					*dptr |= DPARM_WIDE;
2314 					break;
2315 				default:
2316 					*dptr &= ~DPARM_WIDE;
2317 				}
2318 			}
2319 			/*
2320 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2321 			 * of nonzero will cause us to go to the
2322 			 * selected (from NVRAM) maximum value for
2323 			 * this device. At a later point, we'll
2324 			 * allow finer control.
2325 			 */
2326 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2327 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2328 			    (cts->sync_offset > 0)) {
2329 				*dptr |= DPARM_SYNC;
2330 			} else {
2331 				*dptr &= ~DPARM_SYNC;
2332 			}
2333 			*dptr |= DPARM_SAFE_DFLT;
2334 #else
2335 			struct ccb_trans_settings_scsi *scsi =
2336 			    &cts->proto_specific.scsi;
2337 			struct ccb_trans_settings_spi *spi =
2338 			    &cts->xport_specific.spi;
2339 			sdparam *sdp = isp->isp_param;
2340 			u_int16_t *dptr;
2341 
2342 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2343 			sdp += bus;
2344 			/*
2345 			 * We always update (internally) from goal_flags
2346 			 * so any request to change settings just gets
2347 			 * vectored to that location.
2348 			 */
2349 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2350 
2351 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2352 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2353 					*dptr |= DPARM_DISC;
2354 				else
2355 					*dptr &= ~DPARM_DISC;
2356 			}
2357 
2358 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2359 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2360 					*dptr |= DPARM_TQING;
2361 				else
2362 					*dptr &= ~DPARM_TQING;
2363 			}
2364 
2365 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2366 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2367 					*dptr |= DPARM_WIDE;
2368 				else
2369 					*dptr &= ~DPARM_WIDE;
2370 			}
2371 
2372 			/*
2373 			 * XXX: FIX ME
2374 			 */
2375 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2376 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2377 			    (spi->sync_period && spi->sync_offset)) {
2378 				*dptr |= DPARM_SYNC;
2379 				/*
2380 				 * XXX: CHECK FOR LEGALITY
2381 				 */
2382 				sdp->isp_devparam[tgt].goal_period =
2383 				    spi->sync_period;
2384 				sdp->isp_devparam[tgt].goal_offset =
2385 				    spi->sync_offset;
2386 			} else {
2387 				*dptr &= ~DPARM_SYNC;
2388 			}
2389 #endif
2390 			isp_prt(isp, ISP_LOGDEBUG0,
2391 			    "SET bus %d targ %d to flags %x off %x per %x",
2392 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2393 			    sdp->isp_devparam[tgt].goal_offset,
2394 			    sdp->isp_devparam[tgt].goal_period);
2395 			sdp->isp_devparam[tgt].dev_update = 1;
2396 			isp->isp_update |= (1 << bus);
2397 		}
2398 		ISPLOCK_2_CAMLOCK(isp);
2399 		ccb->ccb_h.status = CAM_REQ_CMP;
2400 		xpt_done(ccb);
2401 		break;
2402 	case XPT_GET_TRAN_SETTINGS:
2403 		cts = &ccb->cts;
2404 		tgt = cts->ccb_h.target_id;
2405 		CAMLOCK_2_ISPLOCK(isp);
2406 		if (IS_FC(isp)) {
2407 #ifndef	CAM_NEW_TRAN_CODE
2408 			/*
2409 			 * A lot of normal SCSI things don't make sense here.
2410 			 */
2411 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2412 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2413 			/*
2414 			 * How do you measure the width of a high
2415 			 * speed serial bus? Well, in bytes.
2416 			 *
2417 			 * Offset and period make no sense, though, so we set
2418 			 * (above) a 'base' transfer speed to be gigabit.
2419 			 */
2420 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2421 #else
2422 			fcparam *fcp = isp->isp_param;
2423 			struct ccb_trans_settings_fc *fc =
2424 			    &cts->xport_specific.fc;
2425 
2426 			cts->protocol = PROTO_SCSI;
2427 			cts->protocol_version = SCSI_REV_2;
2428 			cts->transport = XPORT_FC;
2429 			cts->transport_version = 0;
2430 
2431 			fc->valid = CTS_FC_VALID_SPEED;
2432 			if (fcp->isp_gbspeed == 2)
2433 				fc->bitrate = 200000;
2434 			else
2435 				fc->bitrate = 100000;
2436 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2437 				struct lportdb *lp = &fcp->portdb[tgt];
2438 				fc->wwnn = lp->node_wwn;
2439 				fc->wwpn = lp->port_wwn;
2440 				fc->port = lp->portid;
2441 				fc->valid |= CTS_FC_VALID_WWNN |
2442 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2443 			}
2444 #endif
2445 		} else {
2446 #ifdef	CAM_NEW_TRAN_CODE
2447 			struct ccb_trans_settings_scsi *scsi =
2448 			    &cts->proto_specific.scsi;
2449 			struct ccb_trans_settings_spi *spi =
2450 			    &cts->xport_specific.spi;
2451 #endif
2452 			sdparam *sdp = isp->isp_param;
2453 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2454 			u_int16_t dval, pval, oval;
2455 
2456 			sdp += bus;
2457 
2458 			if (IS_CURRENT_SETTINGS(cts)) {
2459 				sdp->isp_devparam[tgt].dev_refresh = 1;
2460 				isp->isp_update |= (1 << bus);
2461 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2462 				    NULL);
2463 				dval = sdp->isp_devparam[tgt].actv_flags;
2464 				oval = sdp->isp_devparam[tgt].actv_offset;
2465 				pval = sdp->isp_devparam[tgt].actv_period;
2466 			} else {
2467 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2468 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2469 				pval = sdp->isp_devparam[tgt].nvrm_period;
2470 			}
2471 
2472 #ifndef	CAM_NEW_TRAN_CODE
2473 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2474 
2475 			if (dval & DPARM_DISC) {
2476 				cts->flags |= CCB_TRANS_DISC_ENB;
2477 			}
2478 			if (dval & DPARM_TQING) {
2479 				cts->flags |= CCB_TRANS_TAG_ENB;
2480 			}
2481 			if (dval & DPARM_WIDE) {
2482 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2483 			} else {
2484 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2485 			}
2486 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2487 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2488 
2489 			if ((dval & DPARM_SYNC) && oval != 0) {
2490 				cts->sync_period = pval;
2491 				cts->sync_offset = oval;
2492 				cts->valid |=
2493 				    CCB_TRANS_SYNC_RATE_VALID |
2494 				    CCB_TRANS_SYNC_OFFSET_VALID;
2495 			}
2496 #else
2497 			cts->protocol = PROTO_SCSI;
2498 			cts->protocol_version = SCSI_REV_2;
2499 			cts->transport = XPORT_SPI;
2500 			cts->transport_version = 2;
2501 
2502 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2503 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2504 			if (dval & DPARM_DISC) {
2505 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2506 			}
2507 			if (dval & DPARM_TQING) {
2508 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2509 			}
2510 			if ((dval & DPARM_SYNC) && oval && pval) {
2511 				spi->sync_offset = oval;
2512 				spi->sync_period = pval;
2513 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2514 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2515 			}
2516 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2517 			if (dval & DPARM_WIDE) {
2518 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2519 			} else {
2520 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2521 			}
2522 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2523 				scsi->valid = CTS_SCSI_VALID_TQ;
2524 				spi->valid |= CTS_SPI_VALID_DISC;
2525 			} else {
2526 				scsi->valid = 0;
2527 			}
2528 #endif
2529 			isp_prt(isp, ISP_LOGDEBUG0,
2530 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2531 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2532 			    bus, tgt, dval, oval, pval);
2533 		}
2534 		ISPLOCK_2_CAMLOCK(isp);
2535 		ccb->ccb_h.status = CAM_REQ_CMP;
2536 		xpt_done(ccb);
2537 		break;
2538 
2539 	case XPT_CALC_GEOMETRY:
2540 	{
2541 		struct ccb_calc_geometry *ccg;
2542 		u_int32_t secs_per_cylinder;
2543 		u_int32_t size_mb;
2544 
2545 		ccg = &ccb->ccg;
2546 		if (ccg->block_size == 0) {
2547 			isp_prt(isp, ISP_LOGERR,
2548 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2549 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2550 			ccb->ccb_h.status = CAM_REQ_INVALID;
2551 			xpt_done(ccb);
2552 			break;
2553 		}
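		/*
		 * Use the usual CAM geometry heuristic: volumes larger than
		 * 1GB are reported as 255 heads and 63 sectors per track,
		 * smaller ones as 64 and 32, with the cylinder count derived
		 * from the volume size.
		 */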
2554 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2555 		if (size_mb > 1024) {
2556 			ccg->heads = 255;
2557 			ccg->secs_per_track = 63;
2558 		} else {
2559 			ccg->heads = 64;
2560 			ccg->secs_per_track = 32;
2561 		}
2562 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2563 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2564 		ccb->ccb_h.status = CAM_REQ_CMP;
2565 		xpt_done(ccb);
2566 		break;
2567 	}
2568 	case XPT_RESET_BUS:		/* Reset the specified bus */
2569 		bus = cam_sim_bus(sim);
2570 		CAMLOCK_2_ISPLOCK(isp);
2571 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2572 		ISPLOCK_2_CAMLOCK(isp);
2573 		if (error)
2574 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2575 		else {
2576 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2577 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2578 			else if (isp->isp_path != NULL)
2579 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2580 			ccb->ccb_h.status = CAM_REQ_CMP;
2581 		}
2582 		xpt_done(ccb);
2583 		break;
2584 
2585 	case XPT_TERM_IO:		/* Terminate the I/O process */
2586 		ccb->ccb_h.status = CAM_REQ_INVALID;
2587 		xpt_done(ccb);
2588 		break;
2589 
2590 	case XPT_PATH_INQ:		/* Path routing inquiry */
2591 	{
2592 		struct ccb_pathinq *cpi = &ccb->cpi;
2593 
2594 		cpi->version_num = 1;
2595 #ifdef	ISP_TARGET_MODE
2596 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2597 #else
2598 		cpi->target_sprt = 0;
2599 #endif
2600 		cpi->hba_eng_cnt = 0;
2601 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2602 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2603 		cpi->bus_id = cam_sim_bus(sim);
2604 		if (IS_FC(isp)) {
2605 			cpi->hba_misc = PIM_NOBUSRESET;
2606 			/*
2607 			 * Because our loop ID can shift from time to time,
2608 			 * make our initiator ID out of range of our bus.
2609 			 */
2610 			cpi->initiator_id = cpi->max_target + 1;
2611 
2612 			/*
2613 			 * Set base transfer capabilities for Fibre Channel.
2614 			 * Technically not correct because we don't know
2615 			 * what media we're running on top of- but we'll
2616 			 * what media we're running on top of, but we'll look
2617 			 * good reporting the nominal rate (100 or 200 MB/s).
2618 			if (FCPARAM(isp)->isp_gbspeed == 2)
2619 				cpi->base_transfer_speed = 200000;
2620 			else
2621 				cpi->base_transfer_speed = 100000;
2622 			cpi->hba_inquiry = PI_TAG_ABLE;
2623 #ifdef	CAM_NEW_TRAN_CODE
2624 			cpi->transport = XPORT_FC;
2625 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2626 #endif
2627 		} else {
2628 			sdparam *sdp = isp->isp_param;
2629 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2630 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2631 			cpi->hba_misc = 0;
2632 			cpi->initiator_id = sdp->isp_initiator_id;
2633 			cpi->base_transfer_speed = 3300;
2634 #ifdef	CAM_NEW_TRAN_CODE
2635 			cpi->transport = XPORT_SPI;
2636 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2637 #endif
2638 		}
2639 #ifdef	CAM_NEW_TRAN_CODE
2640 		cpi->protocol = PROTO_SCSI;
2641 		cpi->protocol_version = SCSI_REV_2;
2642 #endif
2643 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2644 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2645 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2646 		cpi->unit_number = cam_sim_unit(sim);
2647 		cpi->ccb_h.status = CAM_REQ_CMP;
2648 		xpt_done(ccb);
2649 		break;
2650 	}
2651 	default:
2652 		ccb->ccb_h.status = CAM_REQ_INVALID;
2653 		xpt_done(ccb);
2654 		break;
2655 	}
2656 }
2657 
2658 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2659 void
2660 isp_done(struct ccb_scsiio *sccb)
2661 {
2662 	struct ispsoftc *isp = XS_ISP(sccb);
2663 
2664 	if (XS_NOERR(sccb))
2665 		XS_SETERR(sccb, CAM_REQ_CMP);
2666 
2667 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2668 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2669 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2670 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2671 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2672 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2673 		} else {
2674 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2675 		}
2676 	}
2677 
2678 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
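	/*
	 * If the command did not complete successfully, freeze the device
	 * queue (if it isn't frozen already) so that CAM holds further I/O
	 * to this device until error handling releases the queue.
	 */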
2679 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2680 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2681 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2682 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2683 			isp_prt(isp, ISP_LOGDEBUG0,
2684 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2685 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2686 			    sccb->ccb_h.status, sccb->scsi_status);
2687 		}
2688 	}
2689 
2690 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2691 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2692 		xpt_print_path(sccb->ccb_h.path);
2693 		isp_prt(isp, ISP_LOGINFO,
2694 		    "cam completion status 0x%x", sccb->ccb_h.status);
2695 	}
2696 
2697 	XS_CMD_S_DONE(sccb);
2698 	if (XS_CMD_WDOG_P(sccb) == 0) {
2699 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2700 		if (XS_CMD_GRACE_P(sccb)) {
2701 			isp_prt(isp, ISP_LOGDEBUG2,
2702 			    "finished command on borrowed time");
2703 		}
2704 		XS_CMD_S_CLEAR(sccb);
2705 		ISPLOCK_2_CAMLOCK(isp);
2706 		xpt_done((union ccb *) sccb);
2707 		CAMLOCK_2_ISPLOCK(isp);
2708 	}
2709 }
2710 
2711 int
2712 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2713 {
2714 	int bus, rv = 0;
2715 	switch (cmd) {
2716 	case ISPASYNC_NEW_TGT_PARAMS:
2717 	{
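		/*
		 * The core code has settled on new transfer parameters for a
		 * target. Translate the active flags into a CAM transfer
		 * settings structure and broadcast it with an AC_TRANSFER_NEG
		 * async callback on a temporary path.
		 */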
2718 #ifdef	CAM_NEW_TRAN_CODE
2719 		struct ccb_trans_settings_scsi *scsi;
2720 		struct ccb_trans_settings_spi *spi;
2721 #endif
2722 		int flags, tgt;
2723 		sdparam *sdp = isp->isp_param;
2724 		struct ccb_trans_settings cts;
2725 		struct cam_path *tmppath;
2726 
2727 		bzero(&cts, sizeof (struct ccb_trans_settings));
2728 
2729 		tgt = *((int *)arg);
2730 		bus = (tgt >> 16) & 0xffff;
2731 		tgt &= 0xffff;
2732 		sdp += bus;
2733 		ISPLOCK_2_CAMLOCK(isp);
2734 		if (xpt_create_path(&tmppath, NULL,
2735 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2736 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2737 			CAMLOCK_2_ISPLOCK(isp);
2738 			isp_prt(isp, ISP_LOGWARN,
2739 			    "isp_async cannot make temp path for %d.%d",
2740 			    tgt, bus);
2741 			rv = -1;
2742 			break;
2743 		}
2744 		CAMLOCK_2_ISPLOCK(isp);
2745 		flags = sdp->isp_devparam[tgt].actv_flags;
2746 #ifdef	CAM_NEW_TRAN_CODE
2747 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2748 		cts.protocol = PROTO_SCSI;
2749 		cts.transport = XPORT_SPI;
2750 
2751 		scsi = &cts.proto_specific.scsi;
2752 		spi = &cts.xport_specific.spi;
2753 
2754 		if (flags & DPARM_TQING) {
2755 			scsi->valid |= CTS_SCSI_VALID_TQ;
2756 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2757 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2758 		}
2759 
2760 		if (flags & DPARM_DISC) {
2761 			spi->valid |= CTS_SPI_VALID_DISC;
2762 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2763 		}
2764 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2765 		if (flags & DPARM_WIDE) {
2766 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2767 		} else {
2768 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2769 		}
2770 		if (flags & DPARM_SYNC) {
2771 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2772 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2773 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2774 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2775 		}
2776 #else
2777 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2778 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2779 		if (flags & DPARM_DISC) {
2780 			cts.flags |= CCB_TRANS_DISC_ENB;
2781 		}
2782 		if (flags & DPARM_TQING) {
2783 			cts.flags |= CCB_TRANS_TAG_ENB;
2784 		}
2785 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2786 		cts.bus_width = (flags & DPARM_WIDE)?
2787 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2788 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2789 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2790 		if (flags & DPARM_SYNC) {
2791 			cts.valid |=
2792 			    CCB_TRANS_SYNC_RATE_VALID |
2793 			    CCB_TRANS_SYNC_OFFSET_VALID;
2794 		}
2795 #endif
2796 		isp_prt(isp, ISP_LOGDEBUG2,
2797 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2798 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2799 		    sdp->isp_devparam[tgt].actv_offset, flags);
2800 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2801 		ISPLOCK_2_CAMLOCK(isp);
2802 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2803 		xpt_free_path(tmppath);
2804 		CAMLOCK_2_ISPLOCK(isp);
2805 		break;
2806 	}
2807 	case ISPASYNC_BUS_RESET:
2808 		bus = *((int *)arg);
2809 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2810 		    bus);
2811 		if (bus > 0 && isp->isp_path2) {
2812 			ISPLOCK_2_CAMLOCK(isp);
2813 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2814 			CAMLOCK_2_ISPLOCK(isp);
2815 		} else if (isp->isp_path) {
2816 			ISPLOCK_2_CAMLOCK(isp);
2817 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2818 			CAMLOCK_2_ISPLOCK(isp);
2819 		}
2820 		break;
2821 	case ISPASYNC_LIP:
2822 		if (isp->isp_path) {
2823 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2824 		}
2825 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2826 		break;
2827 	case ISPASYNC_LOOP_RESET:
2828 		if (isp->isp_path) {
2829 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2830 		}
2831 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2832 		break;
2833 	case ISPASYNC_LOOP_DOWN:
2834 		if (isp->isp_path) {
2835 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2836 		}
2837 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2838 		break;
2839 	case ISPASYNC_LOOP_UP:
2840 		/*
2841 		 * Now we just note that Loop has come up. We don't
2842 		 * actually do anything because we're waiting for a
2843 		 * Change Notify before activating the FC cleanup
2844 		 * thread to look at the state of the loop again.
2845 		 */
2846 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2847 		break;
2848 	case ISPASYNC_PROMENADE:
2849 	{
2850 		struct cam_path *tmppath;
2851 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2852 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2853 		static const char *roles[4] = {
2854 		    "(none)", "Target", "Initiator", "Target/Initiator"
2855 		};
2856 		fcparam *fcp = isp->isp_param;
2857 		int tgt = *((int *) arg);
2858 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2859 		struct lportdb *lp = &fcp->portdb[tgt];
2860 
2861 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2862 		    roles[lp->roles & 0x3],
2863 		    (lp->valid)? "Arrived" : "Departed",
2864 		    (u_int32_t) (lp->port_wwn >> 32),
2865 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2866 		    (u_int32_t) (lp->node_wwn >> 32),
2867 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2868 
2869 		ISPLOCK_2_CAMLOCK(isp);
2870 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2871 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2872 			CAMLOCK_2_ISPLOCK(isp);
2873 			break;
2874 		}
2875 		/*
2876 		 * Policy: only announce targets.
2877 		 */
2878 		if (lp->roles & is_tgt_mask) {
2879 			if (lp->valid) {
2880 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2881 			} else {
2882 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2883 			}
2884 		}
2885 		xpt_free_path(tmppath);
2886 		CAMLOCK_2_ISPLOCK(isp);
2887 		break;
2888 	}
2889 	case ISPASYNC_CHANGE_NOTIFY:
2890 		if (arg == ISPASYNC_CHANGE_PDB) {
2891 			isp_prt(isp, ISP_LOGINFO,
2892 			    "Port Database Changed");
2893 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2894 			isp_prt(isp, ISP_LOGINFO,
2895 			    "Name Server Database Changed");
2896 		}
2897 #ifdef	ISP_SMPLOCK
2898 		cv_signal(&isp->isp_osinfo.kthread_cv);
2899 #else
2900 		wakeup(&isp->isp_osinfo.kthread_cv);
2901 #endif
2902 		break;
2903 	case ISPASYNC_FABRIC_DEV:
2904 	{
2905 		int target, base, lim;
2906 		fcparam *fcp = isp->isp_param;
2907 		struct lportdb *lp = NULL;
2908 		struct lportdb *clp = (struct lportdb *) arg;
2909 		char *pt;
2910 
2911 		switch (clp->port_type) {
2912 		case 1:
2913 			pt = "   N_Port";
2914 			break;
2915 		case 2:
2916 			pt = "  NL_Port";
2917 			break;
2918 		case 3:
2919 			pt = "F/NL_Port";
2920 			break;
2921 		case 0x7f:
2922 			pt = "  Nx_Port";
2923 			break;
2924 		case 0x81:
2925 			pt = "  F_port";
2926 			break;
2927 		case 0x82:
2928 			pt = "  FL_Port";
2929 			break;
2930 		case 0x84:
2931 			pt = "   E_port";
2932 			break;
2933 		default:
2934 			pt = " ";
2935 			break;
2936 		}
2937 
2938 		isp_prt(isp, ISP_LOGINFO,
2939 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2940 
2941 		/*
2942 		 * If we don't have an initiator role we bail.
2943 		 *
2944 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2945 		 */
2946 
2947 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2948 			break;
2949 		}
2950 
2951 		/*
2952 		 * Is this entry for us? If so, we bail.
2953 		 */
2954 
2955 		if (fcp->isp_portid == clp->portid) {
2956 			break;
2957 		}
2958 
2959 		/*
2960 		 * Else, the default policy is to find room for it in
2961 		 * our local port database. Later, when we execute
2962 		 * the call to isp_pdb_sync either this newly arrived
2963 		 * or already logged in device will be (re)announced.
2964 		 */
2965 
2966 		if (fcp->isp_topo == TOPO_FL_PORT)
2967 			base = FC_SNS_ID+1;
2968 		else
2969 			base = 0;
2970 
2971 		if (fcp->isp_topo == TOPO_N_PORT)
2972 			lim = 1;
2973 		else
2974 			lim = MAX_FC_TARG;
2975 
2976 		/*
2977 		 * Is it already in our list?
2978 		 */
2979 		for (target = base; target < lim; target++) {
2980 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2981 				continue;
2982 			}
2983 			lp = &fcp->portdb[target];
2984 			if (lp->port_wwn == clp->port_wwn &&
2985 			    lp->node_wwn == clp->node_wwn) {
2986 				lp->fabric_dev = 1;
2987 				break;
2988 			}
2989 		}
2990 		if (target < lim) {
2991 			break;
2992 		}
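		/*
		 * Not in the local port database yet: find the first free
		 * slot (one with a zero port WWN) and record the new fabric
		 * device there for the next isp_pdb_sync to announce.
		 */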
2993 		for (target = base; target < lim; target++) {
2994 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2995 				continue;
2996 			}
2997 			lp = &fcp->portdb[target];
2998 			if (lp->port_wwn == 0) {
2999 				break;
3000 			}
3001 		}
3002 		if (target == lim) {
3003 			isp_prt(isp, ISP_LOGWARN,
3004 			    "out of space for fabric devices");
3005 			break;
3006 		}
3007 		lp->port_type = clp->port_type;
3008 		lp->fc4_type = clp->fc4_type;
3009 		lp->node_wwn = clp->node_wwn;
3010 		lp->port_wwn = clp->port_wwn;
3011 		lp->portid = clp->portid;
3012 		lp->fabric_dev = 1;
3013 		break;
3014 	}
3015 #ifdef	ISP_TARGET_MODE
3016 	case ISPASYNC_TARGET_MESSAGE:
3017 	{
3018 		tmd_msg_t *mp = arg;
3019 		isp_prt(isp, ISP_LOGALL,
3020 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3021 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3022 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3023 		    mp->nt_msg[0]);
3024 		break;
3025 	}
3026 	case ISPASYNC_TARGET_EVENT:
3027 	{
3028 		tmd_event_t *ep = arg;
3029 		isp_prt(isp, ISP_LOGALL,
3030 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
3031 		break;
3032 	}
3033 	case ISPASYNC_TARGET_ACTION:
3034 		switch (((isphdr_t *)arg)->rqs_entry_type) {
3035 		default:
3036 			isp_prt(isp, ISP_LOGWARN,
3037 			   "event 0x%x for unhandled target action",
3038 			    ((isphdr_t *)arg)->rqs_entry_type);
3039 			break;
3040 		case RQSTYPE_NOTIFY:
3041 			if (IS_SCSI(isp)) {
3042 				rv = isp_handle_platform_notify_scsi(isp,
3043 				    (in_entry_t *) arg);
3044 			} else {
3045 				rv = isp_handle_platform_notify_fc(isp,
3046 				    (in_fcentry_t *) arg);
3047 			}
3048 			break;
3049 		case RQSTYPE_ATIO:
3050 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3051 			break;
3052 		case RQSTYPE_ATIO2:
3053 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3054 			break;
3055 		case RQSTYPE_CTIO2:
3056 		case RQSTYPE_CTIO:
3057 			rv = isp_handle_platform_ctio(isp, arg);
3058 			break;
3059 		case RQSTYPE_ENABLE_LUN:
3060 		case RQSTYPE_MODIFY_LUN:
3061 			if (IS_DUALBUS(isp)) {
3062 				bus =
3063 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3064 			} else {
3065 				bus = 0;
3066 			}
3067 			isp_cv_signal_rqe(isp, bus,
3068 			    ((lun_entry_t *)arg)->le_status);
3069 			break;
3070 		}
3071 		break;
3072 #endif
3073 	case ISPASYNC_FW_CRASH:
3074 	{
3075 		u_int16_t mbox1, mbox6;
3076 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3077 		if (IS_DUALBUS(isp)) {
3078 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
3079 		} else {
3080 			mbox6 = 0;
3081 		}
3082 		isp_prt(isp, ISP_LOGERR,
3083 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3084 		    mbox6, mbox1);
3085 #ifdef	ISP_FW_CRASH_DUMP
3086 		/*
3087 		 * XXX: really need a thread to do this right.
3088 		 */
3089 		if (IS_FC(isp)) {
3090 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3091 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3092 			isp_freeze_loopdown(isp, "f/w crash");
3093 			isp_fw_dump(isp);
3094 		}
3095 		isp_reinit(isp);
3096 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3097 #endif
3098 		break;
3099 	}
3100 	case ISPASYNC_UNHANDLED_RESPONSE:
3101 		break;
3102 	default:
3103 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3104 		break;
3105 	}
3106 	return (rv);
3107 }
3108 
3109 
3110 /*
3111  * Locks are held before coming here.
3112  */
3113 void
3114 isp_uninit(struct ispsoftc *isp)
3115 {
3116 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3117 	DISABLE_INTS(isp);
3118 }
3119 
3120 void
3121 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3122 {
3123 	va_list ap;
3124 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3125 		return;
3126 	}
3127 	printf("%s: ", device_get_nameunit(isp->isp_dev));
3128 	va_start(ap, fmt);
3129 	vprintf(fmt, ap);
3130 	va_end(ap);
3131 	printf("\n");
3132 }
3133