xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision ff7cd805df308ccde1d28ffaa084e25925763f98)
1 /*
2  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
3  *
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice immediately at the beginning of the file, without modification,
11  *    this list of conditions, and the following disclaimer.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <dev/isp/isp_freebsd.h>
32 #include <sys/unistd.h>
33 #include <sys/kthread.h>
34 #include <machine/stdarg.h>	/* for use by isp_prt below */
35 #include <sys/conf.h>
36 #include <sys/module.h>
37 #include <sys/ioccom.h>
38 #include <dev/isp/isp_ioctl.h>
39 
40 
41 MODULE_VERSION(isp, 1);
42 MODULE_DEPEND(isp, cam, 1, 1, 1);
43 int isp_announced = 0;
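/*
 * Hook filled in by the (optional) ispfw firmware module; when it is
 * non-NULL the bus attachment code uses it to fetch a loadable firmware
 * image for the adapter instead of relying on what is in the chip's ROM.
 */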
44 ispfwfunc *isp_get_firmware_p = NULL;
45 
46 static d_ioctl_t ispioctl;
47 static void isp_intr_enable(void *);
48 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
49 static void isp_poll(struct cam_sim *);
50 static timeout_t isp_watchdog;
51 static void isp_kthread(void *);
52 static void isp_action(struct cam_sim *, union ccb *);
53 
54 
55 #define ISP_CDEV_MAJOR	248
56 static struct cdevsw isp_cdevsw = {
57 	.d_ioctl =	ispioctl,
58 	.d_name =	"isp",
59 	.d_maj =	ISP_CDEV_MAJOR,
60 	.d_flags =	D_TAPE,
61 };
62 
63 static struct ispsoftc *isplist = NULL;
64 
65 void
66 isp_attach(struct ispsoftc *isp)
67 {
68 	int primary, secondary;
69 	struct ccb_setasync csa;
70 	struct cam_devq *devq;
71 	struct cam_sim *sim;
72 	struct cam_path *path;
73 
74 	/*
75 	 * Establish (in case of 12X0) which bus is the primary.
76 	 */
77 
78 	primary = 0;
79 	secondary = 1;
80 
81 	/*
82 	 * Create the device queue for our SIM(s).
83 	 */
84 	devq = cam_simq_alloc(isp->isp_maxcmds);
85 	if (devq == NULL) {
86 		return;
87 	}
88 
89 	/*
90 	 * Construct our SIM entry.
91 	 */
92 	ISPLOCK_2_CAMLOCK(isp);
93 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
94 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
95 	if (sim == NULL) {
96 		cam_simq_free(devq);
97 		CAMLOCK_2_ISPLOCK(isp);
98 		return;
99 	}
100 	CAMLOCK_2_ISPLOCK(isp);
101 
102 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
103 	isp->isp_osinfo.ehook.ich_arg = isp;
104 	ISPLOCK_2_CAMLOCK(isp);
105 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
106 		cam_sim_free(sim, TRUE);
107 		CAMLOCK_2_ISPLOCK(isp);
108 		isp_prt(isp, ISP_LOGERR,
109 		    "could not establish interrupt enable hook");
110 		return;
111 	}
112 
113 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
114 		cam_sim_free(sim, TRUE);
115 		CAMLOCK_2_ISPLOCK(isp);
116 		return;
117 	}
118 
119 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
120 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
121 		xpt_bus_deregister(cam_sim_path(sim));
122 		cam_sim_free(sim, TRUE);
123 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
124 		CAMLOCK_2_ISPLOCK(isp);
125 		return;
126 	}
127 
128 	xpt_setup_ccb(&csa.ccb_h, path, 5);
129 	csa.ccb_h.func_code = XPT_SASYNC_CB;
130 	csa.event_enable = AC_LOST_DEVICE;
131 	csa.callback = isp_cam_async;
132 	csa.callback_arg = sim;
133 	xpt_action((union ccb *)&csa);
134 	CAMLOCK_2_ISPLOCK(isp);
135 	isp->isp_sim = sim;
136 	isp->isp_path = path;
137 	/*
138 	 * Create a kernel thread for fibre channel instances. We
139 	 * don't have dual channel FC cards.
140 	 */
141 	if (IS_FC(isp)) {
142 		ISPLOCK_2_CAMLOCK(isp);
143 		/* XXX: LOCK VIOLATION */
144 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
145 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
146 		    RFHIGHPID, 0, "%s: fc_thrd",
147 		    device_get_nameunit(isp->isp_dev))) {
148 			xpt_bus_deregister(cam_sim_path(sim));
149 			cam_sim_free(sim, TRUE);
150 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
151 			CAMLOCK_2_ISPLOCK(isp);
152 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
153 			return;
154 		}
155 		CAMLOCK_2_ISPLOCK(isp);
156 	}
157 
158 
159 	/*
160 	 * If we have a second channel, construct SIM entry for that.
161 	 */
162 	if (IS_DUALBUS(isp)) {
163 		ISPLOCK_2_CAMLOCK(isp);
164 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
165 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
166 		if (sim == NULL) {
167 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
168 			xpt_free_path(isp->isp_path);
169 			cam_simq_free(devq);
170 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
171 			return;
172 		}
173 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
174 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
175 			xpt_free_path(isp->isp_path);
176 			cam_sim_free(sim, TRUE);
177 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
178 			CAMLOCK_2_ISPLOCK(isp);
179 			return;
180 		}
181 
182 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
183 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
184 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
185 			xpt_free_path(isp->isp_path);
186 			xpt_bus_deregister(cam_sim_path(sim));
187 			cam_sim_free(sim, TRUE);
188 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
189 			CAMLOCK_2_ISPLOCK(isp);
190 			return;
191 		}
192 
193 		xpt_setup_ccb(&csa.ccb_h, path, 5);
194 		csa.ccb_h.func_code = XPT_SASYNC_CB;
195 		csa.event_enable = AC_LOST_DEVICE;
196 		csa.callback = isp_cam_async;
197 		csa.callback_arg = sim;
198 		xpt_action((union ccb *)&csa);
199 		CAMLOCK_2_ISPLOCK(isp);
200 		isp->isp_sim2 = sim;
201 		isp->isp_path2 = path;
202 	}
203 
204 #ifdef	ISP_TARGET_MODE
205 	cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
206 	cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
207 	cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
208 	cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
209 #endif
210 	/*
211 	 * Create device nodes
212 	 */
213 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
214 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
215 
216 	if (isp->isp_role != ISP_ROLE_NONE) {
217 		isp->isp_state = ISP_RUNSTATE;
218 		ENABLE_INTS(isp);
219 	}
220 	if (isplist == NULL) {
221 		isplist = isp;
222 	} else {
223 		struct ispsoftc *tmp = isplist;
224 		while (tmp->isp_osinfo.next) {
225 			tmp = tmp->isp_osinfo.next;
226 		}
227 		tmp->isp_osinfo.next = isp;
228 	}
229 
230 }
231 
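/*
 * Freeze the CAM SIM queue while the Fibre Channel loop is down, so that
 * new commands are held in CAM instead of being started against a dead
 * loop. SIMQFRZ_LOOPDOWN records why the queue was frozen so the freeze
 * can be undone once good loop state is reestablished.
 */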
232 static INLINE void
233 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
234 {
235 	if (isp->isp_osinfo.simqfrozen == 0) {
236 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
237 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
238 		ISPLOCK_2_CAMLOCK(isp);
239 		xpt_freeze_simq(isp->isp_sim, 1);
240 		CAMLOCK_2_ISPLOCK(isp);
241 	} else {
242 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
243 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
244 	}
245 }
246 
247 static int
248 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
249 {
250 	struct ispsoftc *isp;
251 	int retval = ENOTTY;
252 
253 	isp = isplist;
254 	while (isp) {
255 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
256 			break;
257 		}
258 		isp = isp->isp_osinfo.next;
259 	}
260 	if (isp == NULL)
261 		return (ENXIO);
262 
263 	switch (cmd) {
264 #ifdef	ISP_FW_CRASH_DUMP
265 	case ISP_GET_FW_CRASH_DUMP:
266 	{
267 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
268 		size_t sz;
269 
270 		retval = 0;
271 		if (IS_2200(isp))
272 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
273 		else
274 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
275 		ISP_LOCK(isp);
276 		if (ptr && *ptr) {
277 			void *uaddr = *((void **) addr);
278 			if (copyout(ptr, uaddr, sz)) {
279 				retval = EFAULT;
280 			} else {
281 				*ptr = 0;
282 			}
283 		} else {
284 			retval = ENXIO;
285 		}
286 		ISP_UNLOCK(isp);
287 		break;
288 	}
289 
290 	case ISP_FORCE_CRASH_DUMP:
291 		ISP_LOCK(isp);
292 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
293 		isp_fw_dump(isp);
294 		isp_reinit(isp);
295 		ISP_UNLOCK(isp);
296 		retval = 0;
297 		break;
298 #endif
299 	case ISP_SDBLEV:
300 	{
301 		int olddblev = isp->isp_dblev;
302 		isp->isp_dblev = *(int *)addr;
303 		*(int *)addr = olddblev;
304 		retval = 0;
305 		break;
306 	}
307 	case ISP_RESETHBA:
308 		ISP_LOCK(isp);
309 		isp_reinit(isp);
310 		ISP_UNLOCK(isp);
311 		retval = 0;
312 		break;
313 	case ISP_RESCAN:
314 		if (IS_FC(isp)) {
315 			ISP_LOCK(isp);
316 			if (isp_fc_runstate(isp, 5 * 1000000)) {
317 				retval = EIO;
318 			} else {
319 				retval = 0;
320 			}
321 			ISP_UNLOCK(isp);
322 		}
323 		break;
324 	case ISP_FC_LIP:
325 		if (IS_FC(isp)) {
326 			ISP_LOCK(isp);
327 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
328 				retval = EIO;
329 			} else {
330 				retval = 0;
331 			}
332 			ISP_UNLOCK(isp);
333 		}
334 		break;
335 	case ISP_FC_GETDINFO:
336 	{
337 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
338 		struct lportdb *lp;
339 
340 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
341 			retval = EINVAL;
342 			break;
343 		}
344 		ISP_LOCK(isp);
345 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
346 		if (lp->valid) {
347 			ifc->loopid = lp->loopid;
348 			ifc->portid = lp->portid;
349 			ifc->node_wwn = lp->node_wwn;
350 			ifc->port_wwn = lp->port_wwn;
351 			retval = 0;
352 		} else {
353 			retval = ENODEV;
354 		}
355 		ISP_UNLOCK(isp);
356 		break;
357 	}
358 	case ISP_GET_STATS:
359 	{
360 		isp_stats_t *sp = (isp_stats_t *) addr;
361 
362 		MEMZERO(sp, sizeof (*sp));
363 		sp->isp_stat_version = ISP_STATS_VERSION;
364 		sp->isp_type = isp->isp_type;
365 		sp->isp_revision = isp->isp_revision;
366 		ISP_LOCK(isp);
367 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
368 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
369 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
370 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
371 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
372 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
373 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
374 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
375 		ISP_UNLOCK(isp);
376 		retval = 0;
377 		break;
378 	}
379 	case ISP_CLR_STATS:
380 		ISP_LOCK(isp);
381 		isp->isp_intcnt = 0;
382 		isp->isp_intbogus = 0;
383 		isp->isp_intmboxc = 0;
384 		isp->isp_intoasync = 0;
385 		isp->isp_rsltccmplt = 0;
386 		isp->isp_fphccmplt = 0;
387 		isp->isp_rscchiwater = 0;
388 		isp->isp_fpcchiwater = 0;
389 		ISP_UNLOCK(isp);
390 		retval = 0;
391 		break;
392 	case ISP_FC_GETHINFO:
393 	{
394 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
395 		MEMZERO(hba, sizeof (*hba));
396 		ISP_LOCK(isp);
397 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
398 		hba->fc_scsi_supported = 1;
399 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
400 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
401 		hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
402 		hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
403 		hba->active_node_wwn = ISP_NODEWWN(isp);
404 		hba->active_port_wwn = ISP_PORTWWN(isp);
405 		ISP_UNLOCK(isp);
406 		retval = 0;
407 		break;
408 	}
409 	case ISP_GET_FC_PARAM:
410 	{
411 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
412 
413 		if (!IS_FC(isp)) {
414 			retval = EINVAL;
415 			break;
416 		}
417 		f->parameter = 0;
418 		if (strcmp(f->param_name, "framelength") == 0) {
419 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
420 			retval = 0;
421 			break;
422 		}
423 		if (strcmp(f->param_name, "exec_throttle") == 0) {
424 			f->parameter = FCPARAM(isp)->isp_execthrottle;
425 			retval = 0;
426 			break;
427 		}
428 		if (strcmp(f->param_name, "fullduplex") == 0) {
429 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
430 				f->parameter = 1;
431 			retval = 0;
432 			break;
433 		}
434 		if (strcmp(f->param_name, "loopid") == 0) {
435 			f->parameter = FCPARAM(isp)->isp_loopid;
436 			retval = 0;
437 			break;
438 		}
439 		retval = EINVAL;
440 		break;
441 	}
442 	case ISP_SET_FC_PARAM:
443 	{
444 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
445 		u_int32_t param = f->parameter;
446 
447 		if (!IS_FC(isp)) {
448 			retval = EINVAL;
449 			break;
450 		}
451 		f->parameter = 0;
452 		if (strcmp(f->param_name, "framelength") == 0) {
453 			if (param != 512 && param != 1024 && param != 2048) {
454 				retval = EINVAL;
455 				break;
456 			}
457 			FCPARAM(isp)->isp_maxfrmlen = param;
458 			retval = 0;
459 			break;
460 		}
461 		if (strcmp(f->param_name, "exec_throttle") == 0) {
462 			if (param < 16 || param > 255) {
463 				retval = EINVAL;
464 				break;
465 			}
466 			FCPARAM(isp)->isp_execthrottle = param;
467 			retval = 0;
468 			break;
469 		}
470 		if (strcmp(f->param_name, "fullduplex") == 0) {
471 			if (param != 0 && param != 1) {
472 				retval = EINVAL;
473 				break;
474 			}
475 			if (param) {
476 				FCPARAM(isp)->isp_fwoptions |=
477 				    ICBOPT_FULL_DUPLEX;
478 			} else {
479 				FCPARAM(isp)->isp_fwoptions &=
480 				    ~ICBOPT_FULL_DUPLEX;
481 			}
482 			retval = 0;
483 			break;
484 		}
485 		if (strcmp(f->param_name, "loopid") == 0) {
486 			if (param < 0 || param > 125) {
487 				retval = EINVAL;
488 				break;
489 			}
490 			FCPARAM(isp)->isp_loopid = param;
491 			retval = 0;
492 			break;
493 		}
494 		retval = EINVAL;
495 		break;
496 	}
497 	default:
498 		break;
499 	}
500 	return (retval);
501 }
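
/*
 * Illustrative userland usage of the control node created in isp_attach
 * (not part of the driver; the exact argument types for each ioctl are
 * defined in <dev/isp/isp_ioctl.h>):
 *
 *	int fd = open("/dev/isp0", O_RDWR);
 *	struct isp_hba_device hba;
 *	if (fd >= 0 && ioctl(fd, ISP_FC_GETHINFO, &hba) == 0)
 *		printf("loop id %d\n", hba.fc_loopid);
 */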
502 
503 static void
504 isp_intr_enable(void *arg)
505 {
506 	struct ispsoftc *isp = arg;
507 	if (isp->isp_role != ISP_ROLE_NONE) {
508 		ENABLE_INTS(isp);
509 		isp->isp_osinfo.intsok = 1;
510 	}
511 	/* Release our hook so that the boot can continue. */
512 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
513 }
514 
515 /*
516  * Put the target mode functions here, because some are inlines
517  */
518 
519 #ifdef	ISP_TARGET_MODE
520 
521 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
522 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
523 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
524 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
525 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
526 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
527 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
528 static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
529 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
530 static cam_status
531 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
532 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
533 static void isp_en_lun(struct ispsoftc *, union ccb *);
534 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
535 static timeout_t isp_refire_putback_atio;
536 static void isp_complete_ctio(union ccb *);
537 static void isp_target_putback_atio(union ccb *);
538 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
539 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
540 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
541 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
542 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
543 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
544 
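/*
 * Per-lun target mode state (tstate_t) lives in a small hash table keyed
 * by LUN_HASH_FUNC(isp, bus, lun), with collisions chained through the
 * 'next' pointer. Lookups via get_lun_statep() bump a 'hold' reference
 * that must be dropped with rls_lun_statep().
 */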
545 static INLINE int
546 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
547 {
548 	tstate_t *tptr;
549 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
550 	if (tptr == NULL) {
551 		return (0);
552 	}
553 	do {
554 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
555 			return (1);
556 		}
557 	} while ((tptr = tptr->next) != NULL);
558 	return (0);
559 }
560 
561 static INLINE int
562 are_any_luns_enabled(struct ispsoftc *isp, int port)
563 {
564 	int lo, hi;
565 	if (IS_DUALBUS(isp)) {
566 		lo = (port * (LUN_HASH_SIZE >> 1));
567 		hi = lo + (LUN_HASH_SIZE >> 1);
568 	} else {
569 		lo = 0;
570 		hi = LUN_HASH_SIZE;
571 	}
572 	for (; lo < hi; lo++) {
573 		if (isp->isp_osinfo.lun_hash[lo]) {
574 			return (1);
575 		}
576 	}
577 	return (0);
578 }
579 
580 static INLINE tstate_t *
581 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
582 {
583 	tstate_t *tptr = NULL;
584 
585 	if (lun == CAM_LUN_WILDCARD) {
586 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
587 			tptr = &isp->isp_osinfo.tsdflt[bus];
588 			tptr->hold++;
589 			return (tptr);
590 		}
591 	} else {
592 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
593 		if (tptr == NULL) {
594 			return (NULL);
595 		}
596 	}
597 
598 	do {
599 		if (tptr->lun == lun && tptr->bus == bus) {
600 			tptr->hold++;
601 			return (tptr);
602 		}
603 	} while ((tptr = tptr->next) != NULL);
604 	return (tptr);
605 }
606 
607 static INLINE void
608 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
609 {
610 	if (tptr->hold)
611 		tptr->hold--;
612 }
613 
614 static INLINE int
615 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
616 {
617 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
618 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
619 #ifdef	ISP_SMPLOCK
620 		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
621 			return (-1);
622 		}
623 #else
624 		if (tsleep(&isp->isp_osinfo.tgtcv0[bus], PZERO, "cv_isp", 0)) {
625 			return (-1);
626 		}
627 #endif
628 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
629 	}
630 	return (0);
631 }
632 
633 static INLINE int
634 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
635 {
636 #ifdef	ISP_SMPLOCK
637 	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
638 		return (-1);
639 	}
640 #else
641 	if (tsleep(&isp->isp_osinfo.tgtcv1[bus], PZERO, "cv_isp1", 0)) {
642 		return (-1);
643 	}
644 #endif
645 	return (0);
646 }
647 
648 static INLINE void
649 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
650 {
651 	isp->isp_osinfo.rstatus[bus] = status;
652 #ifdef	ISP_SMPLOCK
653 	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
654 #else
655 	wakeup(&isp->isp_osinfo.tgtcv1[bus]);
656 #endif
657 }
658 
659 static INLINE void
660 isp_vsema_rqe(struct ispsoftc *isp, int bus)
661 {
662 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
663 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
664 #ifdef	ISP_SMPLOCK
665 		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
666 #else
667 		wakeup(&isp->isp_osinfo.tgtcv0[bus]);
668 #endif
669 	}
670 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
671 }
672 
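/*
 * Find the ATIO private data slot for a given tag by linear search of the
 * fixed-size pool; a tag value of zero matches a free slot, which is how
 * isp_handle_platform_atio2() allocates one for a new command.
 */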
673 static INLINE atio_private_data_t *
674 isp_get_atpd(struct ispsoftc *isp, int tag)
675 {
676 	atio_private_data_t *atp;
677 	for (atp = isp->isp_osinfo.atpdp;
678 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
679 		if (atp->tag == tag)
680 			return (atp);
681 	}
682 	return (NULL);
683 }
684 
685 static cam_status
686 create_lun_state(struct ispsoftc *isp, int bus,
687     struct cam_path *path, tstate_t **rslt)
688 {
689 	cam_status status;
690 	lun_id_t lun;
691 	int hfx;
692 	tstate_t *tptr, *new;
693 
694 	lun = xpt_path_lun_id(path);
695 	if (lun < 0) {
696 		return (CAM_LUN_INVALID);
697 	}
698 	if (is_lun_enabled(isp, bus, lun)) {
699 		return (CAM_LUN_ALRDY_ENA);
700 	}
701 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
702 	if (new == NULL) {
703 		return (CAM_RESRC_UNAVAIL);
704 	}
705 
706 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
707 	    xpt_path_target_id(path), xpt_path_lun_id(path));
708 	if (status != CAM_REQ_CMP) {
709 		free(new, M_DEVBUF);
710 		return (status);
711 	}
712 	new->bus = bus;
713 	new->lun = lun;
714 	SLIST_INIT(&new->atios);
715 	SLIST_INIT(&new->inots);
716 	new->hold = 1;
717 
718 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
719 	tptr = isp->isp_osinfo.lun_hash[hfx];
720 	if (tptr == NULL) {
721 		isp->isp_osinfo.lun_hash[hfx] = new;
722 	} else {
723 		while (tptr->next)
724 			tptr = tptr->next;
725 		tptr->next = new;
726 	}
727 	*rslt = new;
728 	return (CAM_REQ_CMP);
729 }
730 
731 static INLINE void
732 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
733 {
734 	int hfx;
735 	tstate_t *lw, *pw;
736 
737 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
738 	if (tptr->hold) {
739 		return;
740 	}
741 	pw = isp->isp_osinfo.lun_hash[hfx];
742 	if (pw == NULL) {
743 		return;
744 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
745 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
746 	} else {
747 		lw = pw;
748 		pw = lw->next;
749 		while (pw) {
750 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
751 				lw->next = pw->next;
752 				break;
753 			}
754 			lw = pw;
755 			pw = pw->next;
756 		}
757 		if (pw == NULL) {
758 			return;
759 		}
760 	}
761 	free(tptr, M_DEVBUF);
762 }
763 
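/*
 * Process an XPT_EN_LUN CCB: toggle target mode for the bus if necessary,
 * then issue the appropriate ENABLE/MODIFY/DISABLE LUN commands to the
 * firmware, waiting on the per-bus request semaphore for each response.
 */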
764 /*
765  * we enter with our locks held.
766  */
767 static void
768 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
769 {
770 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
771 	struct ccb_en_lun *cel = &ccb->cel;
772 	tstate_t *tptr;
773 	u_int16_t rstat;
774 	int bus, cmd, av, wildcard;
775 	lun_id_t lun;
776 	target_id_t tgt;
777 
778 
779 	bus = XS_CHANNEL(ccb) & 0x1;
780 	tgt = ccb->ccb_h.target_id;
781 	lun = ccb->ccb_h.target_lun;
782 
783 	/*
784 	 * Do some sanity checking first.
785 	 */
786 
787 	if ((lun != CAM_LUN_WILDCARD) &&
788 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
789 		ccb->ccb_h.status = CAM_LUN_INVALID;
790 		return;
791 	}
792 
793 	if (IS_SCSI(isp)) {
794 		sdparam *sdp = isp->isp_param;
795 		sdp += bus;
796 		if (tgt != CAM_TARGET_WILDCARD &&
797 		    tgt != sdp->isp_initiator_id) {
798 			ccb->ccb_h.status = CAM_TID_INVALID;
799 			return;
800 		}
801 	} else {
802 		if (tgt != CAM_TARGET_WILDCARD &&
803 		    tgt != FCPARAM(isp)->isp_iid) {
804 			ccb->ccb_h.status = CAM_TID_INVALID;
805 			return;
806 		}
807 		/*
808 		 * This is as good a place as any to check f/w capabilities.
809 		 */
810 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
811 			isp_prt(isp, ISP_LOGERR,
812 			    "firmware does not support target mode");
813 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
814 			return;
815 		}
816 		/*
817 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
818 		 * XXX: dork with our already fragile enable/disable code.
819 		 */
820 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
821 			isp_prt(isp, ISP_LOGERR,
822 			    "firmware not SCCLUN capable");
823 		}
824 	}
825 
826 	if (tgt == CAM_TARGET_WILDCARD) {
827 		if (lun == CAM_LUN_WILDCARD) {
828 			wildcard = 1;
829 		} else {
830 			ccb->ccb_h.status = CAM_LUN_INVALID;
831 			return;
832 		}
833 	} else {
834 		wildcard = 0;
835 	}
836 
837 	/*
838 	 * Next check to see whether this is a target/lun wildcard action.
839 	 *
840 	 * If so, we know that we can accept commands for luns that haven't
841 	 * been enabled yet and send them upstream. Otherwise, we have to
842 	 * handle them locally (if we see them at all).
843 	 */
844 
845 	if (wildcard) {
846 		tptr = &isp->isp_osinfo.tsdflt[bus];
847 		if (cel->enable) {
848 			if (isp->isp_osinfo.tmflags[bus] &
849 			    TM_WILDCARD_ENABLED) {
850 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
851 				return;
852 			}
853 			ccb->ccb_h.status =
854 			    xpt_create_path(&tptr->owner, NULL,
855 			    xpt_path_path_id(ccb->ccb_h.path),
856 			    xpt_path_target_id(ccb->ccb_h.path),
857 			    xpt_path_lun_id(ccb->ccb_h.path));
858 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
859 				return;
860 			}
861 			SLIST_INIT(&tptr->atios);
862 			SLIST_INIT(&tptr->inots);
863 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
864 		} else {
865 			if ((isp->isp_osinfo.tmflags[bus] &
866 			    TM_WILDCARD_ENABLED) == 0) {
867 				ccb->ccb_h.status = CAM_REQ_CMP;
868 				return;
869 			}
870 			if (tptr->hold) {
871 				ccb->ccb_h.status = CAM_SCSI_BUSY;
872 				return;
873 			}
874 			xpt_free_path(tptr->owner);
875 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
876 		}
877 	}
878 
879 	/*
880 	 * Now check to see whether this bus needs to be
881 	 * enabled/disabled with respect to target mode.
882 	 */
883 	av = bus << 31;
884 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
885 		av |= ENABLE_TARGET_FLAG;
886 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
887 		if (av) {
888 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
889 			if (wildcard) {
890 				isp->isp_osinfo.tmflags[bus] &=
891 				    ~TM_WILDCARD_ENABLED;
892 				xpt_free_path(tptr->owner);
893 			}
894 			return;
895 		}
896 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
897 		isp_prt(isp, ISP_LOGINFO,
898 		    "Target Mode enabled on channel %d", bus);
899 	} else if (cel->enable == 0 &&
900 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
901 		if (are_any_luns_enabled(isp, bus)) {
902 			ccb->ccb_h.status = CAM_SCSI_BUSY;
903 			return;
904 		}
905 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
906 		if (av) {
907 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
908 			return;
909 		}
910 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
911 		isp_prt(isp, ISP_LOGINFO,
912 		    "Target Mode disabled on channel %d", bus);
913 	}
914 
915 	if (wildcard) {
916 		ccb->ccb_h.status = CAM_REQ_CMP;
917 		return;
918 	}
919 
920 	if (cel->enable) {
921 		ccb->ccb_h.status =
922 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
923 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
924 			return;
925 		}
926 	} else {
927 		tptr = get_lun_statep(isp, bus, lun);
928 		if (tptr == NULL) {
929 			ccb->ccb_h.status = CAM_LUN_INVALID;
930 			return;
931 		}
932 	}
933 
934 	if (isp_psema_sig_rqe(isp, bus)) {
935 		rls_lun_statep(isp, tptr);
936 		if (cel->enable)
937 			destroy_lun_state(isp, tptr);
938 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
939 		return;
940 	}
941 
942 	if (cel->enable) {
943 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
944 		int c, n, ulun = lun;
945 
946 		cmd = RQSTYPE_ENABLE_LUN;
947 		c = DFLT_CMND_CNT;
948 		n = DFLT_INOT_CNT;
949 		if (IS_FC(isp) && lun != 0) {
950 			cmd = RQSTYPE_MODIFY_LUN;
951 			n = 0;
952 			/*
953 			 * For SCC firmware, we only deal with setting
954 			 * (enabling or modifying) lun 0.
955 			 */
956 			ulun = 0;
957 		}
958 		rstat = LUN_ERR;
959 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
960 			xpt_print_path(ccb->ccb_h.path);
961 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
962 			goto out;
963 		}
964 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
965 			xpt_print_path(ccb->ccb_h.path);
966 			isp_prt(isp, ISP_LOGERR,
967 			    "wait for ENABLE/MODIFY LUN timed out");
968 			goto out;
969 		}
970 		rstat = isp->isp_osinfo.rstatus[bus];
971 		if (rstat != LUN_OK) {
972 			xpt_print_path(ccb->ccb_h.path);
973 			isp_prt(isp, ISP_LOGERR,
974 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
975 			goto out;
976 		}
977 	} else {
978 		int c, n, ulun = lun;
979 		u_int32_t seq;
980 
981 		rstat = LUN_ERR;
982 		seq = isp->isp_osinfo.rollinfo++;
983 		cmd = -RQSTYPE_MODIFY_LUN;
984 
985 		c = DFLT_CMND_CNT;
986 		n = DFLT_INOT_CNT;
987 		if (IS_FC(isp) && lun != 0) {
988 			n = 0;
989 			/*
990 			 * For SCC firmware, we only deal with setting
991 			 * (enabling or modifying) lun 0.
992 			 */
993 			ulun = 0;
994 		}
995 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
996 			xpt_print_path(ccb->ccb_h.path);
997 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
998 			goto out;
999 		}
1000 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1001 			xpt_print_path(ccb->ccb_h.path);
1002 			isp_prt(isp, ISP_LOGERR,
1003 			    "wait for MODIFY LUN timed out");
1004 			goto out;
1005 		}
1006 		rstat = isp->isp_osinfo.rstatus[bus];
1007 		if (rstat != LUN_OK) {
1008 			xpt_print_path(ccb->ccb_h.path);
1009 			isp_prt(isp, ISP_LOGERR,
1010 			    "MODIFY LUN returned 0x%x", rstat);
1011 			goto out;
1012 		}
1013 		if (IS_FC(isp) && lun) {
1014 			goto out;
1015 		}
1016 
1017 		seq = isp->isp_osinfo.rollinfo++;
1018 
1019 		rstat = LUN_ERR;
1020 		cmd = -RQSTYPE_ENABLE_LUN;
1021 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
1022 			xpt_print_path(ccb->ccb_h.path);
1023 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1024 			goto out;
1025 		}
1026 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1027 			xpt_print_path(ccb->ccb_h.path);
1028 			isp_prt(isp, ISP_LOGERR,
1029 			     "wait for DISABLE LUN timed out");
1030 			goto out;
1031 		}
1032 		rstat = isp->isp_osinfo.rstatus[bus];
1033 		if (rstat != LUN_OK) {
1034 			xpt_print_path(ccb->ccb_h.path);
1035 			isp_prt(isp, ISP_LOGWARN,
1036 			    "DISABLE LUN returned 0x%x", rstat);
1037 			goto out;
1038 		}
1039 		if (are_any_luns_enabled(isp, bus) == 0) {
1040 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1041 			if (av) {
1042 				isp_prt(isp, ISP_LOGWARN,
1043 				    "disable target mode on channel %d failed",
1044 				    bus);
1045 				goto out;
1046 			}
1047 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1048 			xpt_print_path(ccb->ccb_h.path);
1049 			isp_prt(isp, ISP_LOGINFO,
1050 			    "Target Mode disabled on channel %d", bus);
1051 		}
1052 	}
1053 
1054 out:
1055 	isp_vsema_rqe(isp, bus);
1056 
1057 	if (rstat != LUN_OK) {
1058 		xpt_print_path(ccb->ccb_h.path);
1059 		isp_prt(isp, ISP_LOGWARN,
1060 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
1061 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1062 		rls_lun_statep(isp, tptr);
1063 		if (cel->enable)
1064 			destroy_lun_state(isp, tptr);
1065 	} else {
1066 		xpt_print_path(ccb->ccb_h.path);
1067 		isp_prt(isp, ISP_LOGINFO, lfmt,
1068 		    (cel->enable) ? "en" : "dis", bus);
1069 		rls_lun_statep(isp, tptr);
1070 		if (cel->enable == 0) {
1071 			destroy_lun_state(isp, tptr);
1072 		}
1073 		ccb->ccb_h.status = CAM_REQ_CMP;
1074 	}
1075 }
1076 
1077 static cam_status
1078 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1079 {
1080 	tstate_t *tptr;
1081 	struct ccb_hdr_slist *lp;
1082 	struct ccb_hdr *curelm;
1083 	int found;
1084 	union ccb *accb = ccb->cab.abort_ccb;
1085 
1086 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1087 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1088 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1089 			return (CAM_PATH_INVALID);
1090 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1091 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1092 			return (CAM_PATH_INVALID);
1093 		}
1094 	}
1095 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1096 	if (tptr == NULL) {
1097 		return (CAM_PATH_INVALID);
1098 	}
1099 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1100 		lp = &tptr->atios;
1101 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1102 		lp = &tptr->inots;
1103 	} else {
1104 		rls_lun_statep(isp, tptr);
1105 		return (CAM_UA_ABORT);
1106 	}
1107 	curelm = SLIST_FIRST(lp);
1108 	found = 0;
1109 	if (curelm == &accb->ccb_h) {
1110 		found = 1;
1111 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1112 	} else {
1113 		while(curelm != NULL) {
1114 			struct ccb_hdr *nextelm;
1115 
1116 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1117 			if (nextelm == &accb->ccb_h) {
1118 				found = 1;
1119 				SLIST_NEXT(curelm, sim_links.sle) =
1120 				    SLIST_NEXT(nextelm, sim_links.sle);
1121 				break;
1122 			}
1123 			curelm = nextelm;
1124 		}
1125 	}
1126 	rls_lun_statep(isp, tptr);
1127 	if (found) {
1128 		accb->ccb_h.status = CAM_REQ_ABORTED;
1129 		return (CAM_REQ_CMP);
1130 	}
1131 	return(CAM_PATH_INVALID);
1132 }
1133 
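/*
 * Convert a CTIO CCB from CAM into a CTIO (parallel SCSI) or CTIO2 (FC)
 * request queue entry. For FC we use mode 0 for data movement and mode 1
 * for status/sense, computing residuals from the ATIO private data, then
 * hand the entry to the DMA setup code to be queued to the firmware.
 */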
1134 static cam_status
1135 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1136 {
1137 	void *qe;
1138 	struct ccb_scsiio *cso = &ccb->csio;
1139 	u_int16_t *hp, save_handle;
1140 	u_int16_t nxti, optr;
1141 	u_int8_t local[QENTRY_LEN];
1142 
1143 
1144 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1145 		xpt_print_path(ccb->ccb_h.path);
1146 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1147 		return (CAM_RESRC_UNAVAIL);
1148 	}
1149 	bzero(local, QENTRY_LEN);
1150 
1151 	/*
1152 	 * We're either moving data or completing a command here.
1153 	 */
1154 
1155 	if (IS_FC(isp)) {
1156 		atio_private_data_t *atp;
1157 		ct2_entry_t *cto = (ct2_entry_t *) local;
1158 
1159 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1160 		cto->ct_header.rqs_entry_count = 1;
1161 		cto->ct_iid = cso->init_id;
1162 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1163 			cto->ct_lun = ccb->ccb_h.target_lun;
1164 		}
1165 
1166 		atp = isp_get_atpd(isp, cso->tag_id);
1167 		if (atp == NULL) {
1168 			isp_prt(isp, ISP_LOGERR,
1169 			    "cannot find private data adjunct for tag %x",
1170 			    cso->tag_id);
1171 			return (-1);
1172 		}
1173 
1174 		cto->ct_rxid = cso->tag_id;
1175 		if (cso->dxfer_len == 0) {
1176 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1177 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1178 				cto->ct_flags |= CT2_SENDSTATUS;
1179 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1180 				cto->ct_resid =
1181 				    atp->orig_datalen - atp->bytes_xfered;
1182 				if (cto->ct_resid < 0) {
1183 					cto->rsp.m1.ct_scsi_status |=
1184 					    CT2_DATA_OVER;
1185 				} else if (cto->ct_resid > 0) {
1186 					cto->rsp.m1.ct_scsi_status |=
1187 					    CT2_DATA_UNDER;
1188 				}
1189 			}
1190 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1191 				int m = min(cso->sense_len, MAXRESPLEN);
1192 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1193 				cto->rsp.m1.ct_senselen = m;
1194 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1195 			}
1196 		} else {
1197 			cto->ct_flags |= CT2_FLAG_MODE0;
1198 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1199 				cto->ct_flags |= CT2_DATA_IN;
1200 			} else {
1201 				cto->ct_flags |= CT2_DATA_OUT;
1202 			}
1203 			cto->ct_reloff = atp->bytes_xfered;
1204 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1205 				cto->ct_flags |= CT2_SENDSTATUS;
1206 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1207 				cto->ct_resid =
1208 				    atp->orig_datalen -
1209 				    (atp->bytes_xfered + cso->dxfer_len);
1210 				if (cto->ct_resid < 0) {
1211 					cto->rsp.m0.ct_scsi_status |=
1212 					    CT2_DATA_OVER;
1213 				} else if (cto->ct_resid > 0) {
1214 					cto->rsp.m0.ct_scsi_status |=
1215 					    CT2_DATA_UNDER;
1216 				}
1217 			} else {
1218 				atp->last_xframt = cso->dxfer_len;
1219 			}
1220 			/*
1221 			 * If we're sending data and status back together,
1222 			 * we can't also send back sense data as well.
1223 			 */
1224 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1225 		}
1226 
1227 		if (cto->ct_flags & CT2_SENDSTATUS) {
1228 			isp_prt(isp, ISP_LOGTDEBUG0,
1229 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1230 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1231 			    cso->dxfer_len, cto->ct_resid);
1232 			cto->ct_flags |= CT2_CCINCR;
1233 			atp->state = ATPD_STATE_LAST_CTIO;
1234 		} else
1235 			atp->state = ATPD_STATE_CTIO;
1236 		cto->ct_timeout = 10;
1237 		hp = &cto->ct_syshandle;
1238 	} else {
1239 		ct_entry_t *cto = (ct_entry_t *) local;
1240 
1241 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1242 		cto->ct_header.rqs_entry_count = 1;
1243 		cto->ct_iid = cso->init_id;
1244 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1245 		cto->ct_tgt = ccb->ccb_h.target_id;
1246 		cto->ct_lun = ccb->ccb_h.target_lun;
1247 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1248 		if (AT_HAS_TAG(cso->tag_id)) {
1249 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1250 			cto->ct_flags |= CT_TQAE;
1251 		}
1252 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1253 			cto->ct_flags |= CT_NODISC;
1254 		}
1255 		if (cso->dxfer_len == 0) {
1256 			cto->ct_flags |= CT_NO_DATA;
1257 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1258 			cto->ct_flags |= CT_DATA_IN;
1259 		} else {
1260 			cto->ct_flags |= CT_DATA_OUT;
1261 		}
1262 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1263 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1264 			cto->ct_scsi_status = cso->scsi_status;
1265 			cto->ct_resid = cso->resid;
1266 			isp_prt(isp, ISP_LOGTDEBUG0,
1267 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1268 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1269 			    cso->tag_id);
1270 		}
1271 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1272 		cto->ct_timeout = 10;
1273 		hp = &cto->ct_syshandle;
1274 	}
1275 
1276 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1277 		xpt_print_path(ccb->ccb_h.path);
1278 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1279 		return (CAM_RESRC_UNAVAIL);
1280 	}
1281 
1282 
1283 	/*
1284 	 * Call the dma setup routines for this entry (and any subsequent
1285 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1286 	 * new things to play with. As with isp_start's usage of DMA setup,
1287 	 * any swizzling is done in the machine dependent layer. Because
1288 	 * of this, we put the request onto the queue area first in native
1289 	 * format.
1290 	 */
1291 
1292 	save_handle = *hp;
1293 
1294 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1295 	case CMD_QUEUED:
1296 		ISP_ADD_REQUEST(isp, nxti);
1297 		return (CAM_REQ_INPROG);
1298 
1299 	case CMD_EAGAIN:
1300 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1301 		isp_destroy_handle(isp, save_handle);
1302 		return (CAM_RESRC_UNAVAIL);
1303 
1304 	default:
1305 		isp_destroy_handle(isp, save_handle);
1306 		return (XS_ERR(ccb));
1307 	}
1308 }
1309 
1310 static void
1311 isp_refire_putback_atio(void *arg)
1312 {
1313 	int s = splcam();
1314 	isp_target_putback_atio(arg);
1315 	splx(s);
1316 }
1317 
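/*
 * Hand an ATIO back to the firmware so the lun's command resource count
 * is replenished, then complete the associated CTIO CCB back to CAM. If
 * the request queue is momentarily full, retry later via timeout().
 */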
1318 static void
1319 isp_target_putback_atio(union ccb *ccb)
1320 {
1321 	struct ispsoftc *isp;
1322 	struct ccb_scsiio *cso;
1323 	u_int16_t nxti, optr;
1324 	void *qe;
1325 
1326 	isp = XS_ISP(ccb);
1327 
1328 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1329 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1330 		isp_prt(isp, ISP_LOGWARN,
1331 		    "isp_target_putback_atio: Request Queue Overflow");
1332 		return;
1333 	}
1334 	bzero(qe, QENTRY_LEN);
1335 	cso = &ccb->csio;
1336 	if (IS_FC(isp)) {
1337 		at2_entry_t local, *at = &local;
1338 		MEMZERO(at, sizeof (at2_entry_t));
1339 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1340 		at->at_header.rqs_entry_count = 1;
1341 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1342 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1343 		} else {
1344 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1345 		}
1346 		at->at_status = CT_OK;
1347 		at->at_rxid = cso->tag_id;
1348 		at->at_iid = cso->ccb_h.target_id;
1349 		isp_put_atio2(isp, at, qe);
1350 	} else {
1351 		at_entry_t local, *at = &local;
1352 		MEMZERO(at, sizeof (at_entry_t));
1353 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1354 		at->at_header.rqs_entry_count = 1;
1355 		at->at_iid = cso->init_id;
1356 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1357 		at->at_tgt = cso->ccb_h.target_id;
1358 		at->at_lun = cso->ccb_h.target_lun;
1359 		at->at_status = CT_OK;
1360 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1361 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1362 		isp_put_atio(isp, at, qe);
1363 	}
1364 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1365 	ISP_ADD_REQUEST(isp, nxti);
1366 	isp_complete_ctio(ccb);
1367 }
1368 
1369 static void
1370 isp_complete_ctio(union ccb *ccb)
1371 {
1372 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1373 		ccb->ccb_h.status |= CAM_REQ_CMP;
1374 	}
1375 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1376 	xpt_done(ccb);
1377 }
1378 
1379 /*
1380  * Handle ATIO stuff that the generic code can't.
1381  * This means handling CDBs.
1382  */
1383 
1384 static int
1385 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1386 {
1387 	tstate_t *tptr;
1388 	int status, bus, iswildcard;
1389 	struct ccb_accept_tio *atiop;
1390 
1391 	/*
1392 	 * The firmware status (except for the QLTM_SVALID bit)
1393 	 * indicates why this ATIO was sent to us.
1394 	 *
1395 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1396 	 *
1397 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1398 	 * we're still connected on the SCSI bus.
1399 	 */
1400 	status = aep->at_status;
1401 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1402 		/*
1403 		 * Bus Phase Sequence error. We should have sense data
1404 		 * suggested by the f/w. I'm not quite sure yet what
1405 		 * to do about this for CAM.
1406 		 */
1407 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1408 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1409 		return (0);
1410 	}
1411 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1412 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1413 		    status);
1414 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1415 		return (0);
1416 	}
1417 
1418 	bus = GET_BUS_VAL(aep->at_iid);
1419 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1420 	if (tptr == NULL) {
1421 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1422 		iswildcard = 1;
1423 	} else {
1424 		iswildcard = 0;
1425 	}
1426 
1427 	if (tptr == NULL) {
1428 		/*
1429 		 * Because we can't autofeed sense data back with
1430 		 * a command for parallel SCSI, we can't give back
1431 		 * a CHECK CONDITION. We'll give back a BUSY status
1432 		 * instead. This works out okay because the only
1433 		 * time we should, in fact, get this, is in the
1434 		 * case that somebody configured us without the
1435 		 * blackhole driver, so they get what they deserve.
1436 		 */
1437 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1438 		return (0);
1439 	}
1440 
1441 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1442 	if (atiop == NULL) {
1443 		/*
1444 		 * Because we can't autofeed sense data back with
1445 		 * a command for parallel SCSI, we can't give back
1446 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1447 		 * instead. This works out okay because the only time we
1448 		 * should, in fact, get this, is in the case that we've
1449 		 * run out of ATIOS.
1450 		 */
1451 		xpt_print_path(tptr->owner);
1452 		isp_prt(isp, ISP_LOGWARN,
1453 		    "no ATIOS for lun %d from initiator %d on channel %d",
1454 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1455 		if (aep->at_flags & AT_TQAE)
1456 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1457 		else
1458 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1459 		rls_lun_statep(isp, tptr);
1460 		return (0);
1461 	}
1462 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1463 	if (iswildcard) {
1464 		atiop->ccb_h.target_id = aep->at_tgt;
1465 		atiop->ccb_h.target_lun = aep->at_lun;
1466 	}
1467 	if (aep->at_flags & AT_NODISC) {
1468 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1469 	} else {
1470 		atiop->ccb_h.flags = 0;
1471 	}
1472 
1473 	if (status & QLTM_SVALID) {
1474 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1475 		atiop->sense_len = amt;
1476 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1477 	} else {
1478 		atiop->sense_len = 0;
1479 	}
1480 
1481 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1482 	atiop->cdb_len = aep->at_cdblen;
1483 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1484 	atiop->ccb_h.status = CAM_CDB_RECVD;
1485 	/*
1486 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1487 	 * and the handle (which we have to preserve).
1488 	 */
1489 	AT_MAKE_TAGID(atiop->tag_id, aep);
1490 	if (aep->at_flags & AT_TQAE) {
1491 		atiop->tag_action = aep->at_tag_type;
1492 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1493 	}
1494 	xpt_done((union ccb*)atiop);
1495 	isp_prt(isp, ISP_LOGTDEBUG0,
1496 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1497 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1498 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1499 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1500 	    "nondisc" : "disconnecting");
1501 	rls_lun_statep(isp, tptr);
1502 	return (0);
1503 }
1504 
1505 static int
1506 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1507 {
1508 	lun_id_t lun;
1509 	tstate_t *tptr;
1510 	struct ccb_accept_tio *atiop;
1511 	atio_private_data_t *atp;
1512 
1513 	/*
1514 	 * The firmware status (except for the QLTM_SVALID bit)
1515 	 * indicates why this ATIO was sent to us.
1516 	 *
1517 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1518 	 */
1519 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1520 		isp_prt(isp, ISP_LOGWARN,
1521 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1522 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1523 		return (0);
1524 	}
1525 
1526 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1527 		lun = aep->at_scclun;
1528 	} else {
1529 		lun = aep->at_lun;
1530 	}
1531 	tptr = get_lun_statep(isp, 0, lun);
1532 	if (tptr == NULL) {
1533 		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1534 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1535 	}
1536 
1537 	if (tptr == NULL) {
1538 		/*
1539 		 * What we'd like to know is whether or not we have a listener
1540 		 * upstream that really hasn't configured yet. If we do, then
1541 		 * we can give a more sensible reply here. If not, then we can
1542 		 * reject this out of hand.
1543 		 *
1544 		 * Choices for what to send were
1545 		 *
1546 		 *	Not Ready, Unit Not Self-Configured Yet
1547 		 *	(0x2,0x3e,0x00)
1548 		 *
1549 		 * for the former and
1550 		 *
1551 		 *	Illegal Request, Logical Unit Not Supported
1552 		 *	(0x5,0x25,0x00)
1553 		 *
1554 		 * for the latter.
1555 		 *
1556 		 * We used to decide whether there was at least one listener
1557 		 * based upon whether the black hole driver was configured.
1558 		 * However, recent config(8) changes have made this hard to do
1559 		 * at this time.
1560 		 *
1561 		 */
1562 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1563 		return (0);
1564 	}
1565 
1566 	atp = isp_get_atpd(isp, 0);
1567 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1568 	if (atiop == NULL || atp == NULL) {
1569 		/*
1570 		 * Because we can't autofeed sense data back with
1571 		 * a command for parallel SCSI, we can't give back
1572 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1573 		 * instead. This works out okay because the only time we
1574 		 * should, in fact, get this, is in the case that we've
1575 		 * run out of ATIOS.
1576 		 */
1577 		xpt_print_path(tptr->owner);
1578 		isp_prt(isp, ISP_LOGWARN,
1579 		    "no %s for lun %d from initiator %d",
1580 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1581 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1582 		rls_lun_statep(isp, tptr);
1583 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1584 		return (0);
1585 	}
1586 	atp->state = ATPD_STATE_ATIO;
1587 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1588 	tptr->atio_count--;
1589 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1590 	    lun, tptr->atio_count);
1591 
1592 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1593 		atiop->ccb_h.target_id =
1594 		    ((fcparam *)isp->isp_param)->isp_loopid;
1595 		atiop->ccb_h.target_lun = lun;
1596 	}
1597 	/*
1598 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1599 	 */
1600 	atiop->sense_len = 0;
1601 
1602 	atiop->init_id = aep->at_iid;
1603 	atiop->cdb_len = ATIO2_CDBLEN;
1604 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1605 	atiop->ccb_h.status = CAM_CDB_RECVD;
1606 	atiop->tag_id = aep->at_rxid;
1607 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1608 	case ATIO2_TC_ATTR_SIMPLEQ:
1609 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1610 		break;
1611 	case ATIO2_TC_ATTR_HEADOFQ:
1612 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1613 		break;
1614 	case ATIO2_TC_ATTR_ORDERED:
1615 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1616 		break;
1617 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1618 	case ATIO2_TC_ATTR_UNTAGGED:
1619 	default:
1620 		atiop->tag_action = 0;
1621 		break;
1622 	}
1623 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1624 
1625 	atp->tag = atiop->tag_id;
1626 	atp->lun = lun;
1627 	atp->orig_datalen = aep->at_datalen;
1628 	atp->last_xframt = 0;
1629 	atp->bytes_xfered = 0;
1630 	atp->state = ATPD_STATE_CAM;
1631 	xpt_done((union ccb*)atiop);
1632 
1633 	isp_prt(isp, ISP_LOGTDEBUG0,
1634 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1635 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1636 	    lun, aep->at_taskflags, aep->at_datalen);
1637 	rls_lun_statep(isp, tptr);
1638 	return (0);
1639 }
1640 
1641 static int
1642 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1643 {
1644 	union ccb *ccb;
1645 	int sentstatus, ok, notify_cam, resid = 0;
1646 	u_int16_t tval;
1647 
1648 	/*
1649 	 * CTIO and CTIO2 are close enough....
1650 	 */
1651 
1652 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1653 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1654 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1655 
1656 	if (IS_FC(isp)) {
1657 		ct2_entry_t *ct = arg;
1658 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1659 		if (atp == NULL) {
1660 			isp_prt(isp, ISP_LOGERR,
1661 			    "cannot find adjunct for %x after I/O",
1662 			    ct->ct_rxid);
1663 			return (0);
1664 		}
1665 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1666 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1667 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1668 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1669 		}
1670 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1671 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1672 			resid = ct->ct_resid;
1673 			atp->bytes_xfered += (atp->last_xframt - resid);
1674 			atp->last_xframt = 0;
1675 		}
1676 		if (sentstatus || !ok) {
1677 			atp->tag = 0;
1678 		}
1679 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1680 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1681 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1682 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1683 		    resid, sentstatus? "FIN" : "MID");
1684 		tval = ct->ct_rxid;
1685 
1686 		/* XXX: should really come after isp_complete_ctio */
1687 		atp->state = ATPD_STATE_PDON;
1688 	} else {
1689 		ct_entry_t *ct = arg;
1690 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1691 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1692 		/*
1693 		 * We *ought* to be able to get back to the original ATIO
1694 		 * here, but for some reason this gets lost. It's just as
1695 		 * well because it's squirrelled away as part of periph
1696 		 * private data.
1697 		 *
1698 		 * We can live without it as long as we continue to use
1699 		 * the auto-replenish feature for CTIOs.
1700 		 */
1701 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1702 		if (ct->ct_status & QLTM_SVALID) {
1703 			char *sp = (char *)ct;
1704 			sp += CTIO_SENSE_OFFSET;
1705 			ccb->csio.sense_len =
1706 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1707 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1708 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1709 		}
1710 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1711 			resid = ct->ct_resid;
1712 		}
1713 		isp_prt(isp, ISP_LOGTDEBUG0,
1714 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1715 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1716 		    ct->ct_status, ct->ct_flags, resid,
1717 		    sentstatus? "FIN" : "MID");
1718 		tval = ct->ct_fwhandle;
1719 	}
1720 	ccb->csio.resid += resid;
1721 
1722 	/*
1723 	 * We're here either because intermediate data transfers are done
1724 	 * and/or the final status CTIO (which may have joined with a
1725 	 * Data Transfer) is done.
1726 	 *
1727 	 * In any case, for this platform, the upper layers figure out
1728 	 * what to do next, so all we do here is collect status and
1729 	 * pass information along. Any DMA handles have already been
1730 	 * freed.
1731 	 */
1732 	if (notify_cam == 0) {
1733 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1734 		return (0);
1735 	}
1736 
1737 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1738 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1739 
1740 	if (!ok) {
1741 		isp_target_putback_atio(ccb);
1742 	} else {
1743 		isp_complete_ctio(ccb);
1744 
1745 	}
1746 	return (0);
1747 }
1748 
1749 static int
1750 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1751 {
1752 	return (0);	/* XXXX */
1753 }
1754 
1755 static int
1756 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1757 {
1758 
1759 	switch (inp->in_status) {
1760 	case IN_PORT_LOGOUT:
1761 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1762 		   inp->in_iid);
1763 		break;
1764 	case IN_PORT_CHANGED:
1765 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1766 		   inp->in_iid);
1767 		break;
1768 	case IN_GLOBAL_LOGO:
1769 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1770 		break;
1771 	case IN_ABORT_TASK:
1772 	{
1773 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1774 		struct ccb_immed_notify *inot = NULL;
1775 
1776 		if (atp) {
1777 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1778 			if (tptr) {
1779 				inot = (struct ccb_immed_notify *)
1780 				    SLIST_FIRST(&tptr->inots);
1781 				if (inot) {
1782 					SLIST_REMOVE_HEAD(&tptr->inots,
1783 					    sim_links.sle);
1784 				}
1785 			}
1786 			isp_prt(isp, ISP_LOGWARN,
1787 			   "abort task RX_ID %x IID %d state %d",
1788 			   inp->in_seqid, inp->in_iid, atp->state);
1789 		} else {
1790 			isp_prt(isp, ISP_LOGWARN,
1791 			   "abort task RX_ID %x from iid %d, state unknown",
1792 			   inp->in_seqid, inp->in_iid);
1793 		}
1794 		if (inot) {
1795 			inot->initiator_id = inp->in_iid;
1796 			inot->sense_len = 0;
1797 			inot->message_args[0] = MSG_ABORT_TAG;
1798 			inot->message_args[1] = inp->in_seqid & 0xff;
1799 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1800 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1801 			xpt_done((union ccb *)inot);
1802 		}
1803 		break;
1804 	}
1805 	default:
1806 		break;
1807 	}
1808 	return (0);
1809 }
1810 #endif
1811 
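/*
 * CAM async event callback. On AC_LOST_DEVICE for parallel SCSI we push
 * safe/default goal parameters for the target back to the chip so that a
 * replacement device starts out renegotiating from known-good settings.
 */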
1812 static void
1813 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1814 {
1815 	struct cam_sim *sim;
1816 	struct ispsoftc *isp;
1817 
1818 	sim = (struct cam_sim *)cbarg;
1819 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1820 	switch (code) {
1821 	case AC_LOST_DEVICE:
1822 		if (IS_SCSI(isp)) {
1823 			u_int16_t oflags, nflags;
1824 			sdparam *sdp = isp->isp_param;
1825 			int tgt;
1826 
1827 			tgt = xpt_path_target_id(path);
1828 			if (tgt >= 0) {
1829 				sdp += cam_sim_bus(sim);
1830 				ISP_LOCK(isp);
1831 				nflags = sdp->isp_devparam[tgt].nvrm_flags;
1832 #ifndef	ISP_TARGET_MODE
1833 				nflags &= DPARM_SAFE_DFLT;
1834 				if (isp->isp_loaded_fw) {
1835 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1836 				}
1837 #else
1838 				nflags = DPARM_DEFAULT;
1839 #endif
1840 				oflags = sdp->isp_devparam[tgt].goal_flags;
1841 				sdp->isp_devparam[tgt].goal_flags = nflags;
1842 				sdp->isp_devparam[tgt].dev_update = 1;
1843 				isp->isp_update |= (1 << cam_sim_bus(sim));
1844 				(void) isp_control(isp,
1845 				    ISPCTL_UPDATE_PARAMS, NULL);
1846 				sdp->isp_devparam[tgt].goal_flags = oflags;
1847 				ISP_UNLOCK(isp);
1848 			}
1849 		}
1850 		break;
1851 	default:
1852 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1853 		break;
1854 	}
1855 }
1856 
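/*
 * CAM poll entry point: with the ISP lock held, read the interrupt
 * status registers and, if anything is pending, run the normal
 * interrupt service routine.
 */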
1857 static void
1858 isp_poll(struct cam_sim *sim)
1859 {
1860 	struct ispsoftc *isp = cam_sim_softc(sim);
1861 	u_int16_t isr, sema, mbox;
1862 
1863 	ISP_LOCK(isp);
1864 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1865 		isp_intr(isp, isr, sema, mbox);
1866 	}
1867 	ISP_UNLOCK(isp);
1868 }
1869 
1870 
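/*
 * Per-command watchdog, armed when a command is queued. If the
 * command's handle is still live we first poke the interrupt handler;
 * if the command is still not done we push a SYNC_ALL marker through
 * the request queue and rearm the timer once (the "grace" period).
 * If the grace period also expires, the command is aborted, its DMA
 * resources and handle are released, and it is completed with
 * CAM_CMD_TIMEOUT.
 */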
1871 static void
1872 isp_watchdog(void *arg)
1873 {
1874 	XS_T *xs = arg;
1875 	struct ispsoftc *isp = XS_ISP(xs);
1876 	u_int32_t handle;
1877 	int iok;
1878 
1879 	/*
1880 	 * We've decided this command is dead. Make sure we're not trying
1881 	 * to kill a command that's already dead by getting its handle and
1882 	 * seeing whether it's still alive.
1883 	 */
1884 	ISP_LOCK(isp);
1885 	iok = isp->isp_osinfo.intsok;
1886 	isp->isp_osinfo.intsok = 0;
1887 	handle = isp_find_handle(isp, xs);
1888 	if (handle) {
1889 		u_int16_t isr, sema, mbox;
1890 
1891 		if (XS_CMD_DONE_P(xs)) {
1892 			isp_prt(isp, ISP_LOGDEBUG1,
1893 			    "watchdog found done cmd (handle 0x%x)", handle);
1894 			ISP_UNLOCK(isp);
1895 			return;
1896 		}
1897 
1898 		if (XS_CMD_WDOG_P(xs)) {
1899 			isp_prt(isp, ISP_LOGDEBUG2,
1900 			    "recursive watchdog (handle 0x%x)", handle);
1901 			ISP_UNLOCK(isp);
1902 			return;
1903 		}
1904 
1905 		XS_CMD_S_WDOG(xs);
1906 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1907 			isp_intr(isp, isr, sema, mbox);
1908 		}
1909 		if (XS_CMD_DONE_P(xs)) {
1910 			isp_prt(isp, ISP_LOGDEBUG2,
1911 			    "watchdog cleanup for handle 0x%x", handle);
1912 			xpt_done((union ccb *) xs);
1913 		} else if (XS_CMD_GRACE_P(xs)) {
1914 			/*
1915 			 * Make sure the command is *really* dead before we
1916 			 * release the handle (and DMA resources) for reuse.
1917 			 */
1918 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1919 
1920 			/*
1921 			 * After this point, the command is really dead.
1922 			 */
1923 			if (XS_XFRLEN(xs)) {
1924 				ISP_DMAFREE(isp, xs, handle);
1925 			}
1926 			isp_destroy_handle(isp, handle);
1927 			xpt_print_path(xs->ccb_h.path);
1928 			isp_prt(isp, ISP_LOGWARN,
1929 			    "watchdog timeout for handle 0x%x", handle);
1930 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1931 			XS_CMD_C_WDOG(xs);
1932 			isp_done(xs);
1933 		} else {
1934 			u_int16_t nxti, optr;
1935 			ispreq_t local, *mp = &local, *qe;
1936 
1937 			XS_CMD_C_WDOG(xs);
1938 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1939 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1940 				ISP_UNLOCK(isp);
1941 				return;
1942 			}
1943 			XS_CMD_S_GRACE(xs);
1944 			MEMZERO((void *) mp, sizeof (*mp));
1945 			mp->req_header.rqs_entry_count = 1;
1946 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1947 			mp->req_modifier = SYNC_ALL;
1948 			mp->req_target = XS_CHANNEL(xs) << 7;
1949 			isp_put_request(isp, mp, qe);
1950 			ISP_ADD_REQUEST(isp, nxti);
1951 		}
1952 	} else {
1953 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1954 	}
1955 	isp->isp_osinfo.intsok = iok;
1956 	ISP_UNLOCK(isp);
1957 }
1958 
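/*
 * Fibre Channel support thread. It loops waiting for the loop/fabric
 * state to become good (or for it to become clear that it never will),
 * releases any SIMQ freeze that was put in place while the loop was
 * down, and then sleeps on kthread_cv until a change notification
 * wakes it up again.
 */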
1959 static void
1960 isp_kthread(void *arg)
1961 {
1962 	struct ispsoftc *isp = arg;
1963 
1964 #ifdef	ISP_SMPLOCK
1965 	mtx_lock(&isp->isp_lock);
1966 #else
1967 	mtx_lock(&Giant);
1968 #endif
1969 	/*
1970 	 * The first loop is for the case where we have yet to get
1971 	 * good Fibre Channel state.
1972 	 */
1973 	for (;;) {
1974 		int wasfrozen;
1975 
1976 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1977 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1978 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1979 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1980 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1981 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1982 				    isp->isp_osinfo.ktmature == 0) {
1983 					break;
1984 				}
1985 			}
1986 #ifdef	ISP_SMPLOCK
1987 			msleep(isp_kthread, &isp->isp_lock,
1988 			    PRIBIO, "isp_fcthrd", hz);
1989 #else
1990 			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1991 #endif
1992 		}
1993 
1994 		/*
1995 		 * Even if we didn't get good loop state we may be
1996 		 * unfreezing the SIMQ so that we can kill off
1997 		 * commands (if we've never seen loop before, for example).
1998 		 */
1999 		isp->isp_osinfo.ktmature = 1;
2000 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2001 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2002 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2003 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2004 			ISPLOCK_2_CAMLOCK(isp);
2005 			xpt_release_simq(isp->isp_sim, 1);
2006 			CAMLOCK_2_ISPLOCK(isp);
2007 		}
2008 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
2009 #ifdef	ISP_SMPLOCK
2010 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2011 #else
2012 		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
2013 #endif
2014 	}
2015 }
2016 
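/*
 * Main CAM entry point: dispatch on the CCB function code. SCSI I/O
 * is handed to isp_start(); target mode resources, resets, aborts,
 * transfer settings and path inquiries are handled inline.
 */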
2017 static void
2018 isp_action(struct cam_sim *sim, union ccb *ccb)
2019 {
2020 	int bus, tgt, error;
2021 	struct ispsoftc *isp;
2022 	struct ccb_trans_settings *cts;
2023 
2024 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2025 
2026 	isp = (struct ispsoftc *)cam_sim_softc(sim);
2027 	ccb->ccb_h.sim_priv.entries[0].field = 0;
2028 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2029 	if (isp->isp_state != ISP_RUNSTATE &&
2030 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
2031 		CAMLOCK_2_ISPLOCK(isp);
2032 		isp_init(isp);
2033 		if (isp->isp_state != ISP_INITSTATE) {
2034 			ISP_UNLOCK(isp);
2035 			/*
2036 			 * Lie. Say it was a selection timeout.
2037 			 */
2038 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2039 			xpt_freeze_devq(ccb->ccb_h.path, 1);
2040 			xpt_done(ccb);
2041 			return;
2042 		}
2043 		isp->isp_state = ISP_RUNSTATE;
2044 		ISPLOCK_2_CAMLOCK(isp);
2045 	}
2046 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2047 
2048 
2049 	switch (ccb->ccb_h.func_code) {
2050 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2051 		/*
2052 		 * Do a couple of preliminary checks...
2053 		 */
2054 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2055 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2056 				ccb->ccb_h.status = CAM_REQ_INVALID;
2057 				xpt_done(ccb);
2058 				break;
2059 			}
2060 		}
2061 #ifdef	DIAGNOSTIC
2062 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2063 			ccb->ccb_h.status = CAM_PATH_INVALID;
2064 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2065 			ccb->ccb_h.status = CAM_PATH_INVALID;
2066 		}
2067 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2068 			isp_prt(isp, ISP_LOGERR,
2069 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2070 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2071 			xpt_done(ccb);
2072 			break;
2073 		}
2074 #endif
2075 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2076 		CAMLOCK_2_ISPLOCK(isp);
2077 		error = isp_start((XS_T *) ccb);
2078 		switch (error) {
2079 		case CMD_QUEUED:
2080 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
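			/*
			 * Convert the CCB timeout (milliseconds, or the CAM
			 * default of one minute) into clock ticks, add about
			 * two seconds of slop for interrupt latency, and
			 * clamp the result so it fits the (int) argument to
			 * timeout(). E.g. a 10000ms timeout with hz == 100
			 * becomes ((10000 * 100 + 999) / 1000) + 200 = 1200
			 * ticks.
			 */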
2081 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2082 				u_int64_t ticks = (u_int64_t) hz;
2083 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2084 					ticks = 60 * 1000 * ticks;
2085 				else
2086 					ticks = ccb->ccb_h.timeout * hz;
2087 				ticks = ((ticks + 999) / 1000) + hz + hz;
2088 				if (ticks >= 0x80000000) {
2089 					isp_prt(isp, ISP_LOGERR,
2090 					    "timeout overflow");
2091 					ticks = 0x7fffffff;
2092 				}
2093 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2094 				    (caddr_t)ccb, (int)ticks);
2095 			} else {
2096 				callout_handle_init(&ccb->ccb_h.timeout_ch);
2097 			}
2098 			ISPLOCK_2_CAMLOCK(isp);
2099 			break;
2100 		case CMD_RQLATER:
2101 			/*
2102 			 * This can only happen for Fibre Channel
2103 			 */
2104 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2105 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2106 			    isp->isp_osinfo.ktmature) {
2107 				ISPLOCK_2_CAMLOCK(isp);
2108 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2109 				xpt_done(ccb);
2110 				break;
2111 			}
2112 #ifdef	ISP_SMPLOCK
2113 			cv_signal(&isp->isp_osinfo.kthread_cv);
2114 #else
2115 			wakeup(&isp->isp_osinfo.kthread_cv);
2116 #endif
2117 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2118 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2119 			ISPLOCK_2_CAMLOCK(isp);
2120 			xpt_done(ccb);
2121 			break;
2122 		case CMD_EAGAIN:
2123 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2124 			ISPLOCK_2_CAMLOCK(isp);
2125 			xpt_done(ccb);
2126 			break;
2127 		case CMD_COMPLETE:
2128 			isp_done((struct ccb_scsiio *) ccb);
2129 			ISPLOCK_2_CAMLOCK(isp);
2130 			break;
2131 		default:
2132 			isp_prt(isp, ISP_LOGERR,
2133 			    "What's this? 0x%x at %d in file %s",
2134 			    error, __LINE__, __FILE__);
2135 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2136 			xpt_done(ccb);
2137 			ISPLOCK_2_CAMLOCK(isp);
2138 		}
2139 		break;
2140 
2141 #ifdef	ISP_TARGET_MODE
2142 	case XPT_EN_LUN:		/* Enable LUN as a target */
2143 	{
2144 		int iok;
2145 		CAMLOCK_2_ISPLOCK(isp);
2146 		iok = isp->isp_osinfo.intsok;
2147 		isp->isp_osinfo.intsok = 0;
2148 		isp_en_lun(isp, ccb);
2149 		isp->isp_osinfo.intsok = iok;
2150 		ISPLOCK_2_CAMLOCK(isp);
2151 		xpt_done(ccb);
2152 		break;
2153 	}
2154 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2155 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2156 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2157 	{
2158 		tstate_t *tptr =
2159 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2160 		if (tptr == NULL) {
2161 			ccb->ccb_h.status = CAM_LUN_INVALID;
2162 			xpt_done(ccb);
2163 			break;
2164 		}
2165 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2166 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2167 		ccb->ccb_h.flags = 0;
2168 
2169 		CAMLOCK_2_ISPLOCK(isp);
2170 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2171 			/*
2172 			 * Note that the command itself may not be done;
2173 			 * it may not even have had the first CTIO sent.
2174 			 */
2175 			tptr->atio_count++;
2176 			isp_prt(isp, ISP_LOGTDEBUG0,
2177 			    "Put FREE ATIO2, lun %d, count now %d",
2178 			    ccb->ccb_h.target_lun, tptr->atio_count);
2179 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2180 			    sim_links.sle);
2181 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2182 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2183 			    sim_links.sle);
2184 		} else {
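			/* XPT_NOTIFY_ACK: nothing to queue here. */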
2185 			;
2186 		}
2187 		rls_lun_statep(isp, tptr);
2188 		ccb->ccb_h.status = CAM_REQ_INPROG;
2189 		ISPLOCK_2_CAMLOCK(isp);
2190 		break;
2191 	}
2192 	case XPT_CONT_TARGET_IO:
2193 	{
2194 		CAMLOCK_2_ISPLOCK(isp);
2195 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2196 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2197 			isp_prt(isp, ISP_LOGWARN,
2198 			    "XPT_CONT_TARGET_IO: status 0x%x",
2199 			    ccb->ccb_h.status);
2200 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2201 			ISPLOCK_2_CAMLOCK(isp);
2202 			xpt_done(ccb);
2203 		} else {
2204 			ISPLOCK_2_CAMLOCK(isp);
2205 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2206 		}
2207 		break;
2208 	}
2209 #endif
2210 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2211 
2212 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2213 		tgt = ccb->ccb_h.target_id;
2214 		tgt |= (bus << 16);
2215 
2216 		CAMLOCK_2_ISPLOCK(isp);
2217 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2218 		ISPLOCK_2_CAMLOCK(isp);
2219 		if (error) {
2220 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2221 		} else {
2222 			ccb->ccb_h.status = CAM_REQ_CMP;
2223 		}
2224 		xpt_done(ccb);
2225 		break;
2226 	case XPT_ABORT:			/* Abort the specified CCB */
2227 	{
2228 		union ccb *accb = ccb->cab.abort_ccb;
2229 		CAMLOCK_2_ISPLOCK(isp);
2230 		switch (accb->ccb_h.func_code) {
2231 #ifdef	ISP_TARGET_MODE
2232 		case XPT_ACCEPT_TARGET_IO:
2233 		case XPT_IMMED_NOTIFY:
2234 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2235 			break;
2236 		case XPT_CONT_TARGET_IO:
2237 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2238 			ccb->ccb_h.status = CAM_UA_ABORT;
2239 			break;
2240 #endif
2241 		case XPT_SCSI_IO:
2242 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2243 			if (error) {
2244 				ccb->ccb_h.status = CAM_UA_ABORT;
2245 			} else {
2246 				ccb->ccb_h.status = CAM_REQ_CMP;
2247 			}
2248 			break;
2249 		default:
2250 			ccb->ccb_h.status = CAM_REQ_INVALID;
2251 			break;
2252 		}
2253 		ISPLOCK_2_CAMLOCK(isp);
2254 		xpt_done(ccb);
2255 		break;
2256 	}
2257 #ifdef	CAM_NEW_TRAN_CODE
2258 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2259 #else
2260 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2261 #endif
2262 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2263 		cts = &ccb->cts;
2264 		if (!IS_CURRENT_SETTINGS(cts)) {
2265 			ccb->ccb_h.status = CAM_REQ_INVALID;
2266 			xpt_done(ccb);
2267 			break;
2268 		}
2269 		tgt = cts->ccb_h.target_id;
2270 		CAMLOCK_2_ISPLOCK(isp);
2271 		if (IS_SCSI(isp)) {
2272 #ifndef	CAM_NEW_TRAN_CODE
2273 			sdparam *sdp = isp->isp_param;
2274 			u_int16_t *dptr;
2275 
2276 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2277 
2278 			sdp += bus;
2279 			/*
2280 			 * We always update (internally) from goal_flags
2281 			 * so any request to change settings just gets
2282 			 * vectored to that location.
2283 			 */
2284 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2285 
2286 			/*
2287 			 * Note that these operations affect the
2288 			 * goal flags (goal_flags), not the
2289 			 * current state flags. Then we mark
2290 			 * things so that the next operation to
2291 			 * this HBA will cause the update to occur.
2292 			 */
2293 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2294 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2295 					*dptr |= DPARM_DISC;
2296 				} else {
2297 					*dptr &= ~DPARM_DISC;
2298 				}
2299 			}
2300 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2301 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2302 					*dptr |= DPARM_TQING;
2303 				} else {
2304 					*dptr &= ~DPARM_TQING;
2305 				}
2306 			}
2307 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2308 				switch (cts->bus_width) {
2309 				case MSG_EXT_WDTR_BUS_16_BIT:
2310 					*dptr |= DPARM_WIDE;
2311 					break;
2312 				default:
2313 					*dptr &= ~DPARM_WIDE;
2314 				}
2315 			}
2316 			/*
2317 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2318 			 * of nonzero will cause us to go to the
2319 			 * selected (from NVRAM) maximum value for
2320 			 * this device. At a later point, we'll
2321 			 * allow finer control.
2322 			 */
2323 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2324 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2325 			    (cts->sync_offset > 0)) {
2326 				*dptr |= DPARM_SYNC;
2327 			} else {
2328 				*dptr &= ~DPARM_SYNC;
2329 			}
2330 			*dptr |= DPARM_SAFE_DFLT;
2331 #else
2332 			struct ccb_trans_settings_scsi *scsi =
2333 			    &cts->proto_specific.scsi;
2334 			struct ccb_trans_settings_spi *spi =
2335 			    &cts->xport_specific.spi;
2336 			sdparam *sdp = isp->isp_param;
2337 			u_int16_t *dptr;
2338 
2339 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2340 			sdp += bus;
2341 			/*
2342 			 * We always update (internally) from goal_flags
2343 			 * so any request to change settings just gets
2344 			 * vectored to that location.
2345 			 */
2346 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2347 
2348 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2349 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2350 					*dptr |= DPARM_DISC;
2351 				else
2352 					*dptr &= ~DPARM_DISC;
2353 			}
2354 
2355 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2356 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2357 					*dptr |= DPARM_TQING;
2358 				else
2359 					*dptr &= ~DPARM_TQING;
2360 			}
2361 
2362 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2363 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2364 					*dptr |= DPARM_WIDE;
2365 				else
2366 					*dptr &= ~DPARM_WIDE;
2367 			}
2368 
2369 			/*
2370 			 * XXX: FIX ME
2371 			 */
2372 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2373 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2374 			    (spi->sync_period && spi->sync_offset)) {
2375 				*dptr |= DPARM_SYNC;
2376 				/*
2377 				 * XXX: CHECK FOR LEGALITY
2378 				 */
2379 				sdp->isp_devparam[tgt].goal_period =
2380 				    spi->sync_period;
2381 				sdp->isp_devparam[tgt].goal_offset =
2382 				    spi->sync_offset;
2383 			} else {
2384 				*dptr &= ~DPARM_SYNC;
2385 			}
2386 #endif
2387 			isp_prt(isp, ISP_LOGDEBUG0,
2388 			    "SET bus %d targ %d to flags %x off %x per %x",
2389 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2390 			    sdp->isp_devparam[tgt].goal_offset,
2391 			    sdp->isp_devparam[tgt].goal_period);
2392 			sdp->isp_devparam[tgt].dev_update = 1;
2393 			isp->isp_update |= (1 << bus);
2394 		}
2395 		ISPLOCK_2_CAMLOCK(isp);
2396 		ccb->ccb_h.status = CAM_REQ_CMP;
2397 		xpt_done(ccb);
2398 		break;
2399 	case XPT_GET_TRAN_SETTINGS:
2400 		cts = &ccb->cts;
2401 		tgt = cts->ccb_h.target_id;
2402 		CAMLOCK_2_ISPLOCK(isp);
2403 		if (IS_FC(isp)) {
2404 #ifndef	CAM_NEW_TRAN_CODE
2405 			/*
2406 			 * a lot of normal SCSI things don't make sense.
2407 			 */
2408 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2409 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2410 			/*
2411 			 * How do you measure the width of a high
2412 			 * speed serial bus? Well, in bytes.
2413 			 *
2414 			 * Offset and period make no sense, though, so we set
2415 			 * (above) a 'base' transfer speed to be gigabit.
2416 			 */
2417 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2418 #else
2419 			fcparam *fcp = isp->isp_param;
2420 			struct ccb_trans_settings_fc *fc =
2421 			    &cts->xport_specific.fc;
2422 
2423 			cts->protocol = PROTO_SCSI;
2424 			cts->protocol_version = SCSI_REV_2;
2425 			cts->transport = XPORT_FC;
2426 			cts->transport_version = 0;
2427 
2428 			fc->valid = CTS_FC_VALID_SPEED;
2429 			if (fcp->isp_gbspeed == 2)
2430 				fc->bitrate = 200000;
2431 			else
2432 				fc->bitrate = 100000;
2433 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2434 				struct lportdb *lp = &fcp->portdb[tgt];
2435 				fc->wwnn = lp->node_wwn;
2436 				fc->wwpn = lp->port_wwn;
2437 				fc->port = lp->portid;
2438 				fc->valid |= CTS_FC_VALID_WWNN |
2439 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2440 			}
2441 #endif
2442 		} else {
2443 #ifdef	CAM_NEW_TRAN_CODE
2444 			struct ccb_trans_settings_scsi *scsi =
2445 			    &cts->proto_specific.scsi;
2446 			struct ccb_trans_settings_spi *spi =
2447 			    &cts->xport_specific.spi;
2448 #endif
2449 			sdparam *sdp = isp->isp_param;
2450 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2451 			u_int16_t dval, pval, oval;
2452 
2453 			sdp += bus;
2454 
2455 			if (IS_CURRENT_SETTINGS(cts)) {
2456 				sdp->isp_devparam[tgt].dev_refresh = 1;
2457 				isp->isp_update |= (1 << bus);
2458 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2459 				    NULL);
2460 				dval = sdp->isp_devparam[tgt].actv_flags;
2461 				oval = sdp->isp_devparam[tgt].actv_offset;
2462 				pval = sdp->isp_devparam[tgt].actv_period;
2463 			} else {
2464 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2465 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2466 				pval = sdp->isp_devparam[tgt].nvrm_period;
2467 			}
2468 
2469 #ifndef	CAM_NEW_TRAN_CODE
2470 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2471 
2472 			if (dval & DPARM_DISC) {
2473 				cts->flags |= CCB_TRANS_DISC_ENB;
2474 			}
2475 			if (dval & DPARM_TQING) {
2476 				cts->flags |= CCB_TRANS_TAG_ENB;
2477 			}
2478 			if (dval & DPARM_WIDE) {
2479 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2480 			} else {
2481 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2482 			}
2483 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2484 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2485 
2486 			if ((dval & DPARM_SYNC) && oval != 0) {
2487 				cts->sync_period = pval;
2488 				cts->sync_offset = oval;
2489 				cts->valid |=
2490 				    CCB_TRANS_SYNC_RATE_VALID |
2491 				    CCB_TRANS_SYNC_OFFSET_VALID;
2492 			}
2493 #else
2494 			cts->protocol = PROTO_SCSI;
2495 			cts->protocol_version = SCSI_REV_2;
2496 			cts->transport = XPORT_SPI;
2497 			cts->transport_version = 2;
2498 
2499 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2500 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2501 			if (dval & DPARM_DISC) {
2502 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2503 			}
2504 			if (dval & DPARM_TQING) {
2505 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2506 			}
2507 			if ((dval & DPARM_SYNC) && oval && pval) {
2508 				spi->sync_offset = oval;
2509 				spi->sync_period = pval;
2510 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2511 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2512 			}
2513 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2514 			if (dval & DPARM_WIDE) {
2515 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2516 			} else {
2517 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2518 			}
2519 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2520 				scsi->valid = CTS_SCSI_VALID_TQ;
2521 				spi->valid |= CTS_SPI_VALID_DISC;
2522 			} else {
2523 				scsi->valid = 0;
2524 			}
2525 #endif
2526 			isp_prt(isp, ISP_LOGDEBUG0,
2527 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2528 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2529 			    bus, tgt, dval, oval, pval);
2530 		}
2531 		ISPLOCK_2_CAMLOCK(isp);
2532 		ccb->ccb_h.status = CAM_REQ_CMP;
2533 		xpt_done(ccb);
2534 		break;
2535 
2536 	case XPT_CALC_GEOMETRY:
2537 	{
2538 		struct ccb_calc_geometry *ccg;
2539 
2540 		ccg = &ccb->ccg;
2541 		if (ccg->block_size == 0) {
2542 			isp_prt(isp, ISP_LOGERR,
2543 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2544 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2545 			ccb->ccb_h.status = CAM_REQ_INVALID;
2546 			xpt_done(ccb);
2547 			break;
2548 		}
2549 		cam_calc_geometry(ccg, /*extended*/1);
2550 		xpt_done(ccb);
2551 		break;
2552 	}
2553 	case XPT_RESET_BUS:		/* Reset the specified bus */
2554 		bus = cam_sim_bus(sim);
2555 		CAMLOCK_2_ISPLOCK(isp);
2556 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2557 		ISPLOCK_2_CAMLOCK(isp);
2558 		if (error)
2559 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2560 		else {
2561 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2562 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2563 			else if (isp->isp_path != NULL)
2564 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2565 			ccb->ccb_h.status = CAM_REQ_CMP;
2566 		}
2567 		xpt_done(ccb);
2568 		break;
2569 
2570 	case XPT_TERM_IO:		/* Terminate the I/O process */
2571 		ccb->ccb_h.status = CAM_REQ_INVALID;
2572 		xpt_done(ccb);
2573 		break;
2574 
2575 	case XPT_PATH_INQ:		/* Path routing inquiry */
2576 	{
2577 		struct ccb_pathinq *cpi = &ccb->cpi;
2578 
2579 		cpi->version_num = 1;
2580 #ifdef	ISP_TARGET_MODE
2581 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2582 #else
2583 		cpi->target_sprt = 0;
2584 #endif
2585 		cpi->hba_eng_cnt = 0;
2586 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2587 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2588 		cpi->bus_id = cam_sim_bus(sim);
2589 		if (IS_FC(isp)) {
2590 			cpi->hba_misc = PIM_NOBUSRESET;
2591 			/*
2592 			 * Because our loop ID can shift from time to time,
2593 			 * make our initiator ID out of range of our bus.
2594 			 */
2595 			cpi->initiator_id = cpi->max_target + 1;
2596 
2597 			/*
2598 			 * Set base transfer capabilities for Fibre Channel.
2599 			 * Technically not correct because we don't know
2600 			 * what media we're running on top of, but we'll
2601 			 * look good if we say 100MB/s (200MB/s for 2Gb links).
2602 			 */
2603 			if (FCPARAM(isp)->isp_gbspeed == 2)
2604 				cpi->base_transfer_speed = 200000;
2605 			else
2606 				cpi->base_transfer_speed = 100000;
2607 			cpi->hba_inquiry = PI_TAG_ABLE;
2608 #ifdef	CAM_NEW_TRAN_CODE
2609 			cpi->transport = XPORT_FC;
2610 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2611 #endif
2612 		} else {
2613 			sdparam *sdp = isp->isp_param;
2614 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2615 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2616 			cpi->hba_misc = 0;
2617 			cpi->initiator_id = sdp->isp_initiator_id;
2618 			cpi->base_transfer_speed = 3300;
2619 #ifdef	CAM_NEW_TRAN_CODE
2620 			cpi->transport = XPORT_SPI;
2621 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2622 #endif
2623 		}
2624 #ifdef	CAM_NEW_TRAN_CODE
2625 		cpi->protocol = PROTO_SCSI;
2626 		cpi->protocol_version = SCSI_REV_2;
2627 #endif
2628 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2629 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2630 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2631 		cpi->unit_number = cam_sim_unit(sim);
2632 		cpi->ccb_h.status = CAM_REQ_CMP;
2633 		xpt_done(ccb);
2634 		break;
2635 	}
2636 	default:
2637 		ccb->ccb_h.status = CAM_REQ_INVALID;
2638 		xpt_done(ccb);
2639 		break;
2640 	}
2641 }
2642 
2643 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
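/*
 * Command completion: called by the core code (and by the watchdog)
 * when a SCSI I/O finishes. Map a non-OK SCSI status into the CAM
 * status, freeze the device queue on error, and, unless the watchdog
 * currently owns the command, cancel the watchdog and hand the CCB
 * back to CAM.
 */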
2644 void
2645 isp_done(struct ccb_scsiio *sccb)
2646 {
2647 	struct ispsoftc *isp = XS_ISP(sccb);
2648 
2649 	if (XS_NOERR(sccb))
2650 		XS_SETERR(sccb, CAM_REQ_CMP);
2651 
2652 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2653 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2654 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2655 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2656 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2657 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2658 		} else {
2659 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2660 		}
2661 	}
2662 
2663 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2664 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2665 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2666 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2667 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2668 			isp_prt(isp, ISP_LOGDEBUG0,
2669 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2670 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2671 			    sccb->ccb_h.status, sccb->scsi_status);
2672 		}
2673 	}
2674 
2675 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2676 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2677 		xpt_print_path(sccb->ccb_h.path);
2678 		isp_prt(isp, ISP_LOGINFO,
2679 		    "cam completion status 0x%x", sccb->ccb_h.status);
2680 	}
2681 
2682 	XS_CMD_S_DONE(sccb);
2683 	if (XS_CMD_WDOG_P(sccb) == 0) {
2684 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2685 		if (XS_CMD_GRACE_P(sccb)) {
2686 			isp_prt(isp, ISP_LOGDEBUG2,
2687 			    "finished command on borrowed time");
2688 		}
2689 		XS_CMD_S_CLEAR(sccb);
2690 		ISPLOCK_2_CAMLOCK(isp);
2691 		xpt_done((union ccb *) sccb);
2692 		CAMLOCK_2_ISPLOCK(isp);
2693 	}
2694 }
2695 
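/*
 * Platform handler for asynchronous events posted by the core driver:
 * new negotiated SCSI parameters, bus resets, FC loop state changes,
 * port database / name server changes (which wake the kthread above),
 * fabric device announcements, target mode actions, and firmware
 * crashes.
 */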
2696 int
2697 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2698 {
2699 	int bus, rv = 0;
2700 	switch (cmd) {
2701 	case ISPASYNC_NEW_TGT_PARAMS:
2702 	{
2703 #ifdef	CAM_NEW_TRAN_CODE
2704 		struct ccb_trans_settings_scsi *scsi;
2705 		struct ccb_trans_settings_spi *spi;
2706 #endif
2707 		int flags, tgt;
2708 		sdparam *sdp = isp->isp_param;
2709 		struct ccb_trans_settings cts;
2710 		struct cam_path *tmppath;
2711 
2712 		bzero(&cts, sizeof (struct ccb_trans_settings));
2713 
2714 		tgt = *((int *)arg);
2715 		bus = (tgt >> 16) & 0xffff;
2716 		tgt &= 0xffff;
2717 		sdp += bus;
2718 		ISPLOCK_2_CAMLOCK(isp);
2719 		if (xpt_create_path(&tmppath, NULL,
2720 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2721 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2722 			CAMLOCK_2_ISPLOCK(isp);
2723 			isp_prt(isp, ISP_LOGWARN,
2724 			    "isp_async cannot make temp path for %d.%d",
2725 			    tgt, bus);
2726 			rv = -1;
2727 			break;
2728 		}
2729 		CAMLOCK_2_ISPLOCK(isp);
2730 		flags = sdp->isp_devparam[tgt].actv_flags;
2731 #ifdef	CAM_NEW_TRAN_CODE
2732 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2733 		cts.protocol = PROTO_SCSI;
2734 		cts.transport = XPORT_SPI;
2735 
2736 		scsi = &cts.proto_specific.scsi;
2737 		spi = &cts.xport_specific.spi;
2738 
2739 		if (flags & DPARM_TQING) {
2740 			scsi->valid |= CTS_SCSI_VALID_TQ;
2741 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2742 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2743 		}
2744 
2745 		if (flags & DPARM_DISC) {
2746 			spi->valid |= CTS_SPI_VALID_DISC;
2747 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2748 		}
2749 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2750 		if (flags & DPARM_WIDE) {
2751 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2752 		} else {
2753 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2754 		}
2755 		if (flags & DPARM_SYNC) {
2756 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2757 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2758 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2759 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2760 		}
2761 #else
2762 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2763 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2764 		if (flags & DPARM_DISC) {
2765 			cts.flags |= CCB_TRANS_DISC_ENB;
2766 		}
2767 		if (flags & DPARM_TQING) {
2768 			cts.flags |= CCB_TRANS_TAG_ENB;
2769 		}
2770 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2771 		cts.bus_width = (flags & DPARM_WIDE)?
2772 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2773 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2774 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2775 		if (flags & DPARM_SYNC) {
2776 			cts.valid |=
2777 			    CCB_TRANS_SYNC_RATE_VALID |
2778 			    CCB_TRANS_SYNC_OFFSET_VALID;
2779 		}
2780 #endif
2781 		isp_prt(isp, ISP_LOGDEBUG2,
2782 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2783 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2784 		    sdp->isp_devparam[tgt].actv_offset, flags);
2785 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2786 		ISPLOCK_2_CAMLOCK(isp);
2787 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2788 		xpt_free_path(tmppath);
2789 		CAMLOCK_2_ISPLOCK(isp);
2790 		break;
2791 	}
2792 	case ISPASYNC_BUS_RESET:
2793 		bus = *((int *)arg);
2794 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2795 		    bus);
2796 		if (bus > 0 && isp->isp_path2) {
2797 			ISPLOCK_2_CAMLOCK(isp);
2798 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2799 			CAMLOCK_2_ISPLOCK(isp);
2800 		} else if (isp->isp_path) {
2801 			ISPLOCK_2_CAMLOCK(isp);
2802 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2803 			CAMLOCK_2_ISPLOCK(isp);
2804 		}
2805 		break;
2806 	case ISPASYNC_LIP:
2807 		if (isp->isp_path) {
2808 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2809 		}
2810 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2811 		break;
2812 	case ISPASYNC_LOOP_RESET:
2813 		if (isp->isp_path) {
2814 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2815 		}
2816 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2817 		break;
2818 	case ISPASYNC_LOOP_DOWN:
2819 		if (isp->isp_path) {
2820 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2821 		}
2822 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2823 		break;
2824 	case ISPASYNC_LOOP_UP:
2825 		/*
2826 		 * Now we just note that Loop has come up. We don't
2827 		 * actually do anything because we're waiting for a
2828 		 * Change Notify before activating the FC cleanup
2829 		 * thread to look at the state of the loop again.
2830 		 */
2831 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2832 		break;
2833 	case ISPASYNC_PROMENADE:
2834 	{
2835 		struct cam_path *tmppath;
2836 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2837 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2838 		static const char *roles[4] = {
2839 		    "(none)", "Target", "Initiator", "Target/Initiator"
2840 		};
2841 		fcparam *fcp = isp->isp_param;
2842 		int tgt = *((int *) arg);
2843 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2844 		struct lportdb *lp = &fcp->portdb[tgt];
2845 
2846 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2847 		    roles[lp->roles & 0x3],
2848 		    (lp->valid)? "Arrived" : "Departed",
2849 		    (u_int32_t) (lp->port_wwn >> 32),
2850 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2851 		    (u_int32_t) (lp->node_wwn >> 32),
2852 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2853 
2854 		ISPLOCK_2_CAMLOCK(isp);
2855 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2856 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2857 			CAMLOCK_2_ISPLOCK(isp);
2858 			break;
2859 		}
2860 		/*
2861 		 * Policy: only announce targets.
2862 		 */
2863 		if (lp->roles & is_tgt_mask) {
2864 			if (lp->valid) {
2865 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2866 			} else {
2867 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2868 			}
2869 		}
2870 		xpt_free_path(tmppath);
2871 		CAMLOCK_2_ISPLOCK(isp);
2872 		break;
2873 	}
2874 	case ISPASYNC_CHANGE_NOTIFY:
2875 		if (arg == ISPASYNC_CHANGE_PDB) {
2876 			isp_prt(isp, ISP_LOGINFO,
2877 			    "Port Database Changed");
2878 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2879 			isp_prt(isp, ISP_LOGINFO,
2880 			    "Name Server Database Changed");
2881 		}
2882 #ifdef	ISP_SMPLOCK
2883 		cv_signal(&isp->isp_osinfo.kthread_cv);
2884 #else
2885 		wakeup(&isp->isp_osinfo.kthread_cv);
2886 #endif
2887 		break;
2888 	case ISPASYNC_FABRIC_DEV:
2889 	{
2890 		int target, base, lim;
2891 		fcparam *fcp = isp->isp_param;
2892 		struct lportdb *lp = NULL;
2893 		struct lportdb *clp = (struct lportdb *) arg;
2894 		char *pt;
2895 
2896 		switch (clp->port_type) {
2897 		case 1:
2898 			pt = "   N_Port";
2899 			break;
2900 		case 2:
2901 			pt = "  NL_Port";
2902 			break;
2903 		case 3:
2904 			pt = "F/NL_Port";
2905 			break;
2906 		case 0x7f:
2907 			pt = "  Nx_Port";
2908 			break;
2909 		case 0x81:
2910 			pt = "  F_port";
2911 			break;
2912 		case 0x82:
2913 			pt = "  FL_Port";
2914 			break;
2915 		case 0x84:
2916 			pt = "   E_port";
2917 			break;
2918 		default:
2919 			pt = " ";
2920 			break;
2921 		}
2922 
2923 		isp_prt(isp, ISP_LOGINFO,
2924 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2925 
2926 		/*
2927 		 * If we don't have an initiator role we bail.
2928 		 *
2929 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2930 		 */
2931 
2932 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2933 			break;
2934 		}
2935 
2936 		/*
2937 		 * Is this entry for us? If so, we bail.
2938 		 */
2939 
2940 		if (fcp->isp_portid == clp->portid) {
2941 			break;
2942 		}
2943 
2944 		/*
2945 		 * Else, the default policy is to find room for it in
2946 		 * our local port database. Later, when we execute
2947 		 * the call to isp_pdb_sync either this newly arrived
2948 		 * or already logged in device will be (re)announced.
2949 		 */
2950 
2951 		if (fcp->isp_topo == TOPO_FL_PORT)
2952 			base = FC_SNS_ID+1;
2953 		else
2954 			base = 0;
2955 
2956 		if (fcp->isp_topo == TOPO_N_PORT)
2957 			lim = 1;
2958 		else
2959 			lim = MAX_FC_TARG;
2960 
2961 		/*
2962 		 * Is it already in our list?
2963 		 */
2964 		for (target = base; target < lim; target++) {
2965 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2966 				continue;
2967 			}
2968 			lp = &fcp->portdb[target];
2969 			if (lp->port_wwn == clp->port_wwn &&
2970 			    lp->node_wwn == clp->node_wwn) {
2971 				lp->fabric_dev = 1;
2972 				break;
2973 			}
2974 		}
2975 		if (target < lim) {
2976 			break;
2977 		}
2978 		for (target = base; target < lim; target++) {
2979 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2980 				continue;
2981 			}
2982 			lp = &fcp->portdb[target];
2983 			if (lp->port_wwn == 0) {
2984 				break;
2985 			}
2986 		}
2987 		if (target == lim) {
2988 			isp_prt(isp, ISP_LOGWARN,
2989 			    "out of space for fabric devices");
2990 			break;
2991 		}
2992 		lp->port_type = clp->port_type;
2993 		lp->fc4_type = clp->fc4_type;
2994 		lp->node_wwn = clp->node_wwn;
2995 		lp->port_wwn = clp->port_wwn;
2996 		lp->portid = clp->portid;
2997 		lp->fabric_dev = 1;
2998 		break;
2999 	}
3000 #ifdef	ISP_TARGET_MODE
3001 	case ISPASYNC_TARGET_MESSAGE:
3002 	{
3003 		tmd_msg_t *mp = arg;
3004 		isp_prt(isp, ISP_LOGALL,
3005 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3006 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3007 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3008 		    mp->nt_msg[0]);
3009 		break;
3010 	}
3011 	case ISPASYNC_TARGET_EVENT:
3012 	{
3013 		tmd_event_t *ep = arg;
3014 		isp_prt(isp, ISP_LOGALL,
3015 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
3016 		break;
3017 	}
3018 	case ISPASYNC_TARGET_ACTION:
3019 		switch (((isphdr_t *)arg)->rqs_entry_type) {
3020 		default:
3021 			isp_prt(isp, ISP_LOGWARN,
3022 			   "event 0x%x for unhandled target action",
3023 			    ((isphdr_t *)arg)->rqs_entry_type);
3024 			break;
3025 		case RQSTYPE_NOTIFY:
3026 			if (IS_SCSI(isp)) {
3027 				rv = isp_handle_platform_notify_scsi(isp,
3028 				    (in_entry_t *) arg);
3029 			} else {
3030 				rv = isp_handle_platform_notify_fc(isp,
3031 				    (in_fcentry_t *) arg);
3032 			}
3033 			break;
3034 		case RQSTYPE_ATIO:
3035 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3036 			break;
3037 		case RQSTYPE_ATIO2:
3038 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3039 			break;
3040 		case RQSTYPE_CTIO2:
3041 		case RQSTYPE_CTIO:
3042 			rv = isp_handle_platform_ctio(isp, arg);
3043 			break;
3044 		case RQSTYPE_ENABLE_LUN:
3045 		case RQSTYPE_MODIFY_LUN:
3046 			if (IS_DUALBUS(isp)) {
3047 				bus =
3048 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3049 			} else {
3050 				bus = 0;
3051 			}
3052 			isp_cv_signal_rqe(isp, bus,
3053 			    ((lun_entry_t *)arg)->le_status);
3054 			break;
3055 		}
3056 		break;
3057 #endif
3058 	case ISPASYNC_FW_CRASH:
3059 	{
3060 		u_int16_t mbox1, mbox6;
3061 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3062 		if (IS_DUALBUS(isp)) {
3063 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
3064 		} else {
3065 			mbox6 = 0;
3066 		}
3067 		isp_prt(isp, ISP_LOGERR,
3068 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3069 		    mbox6, mbox1);
3070 #ifdef	ISP_FW_CRASH_DUMP
3071 		/*
3072 		 * XXX: really need a thread to do this right.
3073 		 */
3074 		if (IS_FC(isp)) {
3075 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3076 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3077 			isp_freeze_loopdown(isp, "f/w crash");
3078 			isp_fw_dump(isp);
3079 		}
3080 		isp_reinit(isp);
3081 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3082 #endif
3083 		break;
3084 	}
3085 	case ISPASYNC_UNHANDLED_RESPONSE:
3086 		break;
3087 	default:
3088 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3089 		break;
3090 	}
3091 	return (rv);
3092 }
3093 
3094 
3095 /*
3096  * Locks are held before coming here.
3097  */
3098 void
3099 isp_uninit(struct ispsoftc *isp)
3100 {
3101 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3102 	DISABLE_INTS(isp);
3103 }
3104 
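/*
 * Driver printf: messages are gated by the per-instance debug level
 * (isp_dblev) unless the level is ISP_LOGALL, and are prefixed with
 * the device name and unit.
 */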
3105 void
3106 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3107 {
3108 	va_list ap;
3109 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3110 		return;
3111 	}
3112 	printf("%s: ", device_get_nameunit(isp->isp_dev));
3113 	va_start(ap, fmt);
3114 	vprintf(fmt, ap);
3115 	va_end(ap);
3116 	printf("\n");
3117 }
3118