xref: /titanic_50/usr/src/uts/sun/io/scsi/adapters/fas.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * ISSUES
31  *
32  * - more consistent error messages
33  * - report name of device on errors?
34  * - if wide target renegotiates sync, back to narrow?
35  * - last_msgout is not accurate ????
36  * - resolve XXXX
37  * - improve msg reject code (use special msg reject handler)
38  * - better use of IDE message
39  * - keep track if ATN remains asserted and target not going into
40  *   a msg-out phase
41  * - improve comments
42  * - no slave accesses when start address is odd and dma hasn't started
43  *   this affects asserting ATN
44  */
45 
46 /*
47  * fas - QLogic fas366 wide/fast SCSI Processor HBA driver with
48  *	tagged and non-tagged queueing support
49  */
50 #if defined(lint) && !defined(DEBUG)
51 #define	DEBUG	1
52 #define	FASDEBUG
53 #endif
54 
55 #define	DMA_REG_TRACING 	/* enable dma register access tracing */
56 
57 
58 /*
59  * standard header files
60  */
61 #include <sys/note.h>
62 #include <sys/scsi/scsi.h>
63 #include <sys/file.h>
64 #include <sys/vtrace.h>
65 
66 /*
67  * private header files
68  */
69 #include <sys/scsi/adapters/fasdma.h>
70 #include <sys/scsi/adapters/fasreg.h>
71 #include <sys/scsi/adapters/fasvar.h>
72 #include <sys/scsi/adapters/fascmd.h>
73 #include <sys/scsi/impl/scsi_reset_notify.h>
74 
75 /*
76  * tunables
77  */
78 static int		fas_selection_timeout = 250; /* 250 milliseconds */
79 static uchar_t		fas_default_offset = DEFAULT_OFFSET;
80 
81 /*
82  * needed for presto support, do not remove
83  */
84 static int		fas_enable_sbus64 = 1;
85 
86 #ifdef	FASDEBUG
87 int			fasdebug = 0;
88 int			fasdebug_instance = -1; /* debug all instances */
89 static int		fas_burstsizes_limit = -1;
90 static int		fas_no_sync_wide_backoff = 0;
91 #endif	/* FASDEBUG */
92 
93 /*
94  * Local static data protected by global mutex
95  */
96 static kmutex_t 	fas_global_mutex; /* to allow concurrent attach */
97 
98 static int		fas_scsi_watchdog_tick; /* in seconds, for all	*/
99 					/* instances			*/
100 static clock_t		fas_tick;	/* fas_watch() interval in Hz	*/
101 static timeout_id_t	fas_reset_watch; /* timeout id for reset watch	*/
102 static timeout_id_t	fas_timeout_id = 0;
103 static int		fas_timeout_initted = 0;
104 
105 static krwlock_t	fas_global_rwlock;
106 
107 static void		*fas_state;	/* soft state ptr		*/
108 static struct fas	*fas_head;	/* link all softstate structures */
109 static struct fas	*fas_tail;	/* for fas_watch()		*/
110 
111 static kmutex_t		fas_log_mutex;
112 static char		fas_log_buf[256];
113 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
114 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
115 	fas_scsi_watchdog_tick fas_tick))
116 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", fas::f_quiesce_timeid))
117 
/*
 * DMA attribute structure for the SCSI engine (see ddi_dma_attr(9S)).
 * Positional initializers, in ddi_dma_attr_t field order:
 *	dma_attr_version	DMA_ATTR_V0
 *	dma_attr_addr_lo	0
 *	dma_attr_addr_hi	0xffffffff	(32-bit DMA address space)
 *	dma_attr_count_max	(1<<24)-1	(24-bit transfer counter)
 *	dma_attr_align		1		(no alignment restriction)
 *	dma_attr_burstsizes	DEFAULT_BURSTSIZE (trimmed at attach time
 *				against ddi_dma_burstsizes() of the parent)
 *	dma_attr_minxfer	1
 *	dma_attr_maxxfer	0xffffffff
 *	dma_attr_seg		0xffffffff
 *	dma_attr_sgllen		1		(single DMA cookie)
 *	dma_attr_granular	512
 *	dma_attr_flags		0
 */
static ddi_dma_attr_t dma_fasattr	= {
	DMA_ATTR_V0, (unsigned long long)0,
	(unsigned long long)0xffffffff, (unsigned long long)((1<<24)-1),
	1, DEFAULT_BURSTSIZE, 1,
	(unsigned long long)0xffffffff, (unsigned long long)0xffffffff,
	1, 512, 0
};
128 
129 /*
130  * optional torture test stuff
131  */
132 #ifdef	FASDEBUG
133 #define	FAS_TEST
134 static int fas_ptest_emsgin;
135 static int fas_ptest_msgin;
136 static int fas_ptest_msg = -1;
137 static int fas_ptest_status;
138 static int fas_ptest_data_in;
139 static int fas_atest;
140 static int fas_atest_disc;
141 static int fas_atest_reconn;
142 static void fas_test_abort(struct fas *fas, int slot);
143 static int fas_rtest;
144 static int fas_rtest_type;
145 static void fas_test_reset(struct fas *fas, int slot);
146 static int fas_force_timeout;
147 static int fas_btest;
148 static int fas_test_stop;
149 static int fas_transport_busy;
150 static int fas_transport_busy_rqs;
151 static int fas_transport_reject;
152 static int fas_arqs_failure;
153 static int fas_tran_err;
154 static int fas_test_untagged;
155 static int fas_enable_untagged;
156 #endif
157 
158 /*
159  * warlock directives
160  */
161 _NOTE(DATA_READABLE_WITHOUT_LOCK(dma fasdebug))
162 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy))
163 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy_rqs))
164 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_reject))
165 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_arqs_failure))
166 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_tran_err))
167 _NOTE(MUTEX_PROTECTS_DATA(fas_log_mutex, fas_log_buf))
168 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
169 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
170 	fas_scsi_watchdog_tick fas_tick))
171 
172 /*
173  * function prototypes
174  *
175  * scsa functions are exported by means of the transport table:
176  */
177 static int fas_scsi_tgt_probe(struct scsi_device *sd,
178     int (*waitfunc)(void));
179 static int fas_scsi_tgt_init(dev_info_t *, dev_info_t *,
180     scsi_hba_tran_t *, struct scsi_device *);
181 static int fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
182 static int fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
183 static int fas_scsi_reset(struct scsi_address *ap, int level);
184 static int fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
185 static int fas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
186     int whom);
187 static struct scsi_pkt *fas_scsi_init_pkt(struct scsi_address *ap,
188     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
189     int tgtlen, int flags, int (*callback)(), caddr_t arg);
190 static void fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
191 static void fas_scsi_dmafree(struct scsi_address *ap,
192     struct scsi_pkt *pkt);
193 static void fas_scsi_sync_pkt(struct scsi_address *ap,
194     struct scsi_pkt *pkt);
195 
196 /*
197  * internal functions:
198  */
199 static int fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp);
200 static int fas_alloc_tag(struct fas *fas, struct fas_cmd *sp);
201 static int fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag);
202 static void fas_empty_waitQ(struct fas *fas);
203 static void fas_move_waitQ_to_readyQ(struct fas *fas);
204 static void fas_check_waitQ_and_mutex_exit(struct fas *fas);
205 static int fas_istart(struct fas *fas);
206 static int fas_ustart(struct fas *fas);
207 static int fas_startcmd(struct fas *fas, struct fas_cmd *sp);
208 
209 static int fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
210     int cmdlen, int tgtlen, int statuslen, int kf);
211 static void fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp);
212 static int fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
213 static void fas_kmem_cache_destructor(void *buf, void *cdrarg);
214 
215 static int fas_finish(struct fas *fas);
216 static void fas_handle_qfull(struct fas *fas, struct fas_cmd *sp);
217 static void fas_restart_cmd(void *);
218 static int fas_dopoll(struct fas *fas, int timeout);
219 static void fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp);
220 static uint_t fas_intr(caddr_t arg);
221 static int fas_intr_svc(struct	fas *fas);
222 static int fas_phasemanage(struct fas *fas);
223 static int fas_handle_unknown(struct fas *fas);
224 static int fas_handle_cmd_start(struct fas *fas);
225 static int fas_handle_cmd_done(struct fas *fas);
226 static int fas_handle_msg_out_start(struct fas *fas);
227 static int fas_handle_msg_out_done(struct fas *fas);
228 static int fas_handle_clearing(struct fas *fas);
229 static int fas_handle_data_start(struct fas *fas);
230 static int fas_handle_data_done(struct fas *fas);
231 static int fas_handle_c_cmplt(struct fas *fas);
232 static int fas_handle_msg_in_start(struct fas *fas);
233 static int fas_handle_more_msgin(struct fas *fas);
234 static int fas_handle_msg_in_done(struct fas *fas);
235 static int fas_onebyte_msg(struct fas *fas);
236 static int fas_twobyte_msg(struct fas *fas);
237 static int fas_multibyte_msg(struct fas *fas);
238 static void fas_revert_to_async(struct fas *fas, int tgt);
239 static int fas_finish_select(struct fas *fas);
240 static int fas_reselect_preempt(struct fas *fas);
241 static int fas_reconnect(struct fas *fas);
242 static int fas_handle_selection(struct fas *fas);
243 static void fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp);
244 static int fas_handle_gross_err(struct fas *fas);
245 static int fas_illegal_cmd_or_bus_reset(struct fas *fas);
246 static int fas_check_dma_error(struct fas *fas);
247 
248 static void fas_make_sdtr(struct fas *fas, int msgout_offset, int target);
249 static void fas_make_wdtr(struct fas *fas, int msgout_offset, int target,
250     int width);
251 static void fas_update_props(struct fas *fas, int tgt);
252 static void fas_update_this_prop(struct fas *fas, char *property, int value);
253 
254 static int fas_commoncap(struct scsi_address *ap, char *cap, int val,
255     int tgtonly, int doset);
256 
257 static void fas_watch(void *arg);
258 static void fas_watchsubr(struct fas *fas);
259 static void fas_cmd_timeout(struct fas *fas, int slot);
260 static void fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
261     int slot);
262 static void fas_reset_sync_wide(struct fas *fas);
263 static void fas_set_wide_conf3(struct fas *fas, int target, int width);
264 static void fas_force_renegotiation(struct fas *fas, int target);
265 
266 static int fas_set_new_window(struct fas *fas, struct fas_cmd *sp);
267 static int fas_restore_pointers(struct fas *fas, struct fas_cmd *sp);
268 static int fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end);
269 
270 /*PRINTFLIKE3*/
271 static void fas_log(struct fas *fas, int level, const char *fmt, ...);
272 /*PRINTFLIKE2*/
273 static void fas_printf(struct fas *fas, const char *fmt, ...);
274 static void fas_printstate(struct fas *fas, char *msg);
275 static void fas_dump_cmd(struct fas *fas, struct fas_cmd *sp);
276 static void fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp);
277 static char *fas_state_name(ushort_t state);
278 
279 static void fas_makeproxy_cmd(struct fas_cmd *sp,
280     struct scsi_address *ap, struct scsi_pkt *pkt, int nmsg, ...);
281 static int fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
282     struct scsi_address *ap, char *what);
283 
284 static void fas_internal_reset(struct fas *fas, int reset_action);
285 static int fas_alloc_active_slots(struct fas *fas, int slot, int flag);
286 
287 static int fas_abort_curcmd(struct fas *fas);
288 static int fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot);
289 static int fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
290 static int fas_do_scsi_reset(struct scsi_address *ap, int level);
291 static int fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp,
292     int slot);
293 static void fas_flush_readyQ(struct fas *fas, int slot);
294 static void fas_flush_tagQ(struct fas *fas, int slot);
295 static void fas_flush_cmd(struct fas *fas, struct fas_cmd *sp,
296     uchar_t reason, uint_t stat);
297 static int fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp,
298     uchar_t msg);
299 static int fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
300     struct fas_cmd *sp, uchar_t msg, int slot);
301 static void fas_mark_packets(struct fas *fas, int slot, uchar_t reason,
302     uint_t stat);
303 static void fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp,
304     uchar_t reason, uint_t stat);
305 
306 static int fas_reset_bus(struct fas *fas);
307 static int fas_reset_recovery(struct fas *fas);
308 static int fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap);
309 static int fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap);
310 static void fas_start_watch_reset_delay(struct fas *);
311 static void fas_setup_reset_delay(struct fas *fas);
312 static void fas_watch_reset_delay(void *arg);
313 static int fas_watch_reset_delay_subr(struct fas *fas);
314 static void fas_reset_cleanup(struct fas *fas, int slot);
315 static int fas_scsi_reset_notify(struct scsi_address *ap, int flag,
316     void (*callback)(caddr_t), caddr_t arg);
317 static int fas_scsi_quiesce(dev_info_t *hba_dip);
318 static int fas_scsi_unquiesce(dev_info_t *hba_dip);
319 
320 static void fas_set_throttles(struct fas *fas, int slot,
321     int n, int what);
322 static void fas_set_all_lun_throttles(struct fas *fas, int slot, int what);
323 static void fas_full_throttle(struct fas *fas, int slot);
324 static void fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int timeout);
325 static void fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp);
326 
327 static int fas_quiesce_bus(struct fas *fas);
328 static int fas_unquiesce_bus(struct fas *fas);
329 static void fas_ncmds_checkdrain(void *arg);
330 static int fas_check_outstanding(struct fas *fas);
331 
332 static int fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap);
333 static int fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap);
334 static int fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp);
335 void fas_complete_arq_pkt(struct scsi_pkt *pkt);
336 
337 void fas_call_pkt_comp(struct fas *fas, struct fas_cmd *sp);
338 void fas_empty_callbackQ(struct fas *fas);
339 int fas_init_callbacks(struct fas *fas);
340 void fas_destroy_callbacks(struct fas *fas);
341 
342 static int fas_check_dma_error(struct fas *fas);
343 static int fas_init_chip(struct fas *fas, uchar_t id);
344 
345 static void fas_read_fifo(struct fas *fas);
346 static void fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad);
347 
348 #ifdef FASDEBUG
349 static void fas_reg_cmd_write(struct fas *fas, uint8_t cmd);
350 static void fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what);
351 static uint8_t fas_reg_read(struct fas *fas, volatile uint8_t *p);
352 
353 static void fas_dma_reg_write(struct fas *fas, volatile uint32_t *p,
354     uint32_t what);
355 static uint32_t fas_dma_reg_read(struct fas *fas, volatile uint32_t *p);
356 #else
357 #define	fas_reg_cmd_write(fas, cmd) \
358 	fas->f_reg->fas_cmd = (cmd), fas->f_last_cmd = (cmd)
359 #define	fas_reg_write(fas, p, what)  *(p) = (what)
360 #define	fas_reg_read(fas, p) *(p)
361 #define	fas_dma_reg_write(fas, p, what)  *(p) = (what)
362 #define	fas_dma_reg_read(fas, p) *(p)
363 #endif
364 
365 /*
366  * autoconfiguration data and routines.
367  */
368 static int fas_attach(dev_info_t *dev, ddi_attach_cmd_t cmd);
369 static int fas_detach(dev_info_t *dev, ddi_detach_cmd_t cmd);
370 static int fas_dr_detach(dev_info_t *dev);
371 
/*
 * dev_ops(9S) vector for this driver.  Only attach/detach are supplied;
 * all I/O goes through the SCSA transport vector set up in fas_attach(),
 * so the cb_ops, bus_ops and power slots are NULL.
 */
static struct dev_ops fas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	fas_attach,		/* attach */
	fas_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations */
	NULL,			/* bus operations */
	NULL			/* power */
};
385 
/* this driver requires the misc/scsi module to be loaded first */
char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
	&mod_driverops, /* Type of module. This one is a driver */
	"FAS SCSI HBA Driver v%I%", /* Name of the module. */
	&fas_ops,	/* driver ops */
};

/* single driver linkage, handed to mod_install()/mod_remove()/mod_info() */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};
397 
398 int
399 _init(void)
400 {
401 	int rval;
402 	/* CONSTCOND */
403 	ASSERT(NO_COMPETING_THREADS);
404 
405 	rval = ddi_soft_state_init(&fas_state, sizeof (struct fas),
406 	    FAS_INITIAL_SOFT_SPACE);
407 	if (rval != 0) {
408 		return (rval);
409 	}
410 
411 	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
412 		ddi_soft_state_fini(&fas_state);
413 		return (rval);
414 	}
415 
416 	mutex_init(&fas_global_mutex, NULL, MUTEX_DRIVER, NULL);
417 	rw_init(&fas_global_rwlock, NULL, RW_DRIVER, NULL);
418 
419 	mutex_init(&fas_log_mutex, NULL, MUTEX_DRIVER, NULL);
420 
421 	if ((rval = mod_install(&modlinkage)) != 0) {
422 		mutex_destroy(&fas_log_mutex);
423 		rw_destroy(&fas_global_rwlock);
424 		mutex_destroy(&fas_global_mutex);
425 		ddi_soft_state_fini(&fas_state);
426 		scsi_hba_fini(&modlinkage);
427 		return (rval);
428 	}
429 
430 	return (rval);
431 }
432 
433 int
434 _fini(void)
435 {
436 	int	rval;
437 	/* CONSTCOND */
438 	ASSERT(NO_COMPETING_THREADS);
439 
440 	if ((rval = mod_remove(&modlinkage)) == 0) {
441 		ddi_soft_state_fini(&fas_state);
442 		scsi_hba_fini(&modlinkage);
443 		mutex_destroy(&fas_log_mutex);
444 		rw_destroy(&fas_global_rwlock);
445 		mutex_destroy(&fas_global_mutex);
446 	}
447 	return (rval);
448 }
449 
/*
 * _info(9E): report module information by delegating to mod_info()
 * with our modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	return (mod_info(&modlinkage, modinfop));
}
458 
459 static int
460 fas_scsi_tgt_probe(struct scsi_device *sd,
461     int (*waitfunc)(void))
462 {
463 	dev_info_t *dip = ddi_get_parent(sd->sd_dev);
464 	int rval = SCSIPROBE_FAILURE;
465 	scsi_hba_tran_t *tran;
466 	struct fas *fas;
467 	int tgt = sd->sd_address.a_target;
468 
469 	tran = ddi_get_driver_private(dip);
470 	ASSERT(tran != NULL);
471 	fas = TRAN2FAS(tran);
472 
473 	/*
474 	 * force renegotiation since inquiry cmds do not cause
475 	 * check conditions
476 	 */
477 	mutex_enter(FAS_MUTEX(fas));
478 	fas_force_renegotiation(fas, tgt);
479 	mutex_exit(FAS_MUTEX(fas));
480 	rval = scsi_hba_probe(sd, waitfunc);
481 
482 	/*
483 	 * the scsi-options precedence is:
484 	 *	target-scsi-options		highest
485 	 * 	device-type-scsi-options
486 	 *	per bus scsi-options
487 	 *	global scsi-options		lowest
488 	 */
489 	mutex_enter(FAS_MUTEX(fas));
490 	if ((rval == SCSIPROBE_EXISTS) &&
491 	    ((fas->f_target_scsi_options_defined & (1 << tgt)) == 0)) {
492 		int options;
493 
494 		options = scsi_get_device_type_scsi_options(dip, sd, -1);
495 		if (options != -1) {
496 			fas->f_target_scsi_options[tgt] = options;
497 			fas_log(fas, CE_NOTE,
498 				"?target%x-scsi-options = 0x%x\n", tgt,
499 				fas->f_target_scsi_options[tgt]);
500 			fas_force_renegotiation(fas, tgt);
501 		}
502 	}
503 	mutex_exit(FAS_MUTEX(fas));
504 
505 	IPRINTF2("target%x-scsi-options= 0x%x\n",
506 		tgt, fas->f_target_scsi_options[tgt]);
507 
508 	return (rval);
509 }
510 
511 
512 /*ARGSUSED*/
513 static int
514 fas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
515     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
516 {
517 	return (((sd->sd_address.a_target < NTARGETS_WIDE) &&
518 	    (sd->sd_address.a_lun < NLUNS_PER_TARGET)) ?
519 		DDI_SUCCESS : DDI_FAILURE);
520 }
521 
522 /*ARGSUSED*/
523 static int
524 fas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
525 {
526 	struct fas	*fas = NULL;
527 	volatile struct dma	*dmar = NULL;
528 	volatile struct fasreg	*fasreg;
529 	ddi_dma_attr_t		*fas_dma_attr;
530 	ddi_device_acc_attr_t	dev_attr;
531 
532 	int			instance, id, slot, i, hm_rev;
533 	size_t			rlen;
534 	uint_t			count;
535 	char			buf[64];
536 	scsi_hba_tran_t		*tran =	NULL;
537 	char			intr_added = 0;
538 	char			mutex_init_done = 0;
539 	char			hba_attached = 0;
540 	char			bound_handle = 0;
541 	char			*prop_template = "target%d-scsi-options";
542 	char			prop_str[32];
543 
544 	/* CONSTCOND */
545 	ASSERT(NO_COMPETING_THREADS);
546 
547 	switch (cmd) {
548 	case DDI_ATTACH:
549 		break;
550 
551 	case DDI_RESUME:
552 		if ((tran = ddi_get_driver_private(dip)) == NULL)
553 			return (DDI_FAILURE);
554 
555 		fas = TRAN2FAS(tran);
556 		if (!fas) {
557 			return (DDI_FAILURE);
558 		}
559 		/*
560 		 * Reset hardware and softc to "no outstanding commands"
561 		 * Note that a check condition can result on first command
562 		 * to a target.
563 		 */
564 		mutex_enter(FAS_MUTEX(fas));
565 		fas_internal_reset(fas,
566 		    FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);
567 
568 		(void) fas_reset_bus(fas);
569 
570 		fas->f_suspended = 0;
571 
572 		/* make sure that things get started */
573 		(void) fas_istart(fas);
574 		fas_check_waitQ_and_mutex_exit(fas);
575 
576 		mutex_enter(&fas_global_mutex);
577 		if (fas_timeout_id == 0) {
578 			fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
579 			fas_timeout_initted = 1;
580 		}
581 		mutex_exit(&fas_global_mutex);
582 
583 		return (DDI_SUCCESS);
584 
585 	default:
586 		return (DDI_FAILURE);
587 	}
588 
589 	instance = ddi_get_instance(dip);
590 
591 	/*
592 	 * Since we know that some instantiations of this device can
593 	 * be plugged into slave-only SBus slots, check to see whether
594 	 * this is one such.
595 	 */
596 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
597 		cmn_err(CE_WARN,
598 		    "fas%d: device in slave-only slot", instance);
599 		return (DDI_FAILURE);
600 	}
601 
602 	if (ddi_intr_hilevel(dip, 0)) {
603 		/*
604 		 * Interrupt number '0' is a high-level interrupt.
605 		 * At this point you either add a special interrupt
606 		 * handler that triggers a soft interrupt at a lower level,
607 		 * or - more simply and appropriately here - you just
608 		 * fail the attach.
609 		 */
610 		cmn_err(CE_WARN,
611 		    "fas%d: Device is using a hilevel intr", instance);
612 		return (DDI_FAILURE);
613 	}
614 
615 	/*
616 	 * Allocate softc information.
617 	 */
618 	if (ddi_soft_state_zalloc(fas_state, instance) != DDI_SUCCESS) {
619 		cmn_err(CE_WARN,
620 		    "fas%d: cannot allocate soft state", instance);
621 		goto fail;
622 	}
623 
624 	fas = (struct fas *)ddi_get_soft_state(fas_state, instance);
625 
626 	if (fas == NULL) {
627 		goto fail;
628 	}
629 
630 	/*
631 	 * map in device registers
632 	 */
633 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
634 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
635 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
636 
637 	if (ddi_regs_map_setup(dip, (uint_t)0, (caddr_t *)&dmar,
638 	    (off_t)0, (off_t)sizeof (struct dma),
639 	    &dev_attr, &fas->f_dmar_acc_handle) != DDI_SUCCESS) {
640 		cmn_err(CE_WARN, "fas%d: cannot map dma", instance);
641 		goto fail;
642 	}
643 
644 	if (ddi_regs_map_setup(dip, (uint_t)1, (caddr_t *)&fasreg,
645 	    (off_t)0, (off_t)sizeof (struct fasreg),
646 	    &dev_attr, &fas->f_regs_acc_handle) != DDI_SUCCESS) {
647 		cmn_err(CE_WARN,
648 		    "fas%d: unable to map fas366 registers", instance);
649 		goto fail;
650 	}
651 
652 	fas_dma_attr = &dma_fasattr;
653 	if (ddi_dma_alloc_handle(dip, fas_dma_attr,
654 	    DDI_DMA_SLEEP, NULL, &fas->f_dmahandle) != DDI_SUCCESS) {
655 		cmn_err(CE_WARN,
656 		    "fas%d: cannot alloc dma handle", instance);
657 		goto fail;
658 	}
659 
660 	/*
661 	 * allocate cmdarea and its dma handle
662 	 */
663 	if (ddi_dma_mem_alloc(fas->f_dmahandle,
664 	    (uint_t)2*FIFOSIZE,
665 	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
666 	    NULL, (caddr_t *)&fas->f_cmdarea, &rlen,
667 		&fas->f_cmdarea_acc_handle) != DDI_SUCCESS) {
668 		cmn_err(CE_WARN,
669 		    "fas%d: cannot alloc cmd area", instance);
670 		goto fail;
671 	}
672 
673 	fas->f_reg = fasreg;
674 	fas->f_dma = dmar;
675 	fas->f_instance  = instance;
676 
677 	if (ddi_dma_addr_bind_handle(fas->f_dmahandle,
678 	    NULL, (caddr_t)fas->f_cmdarea,
679 	    rlen, DDI_DMA_RDWR|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
680 	    &fas->f_dmacookie, &count) != DDI_DMA_MAPPED) {
681 		cmn_err(CE_WARN,
682 		    "fas%d: cannot bind cmdarea", instance);
683 		goto fail;
684 	}
685 	bound_handle++;
686 
687 	ASSERT(count == 1);
688 
689 	/*
690 	 * Allocate a transport structure
691 	 */
692 	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
693 
694 	/*
695 	 * initialize transport structure
696 	 */
697 	fas->f_tran			= tran;
698 	fas->f_dev			= dip;
699 	tran->tran_hba_private		= fas;
700 	tran->tran_tgt_private		= NULL;
701 	tran->tran_tgt_init		= fas_scsi_tgt_init;
702 	tran->tran_tgt_probe		= fas_scsi_tgt_probe;
703 	tran->tran_tgt_free		= NULL;
704 	tran->tran_start		= fas_scsi_start;
705 	tran->tran_abort		= fas_scsi_abort;
706 	tran->tran_reset		= fas_scsi_reset;
707 	tran->tran_getcap		= fas_scsi_getcap;
708 	tran->tran_setcap		= fas_scsi_setcap;
709 	tran->tran_init_pkt		= fas_scsi_init_pkt;
710 	tran->tran_destroy_pkt		= fas_scsi_destroy_pkt;
711 	tran->tran_dmafree		= fas_scsi_dmafree;
712 	tran->tran_sync_pkt		= fas_scsi_sync_pkt;
713 	tran->tran_reset_notify 	= fas_scsi_reset_notify;
714 	tran->tran_get_bus_addr		= NULL;
715 	tran->tran_get_name		= NULL;
716 	tran->tran_quiesce		= fas_scsi_quiesce;
717 	tran->tran_unquiesce		= fas_scsi_unquiesce;
718 	tran->tran_bus_reset		= NULL;
719 	tran->tran_add_eventcall	= NULL;
720 	tran->tran_get_eventcookie	= NULL;
721 	tran->tran_post_event		= NULL;
722 	tran->tran_remove_eventcall	= NULL;
723 
724 	fas->f_force_async = 0;
725 
726 	/*
727 	 * disable tagged queuing and wide for all targets
728 	 * (will be enabled by target driver if required)
729 	 * sync is enabled by default
730 	 */
731 	fas->f_nowide = fas->f_notag = ALL_TARGETS;
732 	fas->f_force_narrow = ALL_TARGETS;
733 
734 	/*
735 	 * By default we assume embedded devices and save time
736 	 * checking for timeouts in fas_watch() by skipping
737 	 * the rest of luns
738 	 * If we're talking to any non-embedded devices,
739 	 * we can't cheat and skip over non-zero luns anymore
740 	 * in fas_watch() and fas_ustart().
741 	 */
742 	fas->f_dslot = NLUNS_PER_TARGET;
743 
744 	/*
745 	 * f_active is used for saving disconnected cmds;
746 	 * For tagged targets, we need to increase the size later
747 	 * Only allocate for Lun == 0, if we probe a lun > 0 then
748 	 * we allocate an active structure
749 	 * If TQ gets enabled then we need to increase the size
750 	 * to hold 256 cmds
751 	 */
752 	for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {
753 		(void) fas_alloc_active_slots(fas, slot, KM_SLEEP);
754 	}
755 
756 	/*
757 	 * initialize the qfull retry counts
758 	 */
759 	for (i = 0; i < NTARGETS_WIDE; i++) {
760 		fas->f_qfull_retries[i] = QFULL_RETRIES;
761 		fas->f_qfull_retry_interval[i] =
762 			drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
763 
764 	}
765 
766 	/*
767 	 * Initialize throttles.
768 	 */
769 	fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
770 
771 	/*
772 	 * Initialize mask of deferred property updates
773 	 */
774 	fas->f_props_update = 0;
775 
776 	/*
777 	 * set host ID
778 	 */
779 	fas->f_fasconf = DEFAULT_HOSTID;
780 	id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "initiator-id", -1);
781 	if (id == -1) {
782 		id = ddi_prop_get_int(DDI_DEV_T_ANY,	dip, 0,
783 		    "scsi-initiator-id", -1);
784 	}
785 	if (id != DEFAULT_HOSTID && id >= 0 && id < NTARGETS_WIDE) {
786 		fas_log(fas, CE_NOTE, "?initiator SCSI ID now %d\n", id);
787 		fas->f_fasconf = (uchar_t)id;
788 	}
789 
790 	/*
791 	 * find the burstsize and reduce ours if necessary
792 	 */
793 	fas->f_dma_attr = fas_dma_attr;
794 	fas->f_dma_attr->dma_attr_burstsizes &=
795 	    ddi_dma_burstsizes(fas->f_dmahandle);
796 
797 #ifdef FASDEBUG
798 	fas->f_dma_attr->dma_attr_burstsizes &= fas_burstsizes_limit;
799 	IPRINTF1("dma burstsize=%x\n", fas->f_dma_attr->dma_attr_burstsizes);
800 #endif
801 	/*
802 	 * Attach this instance of the hba
803 	 */
804 	if (scsi_hba_attach_setup(dip, fas->f_dma_attr, tran, 0) !=
805 	    DDI_SUCCESS) {
806 		fas_log(fas, CE_WARN, "scsi_hba_attach_setup failed");
807 		goto fail;
808 	}
809 	hba_attached++;
810 
811 	/*
812 	 * if scsi-options property exists, use it
813 	 */
814 	fas->f_scsi_options = ddi_prop_get_int(DDI_DEV_T_ANY,
815 	    dip, 0, "scsi-options", DEFAULT_SCSI_OPTIONS);
816 
817 	/*
818 	 * if scsi-selection-timeout property exists, use it
819 	 */
820 	fas_selection_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
821 	    dip, 0, "scsi-selection-timeout", SCSI_DEFAULT_SELECTION_TIMEOUT);
822 
823 	/*
824 	 * if hm-rev property doesn't exist, use old scheme for rev
825 	 */
826 	hm_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
827 	    "hm-rev", -1);
828 
829 	if (hm_rev == 0xa0 || hm_rev == -1) {
830 		if (DMAREV(dmar) != 0) {
831 			fas->f_hm_rev = 0x20;
832 			fas_log(fas, CE_WARN,
833 			    "obsolete rev 2.0 FEPS chip, "
834 			    "possible data corruption");
835 		} else {
836 			fas->f_hm_rev = 0x10;
837 			fas_log(fas, CE_WARN,
838 			    "obsolete and unsupported rev 1.0 FEPS chip");
839 			goto fail;
840 		}
841 	} else if (hm_rev == 0x20) {
842 		fas->f_hm_rev = 0x21;
843 		fas_log(fas, CE_WARN, "obsolete rev 2.1 FEPS chip");
844 	} else {
845 		fas->f_hm_rev = (uchar_t)hm_rev;
846 		fas_log(fas, CE_NOTE, "?rev %x.%x FEPS chip\n",
847 		    (hm_rev >> 4) & 0xf, hm_rev & 0xf);
848 	}
849 
850 	if ((fas->f_scsi_options & SCSI_OPTIONS_SYNC) == 0) {
851 		fas->f_nosync = ALL_TARGETS;
852 	}
853 
854 	if ((fas->f_scsi_options & SCSI_OPTIONS_WIDE) == 0) {
855 		fas->f_nowide = ALL_TARGETS;
856 	}
857 
858 	/*
859 	 * if target<n>-scsi-options property exists, use it;
860 	 * otherwise use the f_scsi_options
861 	 */
862 	for (i = 0; i < NTARGETS_WIDE; i++) {
863 		(void) sprintf(prop_str, prop_template, i);
864 		fas->f_target_scsi_options[i] = ddi_prop_get_int(
865 			DDI_DEV_T_ANY, dip, 0, prop_str, -1);
866 
867 		if (fas->f_target_scsi_options[i] != -1) {
868 			fas_log(fas, CE_NOTE, "?target%x-scsi-options=0x%x\n",
869 			    i, fas->f_target_scsi_options[i]);
870 			fas->f_target_scsi_options_defined |= 1 << i;
871 		} else {
872 			fas->f_target_scsi_options[i] = fas->f_scsi_options;
873 		}
874 		if (((fas->f_target_scsi_options[i] &
875 		    SCSI_OPTIONS_DR) == 0) &&
876 		    (fas->f_target_scsi_options[i] & SCSI_OPTIONS_TAG)) {
877 			fas->f_target_scsi_options[i] &= ~SCSI_OPTIONS_TAG;
878 			fas_log(fas, CE_WARN,
879 			    "Disabled TQ since disconnects are disabled");
880 		}
881 	}
882 
883 	fas->f_scsi_tag_age_limit =
884 	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-tag-age-limit",
885 		DEFAULT_TAG_AGE_LIMIT);
886 
887 	fas->f_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
888 	    dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
889 	if (fas->f_scsi_reset_delay == 0) {
890 		fas_log(fas, CE_NOTE,
891 			"scsi_reset_delay of 0 is not recommended,"
892 			" resetting to SCSI_DEFAULT_RESET_DELAY\n");
893 		fas->f_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
894 	}
895 
896 	/*
897 	 * get iblock cookie and initialize mutexes
898 	 */
899 	if (ddi_get_iblock_cookie(dip, (uint_t)0, &fas->f_iblock)
900 	    != DDI_SUCCESS) {
901 		cmn_err(CE_WARN, "fas_attach: cannot get iblock cookie");
902 		goto fail;
903 	}
904 
905 	mutex_init(&fas->f_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
906 	cv_init(&fas->f_cv, NULL, CV_DRIVER, NULL);
907 
908 	/*
909 	 * initialize mutex for waitQ
910 	 */
911 	mutex_init(&fas->f_waitQ_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
912 	mutex_init_done++;
913 
914 	/*
915 	 * initialize callback mechanism (immediate callback)
916 	 */
917 	mutex_enter(&fas_global_mutex);
918 	if (fas_init_callbacks(fas)) {
919 		mutex_exit(&fas_global_mutex);
920 		goto fail;
921 	}
922 	mutex_exit(&fas_global_mutex);
923 
924 	/*
925 	 * kstat_intr support
926 	 */
927 	(void) sprintf(buf, "fas%d", instance);
928 	fas->f_intr_kstat = kstat_create("fas", instance, buf, "controller", \
929 			KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
930 	if (fas->f_intr_kstat)
931 		kstat_install(fas->f_intr_kstat);
932 
933 	/*
934 	 * install interrupt handler
935 	 */
936 	mutex_enter(FAS_MUTEX(fas));
937 	if (ddi_add_intr(dip, (uint_t)0, &fas->f_iblock, NULL,
938 	    fas_intr, (caddr_t)fas)) {
939 		cmn_err(CE_WARN, "fas: cannot add intr");
940 		mutex_exit(FAS_MUTEX(fas));
941 		goto fail;
942 	}
943 	intr_added++;
944 
945 	/*
946 	 * initialize fas chip
947 	 */
948 	if (fas_init_chip(fas, id))	{
949 		cmn_err(CE_WARN, "fas: cannot initialize");
950 		mutex_exit(FAS_MUTEX(fas));
951 		goto fail;
952 	}
953 	mutex_exit(FAS_MUTEX(fas));
954 
955 	/*
956 	 * create kmem cache for packets
957 	 */
958 	(void) sprintf(buf, "fas%d_cache", instance);
959 	fas->f_kmem_cache = kmem_cache_create(buf,
960 		EXTCMD_SIZE, 8,
961 		fas_kmem_cache_constructor, fas_kmem_cache_destructor,
962 		NULL, (void *)fas, NULL, 0);
963 	if (fas->f_kmem_cache == NULL) {
964 		cmn_err(CE_WARN, "fas: cannot create kmem_cache");
965 		goto fail;
966 	}
967 
968 	/*
969 	 * at this point, we are not going to fail the attach
970 	 * so there is no need to undo the rest:
971 	 *
972 	 * add this fas to the list, this makes debugging easier
973 	 * and fas_watch() needs it to walk thru all fas's
974 	 */
975 	rw_enter(&fas_global_rwlock, RW_WRITER);
976 	if (fas_head == NULL) {
977 		fas_head = fas;
978 	} else {
979 		fas_tail->f_next = fas;
980 	}
981 	fas_tail = fas; 	/* point to last fas in list */
982 	rw_exit(&fas_global_rwlock);
983 
984 	/*
985 	 * there is one watchdog handler for all driver instances.
986 	 * start the watchdog if it hasn't been done yet
987 	 */
988 	mutex_enter(&fas_global_mutex);
989 	if (fas_scsi_watchdog_tick == 0) {
990 		fas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
991 			dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
992 		if (fas_scsi_watchdog_tick != DEFAULT_WD_TICK) {
993 			fas_log(fas, CE_NOTE, "?scsi-watchdog-tick=%d\n",
994 			    fas_scsi_watchdog_tick);
995 		}
996 		fas_tick = drv_usectohz((clock_t)
997 		    fas_scsi_watchdog_tick * 1000000);
998 		IPRINTF2("fas scsi watchdog tick=%x, fas_tick=%lx\n",
999 		    fas_scsi_watchdog_tick, fas_tick);
1000 		if (fas_timeout_id == 0) {
1001 			fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
1002 			fas_timeout_initted = 1;
1003 		}
1004 	}
1005 	mutex_exit(&fas_global_mutex);
1006 
1007 	ddi_report_dev(dip);
1008 
1009 	return (DDI_SUCCESS);
1010 
1011 fail:
1012 	cmn_err(CE_WARN, "fas%d: cannot attach", instance);
1013 	if (fas) {
1014 		for (slot = 0; slot < N_SLOTS; slot++) {
1015 			struct f_slots *active = fas->f_active[slot];
1016 			if (active) {
1017 				kmem_free(active, active->f_size);
1018 				fas->f_active[slot] = NULL;
1019 			}
1020 		}
1021 		if (mutex_init_done) {
1022 			mutex_destroy(&fas->f_mutex);
1023 			mutex_destroy(&fas->f_waitQ_mutex);
1024 			cv_destroy(&fas->f_cv);
1025 		}
1026 		if (intr_added) {
1027 			ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);
1028 		}
1029 		/*
1030 		 * kstat_intr support
1031 		 */
1032 		if (fas->f_intr_kstat) {
1033 			kstat_delete(fas->f_intr_kstat);
1034 		}
1035 		if (hba_attached) {
1036 			(void) scsi_hba_detach(dip);
1037 		}
1038 		if (tran) {
1039 			scsi_hba_tran_free(tran);
1040 		}
1041 		if (fas->f_kmem_cache) {
1042 			kmem_cache_destroy(fas->f_kmem_cache);
1043 		}
1044 		if (fas->f_cmdarea) {
1045 			if (bound_handle) {
1046 				(void) ddi_dma_unbind_handle(fas->f_dmahandle);
1047 			}
1048 			ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
1049 		}
1050 		if (fas->f_dmahandle) {
1051 			ddi_dma_free_handle(&fas->f_dmahandle);
1052 		}
1053 		fas_destroy_callbacks(fas);
1054 		if (fas->f_regs_acc_handle) {
1055 			ddi_regs_map_free(&fas->f_regs_acc_handle);
1056 		}
1057 		if (fas->f_dmar_acc_handle) {
1058 			ddi_regs_map_free(&fas->f_dmar_acc_handle);
1059 		}
1060 		ddi_soft_state_free(fas_state, instance);
1061 
1062 		ddi_remove_minor_node(dip, NULL);
1063 	}
1064 	return (DDI_FAILURE);
1065 }
1066 
1067 /*ARGSUSED*/
1068 static int
1069 fas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1070 {
1071 	struct fas	*fas, *nfas;
1072 	scsi_hba_tran_t 	*tran;
1073 
1074 	/* CONSTCOND */
1075 	ASSERT(NO_COMPETING_THREADS);
1076 
1077 	switch (cmd) {
1078 	case DDI_DETACH:
1079 		return (fas_dr_detach(dip));
1080 
1081 	case DDI_SUSPEND:
1082 		if ((tran = ddi_get_driver_private(dip)) == NULL)
1083 			return (DDI_FAILURE);
1084 
1085 		fas = TRAN2FAS(tran);
1086 		if (!fas) {
1087 			return (DDI_FAILURE);
1088 		}
1089 
1090 		mutex_enter(FAS_MUTEX(fas));
1091 
1092 		fas->f_suspended = 1;
1093 
1094 		if (fas->f_ncmds) {
1095 			(void) fas_reset_bus(fas);
1096 			(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
1097 		}
1098 		/*
1099 		 * disable dma and fas interrupt
1100 		 */
1101 		fas->f_dma_csr &= ~DMA_INTEN;
1102 		fas->f_dma_csr &= ~DMA_ENDVMA;
1103 		fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
1104 
1105 		mutex_exit(FAS_MUTEX(fas));
1106 
1107 		if (fas->f_quiesce_timeid) {
1108 			(void) untimeout(fas->f_quiesce_timeid);
1109 				fas->f_quiesce_timeid = 0;
1110 		}
1111 
1112 		if (fas->f_restart_cmd_timeid) {
1113 			(void) untimeout(fas->f_restart_cmd_timeid);
1114 				fas->f_restart_cmd_timeid = 0;
1115 		}
1116 
1117 		/* Last fas? */
1118 		rw_enter(&fas_global_rwlock, RW_WRITER);
1119 		for (nfas = fas_head; nfas; nfas = nfas->f_next) {
1120 			if (!nfas->f_suspended) {
1121 				rw_exit(&fas_global_rwlock);
1122 				return (DDI_SUCCESS);
1123 			}
1124 		}
1125 		rw_exit(&fas_global_rwlock);
1126 
1127 		mutex_enter(&fas_global_mutex);
1128 		if (fas_timeout_id != 0) {
1129 			timeout_id_t tid = fas_timeout_id;
1130 			fas_timeout_id = 0;
1131 			fas_timeout_initted = 0;
1132 			mutex_exit(&fas_global_mutex);
1133 			(void) untimeout(tid);
1134 		} else {
1135 			mutex_exit(&fas_global_mutex);
1136 		}
1137 
1138 		mutex_enter(&fas_global_mutex);
1139 		if (fas_reset_watch) {
1140 			timeout_id_t tid = fas_reset_watch;
1141 			fas_reset_watch = 0;
1142 			mutex_exit(&fas_global_mutex);
1143 			(void) untimeout(tid);
1144 		} else {
1145 			mutex_exit(&fas_global_mutex);
1146 		}
1147 
1148 		return (DDI_SUCCESS);
1149 
1150 	default:
1151 		return (DDI_FAILURE);
1152 	}
1153 	_NOTE(NOT_REACHED)
1154 	/* NOTREACHED */
1155 }
1156 
/*
 * fas_dr_detach - complete teardown of one driver instance (DDI_DETACH
 * and dynamic reconfiguration path).
 *
 * Sequence: mask interrupts and remove the handler, unlink the instance
 * from the global fas list, delete the intr kstat, tear down callbacks
 * and reset notifications, destroy any outstanding tagged commands and
 * ARQ packets, cancel timeouts, then release mutexes, the kmem cache,
 * DMA/register handles, properties and finally the soft state.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the instance cannot be found.
 */
static int
fas_dr_detach(dev_info_t *dip)
{
	struct fas 	*fas, *f;
	scsi_hba_tran_t		*tran;
	short		slot;
	int			i, j;

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	fas = TRAN2FAS(tran);
	if (!fas) {
		return (DDI_FAILURE);
	}

	/*
	 * disable interrupts
	 */
	fas->f_dma_csr &= ~DMA_INTEN;
	fas->f_dma->dma_csr = fas->f_dma_csr;
	ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);

	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&fas_global_rwlock, RW_WRITER);

	if (fas_head == fas) {
		/* f tracks the predecessor for the fas_tail fixup below */
		f = fas_head = fas->f_next;
	} else {
		for (f = fas_head; f != (struct fas *)NULL; f = f->f_next) {
			if (f->f_next == fas) {
				f->f_next = fas->f_next;
				break;
			}
		}

		/*
		 * Instance not in softc list. Since the
		 * instance is not there in softc list, don't
		 * enable interrupts, the instance is effectively
		 * unusable.
		 */
		if (f == (struct fas *)NULL) {
			cmn_err(CE_WARN, "fas_dr_detach: fas instance not"
				" in softc list!");
			rw_exit(&fas_global_rwlock);
			return (DDI_FAILURE);
		}


	}

	if (fas_tail == fas)
		fas_tail = f;

	rw_exit(&fas_global_rwlock);

	if (fas->f_intr_kstat)
		kstat_delete(fas->f_intr_kstat);

	fas_destroy_callbacks(fas);

	scsi_hba_reset_notify_tear_down(fas->f_reset_notify_listf);

	mutex_enter(&fas_global_mutex);
	/*
	 * destroy any outstanding tagged command info
	 */
	for (slot = 0; slot < N_SLOTS; slot++) {
		struct f_slots *active = fas->f_active[slot];
		if (active) {
			ushort_t	tag;
			for (tag = 0; tag < active->f_n_slots; tag++) {
				struct fas_cmd	*sp = active->f_slot[tag];
				if (sp) {
					struct scsi_pkt *pkt = sp->cmd_pkt;
					if (pkt) {
						(void) fas_scsi_destroy_pkt(
						    &pkt->pkt_address, pkt);
					}
					/* sp freed in fas_scsi_destroy_pkt */
					active->f_slot[tag] = NULL;
				}
			}
			kmem_free(active, active->f_size);
			fas->f_active[slot] = NULL;
		}
		ASSERT(fas->f_tcmds[slot] == 0);
	}

	/*
	 * disallow timeout thread rescheduling
	 */
	fas->f_flags |= FAS_FLG_NOTIMEOUTS;
	mutex_exit(&fas_global_mutex);

	if (fas->f_quiesce_timeid) {
		(void) untimeout(fas->f_quiesce_timeid);
	}

	/*
	 * last fas? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&fas_global_mutex);
	if (fas_head == (struct fas *)NULL) {
		if (fas_timeout_initted) {
			timeout_id_t tid = fas_timeout_id;
			fas_timeout_initted = 0;
			fas_timeout_id = 0;		/* don't resched */
			mutex_exit(&fas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&fas_global_mutex);
		}

		/*
		 * NOTE(review): unlike the watchdog above, fas_reset_watch
		 * is passed to untimeout() after the mutex is dropped and
		 * only cleared after re-acquiring it — looks racy against a
		 * concurrent rescheduler; confirm against fas_watch()
		 */
		if (fas_reset_watch) {
			mutex_exit(&fas_global_mutex);
			(void) untimeout(fas_reset_watch);
			mutex_enter(&fas_global_mutex);
			fas_reset_watch = 0;
		}
	}
	mutex_exit(&fas_global_mutex);

	if (fas->f_restart_cmd_timeid) {
		(void) untimeout(fas->f_restart_cmd_timeid);
		fas->f_restart_cmd_timeid = 0;
	}

	/*
	 * destroy outstanding ARQ pkts
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		for (j = 0; j < NLUNS_PER_TARGET; j++) {
			/* NOTE: this `slot' shadows the outer short slot */
			int slot = i * NLUNS_PER_TARGET | j;
			if (fas->f_arq_pkt[slot]) {
				struct scsi_address	sa;
				sa.a_hba_tran = NULL;		/* not used */
				sa.a_target = (ushort_t)i;
				sa.a_lun = (uchar_t)j;
				(void) fas_delete_arq_pkt(fas, &sa);
			}
		}
	}

	/*
	 * Remove device MT locks and CV
	 */
	mutex_destroy(&fas->f_waitQ_mutex);
	mutex_destroy(&fas->f_mutex);
	cv_destroy(&fas->f_cv);

	/*
	 * Release miscellaneous device resources
	 */

	if (fas->f_kmem_cache) {
		kmem_cache_destroy(fas->f_kmem_cache);
	}

	if (fas->f_cmdarea != (uchar_t *)NULL) {
		(void) ddi_dma_unbind_handle(fas->f_dmahandle);
		ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
	}

	if (fas->f_dmahandle != (ddi_dma_handle_t)NULL) {
		ddi_dma_free_handle(&fas->f_dmahandle);
	}

	if (fas->f_regs_acc_handle) {
		ddi_regs_map_free(&fas->f_regs_acc_handle);
	}
	if (fas->f_dmar_acc_handle) {
		ddi_regs_map_free(&fas->f_dmar_acc_handle);
	}

	/*
	 * Remove properties created during attach()
	 */
	ddi_prop_remove_all(dip);

	/*
	 * Delete the DMA limits, transport vectors and remove the device
	 * links to the scsi_transport layer.
	 *	-- ddi_set_driver_private(dip, NULL)
	 */
	(void) scsi_hba_detach(dip);

	/*
	 * Free the scsi_transport structure for this device.
	 */
	scsi_hba_tran_free(tran);

	ddi_soft_state_free(fas_state, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1355 
/*
 * fas_quiesce_bus - drain the bus for quiesce (e.g. dynamic reconfig).
 *
 * Holds all throttles, then waits on f_cv for fas_ncmds_checkdrain()
 * to signal that no commands remain outstanding.  Returns 0 when the
 * bus is quiesced, -1 if the wait was interrupted by a signal (in
 * which case throttles are restored and command processing restarts).
 */
static int
fas_quiesce_bus(struct fas *fas)
{
	mutex_enter(FAS_MUTEX(fas));
	IPRINTF("fas_quiesce: QUIESCEing\n");
	IPRINTF3("fas_quiesce: ncmds (%d) ndisc (%d) state (%d)\n",
		fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
	/* stop issuing new commands on every slot */
	fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
	if (fas_check_outstanding(fas)) {
		fas->f_softstate |= FAS_SS_DRAINING;
		/* periodic re-check; signals f_cv once drained */
		fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
		    fas, (FAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(FAS_CV(fas), FAS_MUTEX(fas)) == 0) {
			/*
			 * quiesce has been interrupted.
			 */
			IPRINTF("fas_quiesce: abort QUIESCE\n");
			fas->f_softstate &= ~FAS_SS_DRAINING;
			fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
			(void) fas_istart(fas);
			if (fas->f_quiesce_timeid != 0) {
				mutex_exit(FAS_MUTEX(fas));
#ifndef __lock_lint	/* warlock complains but there is a NOTE on this */
				(void) untimeout(fas->f_quiesce_timeid);
				fas->f_quiesce_timeid = 0;
#endif
				return (-1);
			}
			mutex_exit(FAS_MUTEX(fas));
			return (-1);
		} else {
			IPRINTF("fas_quiesce: bus is QUIESCED\n");
			/* checkdrain clears the timeout id before signalling */
			ASSERT(fas->f_quiesce_timeid == 0);
			fas->f_softstate &= ~FAS_SS_DRAINING;
			fas->f_softstate |= FAS_SS_QUIESCED;
			mutex_exit(FAS_MUTEX(fas));
			return (0);
		}
	}
	IPRINTF("fas_quiesce: bus was not busy QUIESCED\n");
	mutex_exit(FAS_MUTEX(fas));
	return (0);
}
1399 
1400 static int
1401 fas_unquiesce_bus(struct fas *fas)
1402 {
1403 	mutex_enter(FAS_MUTEX(fas));
1404 	fas->f_softstate &= ~FAS_SS_QUIESCED;
1405 	fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
1406 	(void) fas_istart(fas);
1407 	IPRINTF("fas_quiesce: bus has been UNQUIESCED\n");
1408 	mutex_exit(FAS_MUTEX(fas));
1409 
1410 	return (0);
1411 }
1412 
1413 /*
1414  * invoked from timeout() to check the number of outstanding commands
1415  */
static void
fas_ncmds_checkdrain(void *arg)
{
	/* timeout(9F) callback armed by fas_quiesce_bus() */
	struct fas *fas = arg;

	mutex_enter(FAS_MUTEX(fas));
	IPRINTF3("fas_checkdrain: ncmds (%d) ndisc (%d) state (%d)\n",
		fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
	if (fas->f_softstate & FAS_SS_DRAINING) {
		/* we are no longer pending; clear before deciding to resched */
		fas->f_quiesce_timeid = 0;
		if (fas_check_outstanding(fas) == 0) {
			IPRINTF("fas_drain: bus has drained\n");
			/* wake fas_quiesce_bus() waiting on f_cv */
			cv_signal(FAS_CV(fas));
		} else {
			/*
			 * throttle may have been reset by a bus reset
			 * or fas_runpoll()
			 * XXX shouldn't be necessary
			 */
			fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
			IPRINTF("fas_drain: rescheduling timeout\n");
			fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
			    fas, (FAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		}
	}
	mutex_exit(FAS_MUTEX(fas));
}
1443 
1444 static int
1445 fas_check_outstanding(struct fas *fas)
1446 {
1447 	uint_t slot;
1448 	uint_t d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
1449 	int ncmds = 0;
1450 
1451 	ASSERT(mutex_owned(FAS_MUTEX(fas)));
1452 
1453 	for (slot = 0; slot < N_SLOTS; slot += d)
1454 		ncmds += fas->f_tcmds[slot];
1455 
1456 	return (ncmds);
1457 }
1458 
1459 
1460 #ifdef	FASDEBUG
1461 /*
1462  * fas register read/write functions with tracing
1463  */
static void
fas_reg_tracing(struct fas *fas, int type, int regno, uint32_t what)
{
	/*
	 * Append one 4-word trace entry (access type, register offset,
	 * value, timestamp) followed by a 0xff end marker at the current
	 * index, then wrap the index when it passes REG_TRACE_BUF_SIZE.
	 * NOTE(review): the sentinel is written before the bounds check,
	 * so this assumes f_reg_trace[] has slack beyond
	 * REG_TRACE_BUF_SIZE — confirm against the array declaration.
	 */
	fas->f_reg_trace[fas->f_reg_trace_index++] = type;
	fas->f_reg_trace[fas->f_reg_trace_index++] = regno;
	fas->f_reg_trace[fas->f_reg_trace_index++] = what;
	fas->f_reg_trace[fas->f_reg_trace_index++] = gethrtime();
	fas->f_reg_trace[fas->f_reg_trace_index] = 0xff;
	if (fas->f_reg_trace_index >= REG_TRACE_BUF_SIZE) {
		fas->f_reg_trace_index = 0;
	}
}
1476 
/*
 * write a command byte to the fas command register, remembering it in
 * f_last_cmd and recording the access in the trace buffer (type 0)
 */
static void
fas_reg_cmd_write(struct fas *fas, uint8_t cmd)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	/* byte offset of the command register within the register block */
	int regno = (uintptr_t)&fasreg->fas_cmd - (uintptr_t)fasreg;

	fasreg->fas_cmd = cmd;
	fas->f_last_cmd = cmd;

	EPRINTF1("issuing cmd %x\n", (uchar_t)cmd);
	fas_reg_tracing(fas, 0, regno, cmd);

	fas->f_reg_cmds++;
}
1491 
/*
 * write a byte to an arbitrary fas register, recording the access in
 * the trace buffer (type 1)
 */
static void
fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what)
{
	/* byte offset of *p within the register block, for the trace */
	int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;

	*p = what;

	EPRINTF2("writing reg%x = %x\n", regno, what);
	fas_reg_tracing(fas, 1, regno, what);

	fas->f_reg_writes++;
}
1504 
/*
 * read a byte from an arbitrary fas register, recording the access in
 * the trace buffer (type 2); returns the value read
 */
static uint8_t
fas_reg_read(struct fas *fas, volatile uint8_t *p)
{
	uint8_t what;
	/* byte offset of *p within the register block, for the trace */
	int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;

	what = *p;

	EPRINTF2("reading reg%x => %x\n", regno, what);
	fas_reg_tracing(fas, 2, regno, what);

	fas->f_reg_reads++;

	return (what);
}
1520 
1521 /*
1522  * dma register access routines
1523  */
/*
 * write a 32-bit dma engine register; traced as type 3 when
 * DMA_REG_TRACING is enabled
 */
static void
fas_dma_reg_write(struct fas *fas, volatile uint32_t *p, uint32_t what)
{
	*p = what;
	fas->f_reg_dma_writes++;

#ifdef DMA_REG_TRACING
{
	int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
	EPRINTF2("writing dma reg%x = %x\n", regno, what);
	fas_reg_tracing(fas, 3, regno, what);
}
#endif
}
1538 
/*
 * read a 32-bit dma engine register; traced as type 4 when
 * DMA_REG_TRACING is enabled.  Returns the value read.
 */
static uint32_t
fas_dma_reg_read(struct fas *fas, volatile uint32_t *p)
{
	uint32_t what = *p;
	fas->f_reg_dma_reads++;

#ifdef DMA_REG_TRACING
{
	int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
	EPRINTF2("reading dma reg%x => %x\n", regno, what);
	fas_reg_tracing(fas, 4, regno, what);
}
#endif
	return (what);
}
1554 #endif
1555 
1556 #define	FIFO_EMPTY(fas)  (fas_reg_read(fas, &fas->f_reg->fas_stat2) & \
1557 		FAS_STAT2_EMPTY)
1558 #define	FIFO_CNT(fas) \
1559 	(fas_reg_read(fas, &fas->f_reg->fas_fifo_flag) & FIFO_CNT_MASK)
1560 
1561 #ifdef FASDEBUG
/*
 * assert ATN on the bus via the SET_ATN chip command; under FAS_TEST
 * a high fas_test_stop level drops into the debugger for inspection
 */
static void
fas_assert_atn(struct fas *fas)
{
	fas_reg_cmd_write(fas, CMD_SET_ATN);
#ifdef FAS_TEST
	if (fas_test_stop > 1)
		debug_enter("asserted atn");
#endif
}
1571 #else
1572 #define	fas_assert_atn(fas)  fas_reg_cmd_write(fas, CMD_SET_ATN)
1573 #endif
1574 
1575 /*
1576  * DMA macros; we use a shadow copy of the dma_csr to	save unnecessary
1577  * reads
1578  */
/*
 * FAS_DMA_WRITE: program a dma transfer of `count' bytes at dva `base'
 * and issue fas command `cmd'.  Order matters: the chip transfer count
 * and command are loaded first; the dma csr (which sets DMA_ENDVMA and
 * starts the engine) is written last.  The ASSERT demands the engine
 * is idle on entry.  Relies on the shadow copy in f_dma_csr.
 */
#define	FAS_DMA_WRITE(fas, count, base, cmd) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_reg_cmd_write(fas, cmd); \
	fas_dma_reg_write(fas, &dmar->dma_count, count); \
	fas->f_dma_csr |= \
	    DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
	fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
}
1591 
/*
 * FAS_DMA_WRITE_SETUP: same register setup as FAS_DMA_WRITE but issues
 * no fas command and does not write the dma csr — the caller starts
 * the transfer later with the updated f_dma_csr shadow.
 */
#define	FAS_DMA_WRITE_SETUP(fas, count, base) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_dma_reg_write(fas, &dmar->dma_count, count); \
	fas->f_dma_csr |= \
	    DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
}
1602 
1603 
/*
 * FAS_DMA_READ: program a dma transfer in the opposite direction
 * (DMA_WRITE intended clear), loading `count' into the chip counter
 * and `dmacount' into the dma engine, then issue fas command `cmd'.
 * NOTE(review): because the operator is "|=", the expression
 * x |= (x & ~DMA_WRITE) | ... reduces to x |= ..., i.e. DMA_WRITE is
 * never actually cleared here — presumably a preceding FAS_FLUSH_DMA
 * (which does clear it) makes this benign; confirm before changing.
 */
#define	FAS_DMA_READ(fas, count, base, dmacount, cmd) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_reg_cmd_write(fas, cmd); \
	fas->f_dma_csr |= \
	    (fas->f_dma_csr &	~DMA_WRITE) | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_count, dmacount); \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
	fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
}
1616 
/*
 * FAS_FLUSH_DMA - reset and re-initialize the dma engine: pulse
 * DMA_RESET, rebuild the f_dma_csr shadow (interrupts on, two-cycle,
 * parity/drain disabled, engine stopped), then reload csr and clear
 * the address register.
 */
static void
FAS_FLUSH_DMA(struct fas *fas)
{
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
	fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
		DMA_DSBL_DRAIN);
	fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
	/* take the engine out of reset before programming the shadow */
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
	fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
}
1628 
1629 /*
1630  * FAS_FLUSH_DMA_HARD checks on REQPEND before taking away the reset
1631  */
/*
 * FAS_FLUSH_DMA_HARD checks on REQPEND before taking away the reset
 */
static void
FAS_FLUSH_DMA_HARD(struct fas *fas)
{
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
	fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
		DMA_DSBL_DRAIN);
	fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
	/* busy-wait (empty body) until no dma request is pending */
	while (fas_dma_reg_read(fas, &fas->f_dma->dma_csr) & DMA_REQPEND);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
	fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
}
1644 
1645 /*
1646  * update period, conf3, offset reg, if necessary
1647  */
/*
 * Write the per-target sync period/offset/conf3 registers only when
 * they differ from the last values written (cached in the
 * f_*_reg_last fields).  NOTE: expects a `fasreg' variable to be in
 * scope at the expansion site.
 */
#define	FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target) \
{ \
	uchar_t period, offset, conf3; \
	period = fas->f_sync_period[target] & SYNC_PERIOD_MASK; \
	offset = fas->f_offset[target]; \
	conf3  = fas->f_fasconf3[target]; \
	if ((period != fas->f_period_reg_last) || \
	    (offset != fas->f_offset_reg_last) || \
	    (conf3 != fas->f_fasconf3_reg_last)) { \
		fas->f_period_reg_last = period; \
		fas->f_offset_reg_last = offset; \
		fas->f_fasconf3_reg_last = conf3; \
		fas_reg_write(fas, &fasreg->fas_sync_period, period); \
		fas_reg_write(fas, &fasreg->fas_sync_offset, offset); \
		fas_reg_write(fas, &fasreg->fas_conf3, conf3); \
	} \
}
1665 
1666 /*
1667  * fifo read/write routines
1668  * always read the fifo bytes before reading the interrupt register
1669  */
1670 
1671 static void
1672 fas_read_fifo(struct fas *fas)
1673 {
1674 	int stat = fas->f_stat;
1675 	volatile struct fasreg	 *fasreg = fas->f_reg;
1676 	int		 i;
1677 
1678 	i = fas_reg_read(fas, &fasreg->fas_fifo_flag) & FIFO_CNT_MASK;
1679 	EPRINTF2("fas_read_fifo: fifo cnt=%x, stat=%x\n", i, stat);
1680 	ASSERT(i <= FIFOSIZE);
1681 
1682 	fas->f_fifolen = 0;
1683 	while (i-- > 0) {
1684 		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1685 			&fasreg->fas_fifo_data);
1686 		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1687 			&fasreg->fas_fifo_data);
1688 	}
1689 	if (fas->f_stat2 & FAS_STAT2_ISHUTTLE)	{
1690 
1691 		/* write pad byte */
1692 		fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
1693 		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1694 			&fasreg->fas_fifo_data);
1695 		/* flush pad byte */
1696 		fas_reg_cmd_write(fas, CMD_FLUSH);
1697 	}
1698 	EPRINTF2("fas_read_fifo: fifo len=%x, stat2=%x\n",
1699 		fas->f_fifolen, stat);
1700 } /* fas_read_fifo */
1701 
/*
 * fas_write_fifo - flush the fifo, then push `length' bytes from buf
 * into the chip fifo.  When `pad' is set, a zero byte is written after
 * every data byte — presumably to fill the other half of the wide
 * (two-byte) fifo slot; confirm against the fas366 data sheet.
 */
static void
fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad)
{
	int i;
	volatile struct fasreg	 *fasreg = fas->f_reg;

	EPRINTF1("writing fifo %x bytes\n", length);
	ASSERT(length <= 15);
	fas_reg_cmd_write(fas, CMD_FLUSH);
	for (i = 0; i < length; i++) {
		fas_reg_write(fas, &fasreg->fas_fifo_data, buf[i]);
		if (pad) {
			fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
		}
	}
}
1718 
1719 /*
1720  * Hardware and Software internal reset routines
1721  */
/*
 * fas_init_chip - one-time chip initialization at attach:
 * validate the clock frequency (only 40MHz parts are accepted),
 * derive the selection timeout value, set up the conf/conf2/conf3
 * register shadows, soft-reset chip+dma+softc (without resetting the
 * scsi bus), and seed per-target sync period/offset from the
 * scsi-options.  Returns 0 on success, -1 on a bad clock frequency.
 */
static int
fas_init_chip(struct fas *fas, uchar_t initiator_id)
{
	int		i;
	uchar_t		clock_conv;
	uchar_t		initial_conf3;
	uint_t		ticks;
	static char	*prop_cfreq = "clock-frequency";

	/*
	 * Determine clock frequency of attached FAS chip.
	 */
	i = ddi_prop_get_int(DDI_DEV_T_ANY,
		fas->f_dev, DDI_PROP_DONTPASS, prop_cfreq, -1);
	/* round up to a whole multiple of 5 MHz */
	clock_conv = (i + FIVE_MEG - 1) / FIVE_MEG;
	if (clock_conv != CLOCK_40MHZ) {
		fas_log(fas, CE_WARN, "Bad clock frequency");
		return (-1);
	}

	fas->f_clock_conv = clock_conv;
	fas->f_clock_cycle = CLOCK_PERIOD(i);
	ticks = FAS_CLOCK_TICK(fas);
	fas->f_stval = FAS_CLOCK_TIMEOUT(ticks, fas_selection_timeout);

	DPRINTF5("%d mhz, clock_conv %d, clock_cycle %d, ticks %d, stval %d\n",
		i, fas->f_clock_conv, fas->f_clock_cycle,
		ticks, fas->f_stval);
	/*
	 * set up conf registers
	 */
	fas->f_fasconf |= FAS_CONF_PAREN;
	fas->f_fasconf2 = (uchar_t)(FAS_CONF2_FENABLE | FAS_CONF2_XL32);

	/* initiator ids >= 8 need the extra id bit in conf3 */
	if (initiator_id < NTARGETS) {
		initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO;
	} else {
		initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO |
		    FAS_CONF3_IDBIT3;
	}

	for (i = 0; i < NTARGETS_WIDE; i++) {
		fas->f_fasconf3[i] = initial_conf3;
	}

	/*
	 * Avoid resetting the scsi bus since this causes a few seconds
	 * delay per fas in boot and also causes busy conditions in some
	 * tape devices.
	 */
	fas_internal_reset(fas, FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);

	/*
	 * initialize period and offset for each target
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_SYNC) {
			fas->f_offset[i] = fas_default_offset |
				fas->f_req_ack_delay;
		} else {
			fas->f_offset[i] = 0;
		}
		if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_FAST) {
			fas->f_neg_period[i] =
			    (uchar_t)MIN_SYNC_PERIOD(fas);
		} else {
			fas->f_neg_period[i] =
			    (uchar_t)CONVERT_PERIOD(DEFAULT_SYNC_PERIOD);
		}
	}
	return (0);
}
1794 
1795 /*
1796  * reset bus, chip, dma, or soft state
1797  */
/*
 * fas_internal_reset - reset the scsi bus, dma engine, fas chip and/or
 * driver soft state depending on the FAS_RESET_* bits in reset_action.
 * The dma engine is always flushed hard; chip register reloads and
 * soft-state reinit happen only for the corresponding bits.
 */
static void
fas_internal_reset(struct fas *fas, int reset_action)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	volatile struct dma *dmar = fas->f_dma;

	if (reset_action & FAS_RESET_SCSIBUS)	{
		fas_reg_cmd_write(fas, CMD_RESET_SCSI);
		fas_setup_reset_delay(fas);
	}

	FAS_FLUSH_DMA_HARD(fas); /* resets and reinits the dma */

	/*
	 * NOTE: if dma is aborted while active, indefinite hangs
	 * may occur; it is preferable to stop the target first before
	 * flushing the dma
	 */
	if (reset_action & FAS_RESET_DMA) {
		/* pick the largest burst size the parent supports */
		int burstsizes = fas->f_dma_attr->dma_attr_burstsizes;
		if (burstsizes & BURST64) {
			IPRINTF("64 byte burstsize\n");
			fas->f_dma_csr |= DMA_BURST64;
		} else if	(burstsizes & BURST32) {
			IPRINTF("32 byte burstsize\n");
			fas->f_dma_csr |= DMA_BURST32;
		} else {
			IPRINTF("16 byte burstsize\n");
		}
		/* 64-bit sbus only on newer silicon and when enabled */
		if ((fas->f_hm_rev > 0x20) && (fas_enable_sbus64) &&
		    (ddi_dma_set_sbus64(fas->f_dmahandle, burstsizes) ==
		    DDI_SUCCESS)) {
			IPRINTF("enabled 64 bit sbus\n");
			fas->f_dma_csr |= DMA_WIDE_EN;
		}
	}

	if (reset_action & FAS_RESET_FAS) {
		/*
		 * 2 NOPs with DMA are required here
		 * id_code is unreliable if we don't do this)
		 */
		uchar_t idcode, fcode;
		int dmarev;

		fas_reg_cmd_write(fas, CMD_RESET_FAS);
		fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);
		fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);

		/*
		 * Re-load chip configurations
		 * Only load registers which are not loaded in fas_startcmd()
		 */
		fas_reg_write(fas, &fasreg->fas_clock_conv,
			(fas->f_clock_conv & CLOCK_MASK));

		fas_reg_write(fas, &fasreg->fas_timeout, fas->f_stval);

		/*
		 * enable default configurations
		 */
		fas->f_idcode = idcode =
			fas_reg_read(fas, &fasreg->fas_id_code);
		fcode = (uchar_t)(idcode & FAS_FCODE_MASK) >> (uchar_t)3;
		fas->f_type = FAS366;
		IPRINTF2("Family code %d, revision %d\n",
		    fcode, (idcode & FAS_REV_MASK));
		dmarev = fas_dma_reg_read(fas, &dmar->dma_csr);
		dmarev = (dmarev >> 11) & 0xf;
		IPRINTF1("DMA channel revision %d\n", dmarev);

		fas_reg_write(fas, &fasreg->fas_conf, fas->f_fasconf);
		fas_reg_write(fas, &fasreg->fas_conf2, fas->f_fasconf2);

		fas->f_req_ack_delay = DEFAULT_REQ_ACK_DELAY;

		/*
		 * Just in case... clear interrupt
		 */
		(void) fas_reg_read(fas, &fasreg->fas_intr);
	}

	if (reset_action & FAS_RESET_SOFTC) {
		/* forget all negotiation/message state; back to FREE */
		fas->f_wdtr_sent = fas->f_sdtr_sent = 0;
		fas->f_wide_known = fas->f_sync_known = 0;
		fas->f_wide_enabled = fas->f_sync_enabled = 0;
		fas->f_omsglen = 0;
		fas->f_cur_msgout[0] = fas->f_last_msgout =
		    fas->f_last_msgin = INVALID_MSG;
		fas->f_abort_msg_sent = fas->f_reset_msg_sent = 0;
		fas->f_next_slot = 0;
		fas->f_current_sp = NULL;
		fas->f_fifolen = 0;
		/* 0xff forces the next period/offset/conf3 reg write */
		fas->f_fasconf3_reg_last = fas->f_offset_reg_last =
			fas->f_period_reg_last = 0xff;

		New_state(fas, STATE_FREE);
	}
}
1897 
1898 
1899 #ifdef FASDEBUG
1900 /*
1901  * check if ncmds still reflects the truth
1902  * count all cmds for this driver instance and compare with ncmds
1903  */
static void
fas_check_ncmds(struct fas *fas)
{
	int slot = 0;
	ushort_t tag, t;
	int n, total = 0;

	/* walk every slot once, starting at 0, stepping by f_dslot */
	do {
		if (fas->f_active[slot]) {
			/* count queued-but-not-started cmds ... */
			struct fas_cmd *sp = fas->f_readyf[slot];
			t = fas->f_active[slot]->f_n_slots;
			while (sp != 0) {
				sp = sp->cmd_forw;
				total++;
			}
			/* ... plus active tagged cmds in this slot */
			for (n = tag = 0; tag < t; tag++) {
				if (fas->f_active[slot]->f_slot[tag] != 0) {
					n++;
					total++;
				}
			}
			ASSERT(n == fas->f_tcmds[slot]);
		}
		slot = NEXTSLOT(slot, fas->f_dslot);
	} while (slot != 0);

	if (total != fas->f_ncmds) {
		IPRINTF2("fas_check_ncmds: total=%x, ncmds=%x\n",
			total, fas->f_ncmds);
	}
	ASSERT(fas->f_ncmds >= fas->f_ndisc);
}
1936 #else
1937 #define	fas_check_ncmds(fas)
1938 #endif
1939 
1940 /*
1941  * SCSA Interface functions
1942  *
1943  * Visible to the external world via the transport structure.
1944  *
1945  * fas_scsi_abort: abort a current cmd or all cmds for a target
1946  */
1947 /*ARGSUSED*/
1948 static int
1949 fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1950 {
1951 	struct fas *fas = ADDR2FAS(ap);
1952 	int rval;
1953 
1954 	IPRINTF2("fas_scsi_abort: target %d.%d\n", ap->a_target, ap->a_lun);
1955 
1956 	mutex_enter(FAS_MUTEX(fas));
1957 	rval =	fas_do_scsi_abort(ap, pkt);
1958 	fas_check_waitQ_and_mutex_exit(fas);
1959 	return (rval);
1960 }
1961 
1962 /*
1963  * reset handling: reset bus or target
1964  */
1965 /*ARGSUSED*/
1966 static int
1967 fas_scsi_reset(struct scsi_address *ap, int level)
1968 {
1969 	struct fas *fas = ADDR2FAS(ap);
1970 	int rval;
1971 
1972 	IPRINTF3("fas_scsi_reset: target %d.%d, level %d\n",
1973 		ap->a_target, ap->a_lun, level);
1974 
1975 	mutex_enter(FAS_MUTEX(fas));
1976 	rval = fas_do_scsi_reset(ap, level);
1977 	fas_check_waitQ_and_mutex_exit(fas);
1978 	return (rval);
1979 }
1980 
1981 /*
1982  * entry point for reset notification setup, to register or to cancel.
1983  */
1984 static int
1985 fas_scsi_reset_notify(struct scsi_address *ap, int flag,
1986     void (*callback)(caddr_t), caddr_t arg)
1987 {
1988 	struct fas	*fas = ADDR2FAS(ap);
1989 
1990 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1991 		&fas->f_mutex, &fas->f_reset_notify_listf));
1992 }
1993 
1994 /*
1995  * capability interface
1996  */
1997 /*ARGSUSED*/
1998 static int
1999 fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
2000 {
2001 	struct fas *fas = ADDR2FAS(ap);
2002 	DPRINTF3("fas_scsi_getcap: tgt=%x, cap=%s, whom=%x\n",
2003 		ap->a_target, cap, whom);
2004 	return (fas_commoncap(ap, cap, 0, whom, 0));
2005 }
2006 
2007 /*ARGSUSED*/
2008 static int
2009 fas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2010 {
2011 	struct fas *fas = ADDR2FAS(ap);
2012 	IPRINTF4("fas_scsi_setcap: tgt=%x, cap=%s, value=%x, whom=%x\n",
2013 		ap->a_target, cap, value, whom);
2014 	return (fas_commoncap(ap, cap, value, whom, 1));
2015 }
2016 
2017 /*
2018  * pkt and dma allocation and deallocation
2019  */
2020 /*ARGSUSED*/
2021 static void
2022 fas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2023 {
2024 	struct fas_cmd *cmd = PKT2CMD(pkt);
2025 
2026 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2027 	    "fas_scsi_dmafree_start");
2028 
2029 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
2030 		/*
2031 		 * Free the mapping.
2032 		 */
2033 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
2034 		cmd->cmd_flags ^= CFLAG_DMAVALID;
2035 	}
2036 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2037 	    "fas_scsi_dmafree_end");
2038 }
2039 
2040 /*ARGSUSED*/
2041 static void
2042 fas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2043 {
2044 	struct fas_cmd *sp = PKT2CMD(pkt);
2045 
2046 	if (sp->cmd_flags & CFLAG_DMAVALID) {
2047 		if (ddi_dma_sync(sp->cmd_dmahandle, 0, 0,
2048 		    (sp->cmd_flags & CFLAG_DMASEND) ?
2049 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
2050 		    DDI_SUCCESS) {
2051 			fas_log(ADDR2FAS(ap), CE_WARN,
2052 			    "sync of pkt (%p) failed", (void *)pkt);
2053 		}
2054 	}
2055 }
2056 
2057 /*
2058  * initialize pkt and allocate DVMA resources
2059  */
2060 static struct scsi_pkt *
2061 fas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
2062 	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
2063 	int flags, int (*callback)(), caddr_t arg)
2064 {
2065 	int kf;
2066 	int failure = 1;
2067 	struct fas_cmd *cmd;
2068 	struct fas *fas = ADDR2FAS(ap);
2069 	struct fas_cmd *new_cmd;
2070 	int rval;
2071 
2072 /* #define	FAS_TEST_EXTRN_ALLOC */
2073 #ifdef FAS_TEST_EXTRN_ALLOC
2074 	cmdlen *= 4; statuslen *= 4; tgtlen *= 4;
2075 #endif
2076 	/*
2077 	 * if no pkt was passed then allocate a pkt first
2078 	 */
2079 	if (pkt == NULL) {
2080 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_START,
2081 		    "fas_scsi_impl_pktalloc_start");
2082 
2083 		kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
2084 
2085 		/*
2086 		 * only one size of pkt (with arq).
2087 		 */
2088 		cmd = kmem_cache_alloc(fas->f_kmem_cache, kf);
2089 
2090 		if (cmd) {
2091 
2092 			ddi_dma_handle_t	save_dma_handle;
2093 
2094 			save_dma_handle = cmd->cmd_dmahandle;
2095 			bzero(cmd, EXTCMD_SIZE);
2096 			cmd->cmd_dmahandle = save_dma_handle;
2097 
2098 			pkt = (struct scsi_pkt *)((uchar_t *)cmd +
2099 			    sizeof (struct fas_cmd));
2100 			cmd->cmd_pkt		= pkt;
2101 			pkt->pkt_ha_private	= (opaque_t)cmd;
2102 			pkt->pkt_scbp	= (opaque_t)&cmd->cmd_scb;
2103 			pkt->pkt_cdbp	= (opaque_t)&cmd->cmd_cdb;
2104 			pkt->pkt_address	= *ap;
2105 
2106 			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
2107 			pkt->pkt_private = cmd->cmd_pkt_private;
2108 
2109 			cmd->cmd_cdblen 	= cmdlen;
2110 			cmd->cmd_scblen 	= statuslen;
2111 			cmd->cmd_privlen	= tgtlen;
2112 			cmd->cmd_slot		=
2113 				(Tgt(cmd) * NLUNS_PER_TARGET) | Lun(cmd);
2114 			failure = 0;
2115 		}
2116 		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
2117 		    (tgtlen > PKT_PRIV_LEN) ||
2118 		    (statuslen > EXTCMDS_STATUS_SIZE)) {
2119 			if (failure == 0) {
2120 				/*
2121 				 * if extern alloc fails, all will be
2122 				 * deallocated, including cmd
2123 				 */
2124 				failure = fas_pkt_alloc_extern(fas, cmd,
2125 				    cmdlen, tgtlen, statuslen, kf);
2126 			}
2127 			if (failure) {
2128 				/*
2129 				 * nothing to deallocate so just return
2130 				 */
2131 				TRACE_0(TR_FAC_SCSI_FAS,
2132 					TR_FAS_SCSI_IMPL_PKTALLOC_END,
2133 					"fas_scsi_impl_pktalloc_end");
2134 				return (NULL);
2135 			}
2136 		}
2137 
2138 		new_cmd = cmd;
2139 
2140 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_END,
2141 			"fas_scsi_impl_pktalloc_end");
2142 	} else {
2143 		cmd = PKT2CMD(pkt);
2144 		new_cmd = NULL;
2145 	}
2146 
2147 	/*
2148 	 * Second step of fas_scsi_init_pkt:
2149 	 * bind the buf to the handle
2150 	 */
2151 	if (bp && bp->b_bcount != 0 &&
2152 		    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
2153 
2154 		int cmd_flags, dma_flags;
2155 		uint_t dmacookie_count;
2156 
2157 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_START,
2158 		    "fas_scsi_impl_dmaget_start");
2159 
2160 		cmd_flags = cmd->cmd_flags;
2161 
2162 		if (bp->b_flags & B_READ) {
2163 			cmd_flags &= ~CFLAG_DMASEND;
2164 			dma_flags = DDI_DMA_READ | DDI_DMA_PARTIAL;
2165 		} else {
2166 			cmd_flags |= CFLAG_DMASEND;
2167 			dma_flags = DDI_DMA_WRITE | DDI_DMA_PARTIAL;
2168 		}
2169 		if (flags & PKT_CONSISTENT) {
2170 			cmd_flags |= CFLAG_CMDIOPB;
2171 			dma_flags |= DDI_DMA_CONSISTENT;
2172 		}
2173 
2174 		/*
2175 		 * bind the handle to the buf
2176 		 */
2177 		ASSERT(cmd->cmd_dmahandle != NULL);
2178 		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
2179 			dma_flags, callback, arg, &cmd->cmd_dmacookie,
2180 			&dmacookie_count);
2181 
2182 		if (rval && rval != DDI_DMA_PARTIAL_MAP) {
2183 			switch (rval) {
2184 			case DDI_DMA_NORESOURCES:
2185 				bioerror(bp, 0);
2186 				break;
2187 			case DDI_DMA_BADATTR:
2188 			case DDI_DMA_NOMAPPING:
2189 				bioerror(bp, EFAULT);
2190 				break;
2191 			case DDI_DMA_TOOBIG:
2192 			default:
2193 				bioerror(bp, EINVAL);
2194 				break;
2195 			}
2196 			cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
2197 			if (new_cmd) {
2198 				fas_scsi_destroy_pkt(ap, pkt);
2199 			}
2200 			TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2201 				"fas_scsi_impl_dmaget_end");
2202 			return ((struct scsi_pkt *)NULL);
2203 		}
2204 		ASSERT(dmacookie_count == 1);
2205 		cmd->cmd_dmacount = bp->b_bcount;
2206 		cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
2207 
2208 		ASSERT(cmd->cmd_dmahandle != NULL);
2209 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2210 		    "fas_scsi_impl_dmaget_end");
2211 	}
2212 
2213 	return (pkt);
2214 }
2215 
2216 /*
2217  * unbind dma resources and deallocate the pkt
2218  */
2219 static void
2220 fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2221 {
2222 	struct fas_cmd *sp = PKT2CMD(pkt);
2223 	struct fas *fas = ADDR2FAS(ap);
2224 
2225 	/*
2226 	 * fas_scsi_impl_dmafree inline to speed things up
2227 	 */
2228 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2229 	    "fas_scsi_impl_dmafree_start");
2230 
2231 	if (sp->cmd_flags & CFLAG_DMAVALID) {
2232 		/*
2233 		 * Free the mapping.
2234 		 */
2235 		(void) ddi_dma_unbind_handle(sp->cmd_dmahandle);
2236 		sp->cmd_flags ^= CFLAG_DMAVALID;
2237 	}
2238 
2239 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2240 	    "fas_scsi_impl_dmafree_end");
2241 
2242 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_START,
2243 	    "fas_scsi_impl_pktfree_start");
2244 
2245 	if ((sp->cmd_flags &
2246 	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
2247 	    CFLAG_SCBEXTERN)) == 0) {
2248 		sp->cmd_flags = CFLAG_FREE;
2249 		kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2250 	} else {
2251 		fas_pkt_destroy_extern(fas, sp);
2252 	}
2253 
2254 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_END,
2255 	    "fas_scsi_impl_pktfree_end");
2256 }
2257 
2258 /*
2259  * allocate and deallocate external pkt space (ie. not part of fas_cmd) for
2260  * non-standard length cdb, pkt_private, status areas
2261  * if allocation fails, then deallocate all external space and the pkt
2262  */
2263 /* ARGSUSED */
2264 static int
2265 fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
2266     int cmdlen, int tgtlen, int statuslen, int kf)
2267 {
2268 	caddr_t cdbp, scbp, tgt;
2269 	int failure = 0;
2270 
2271 	tgt = cdbp = scbp = NULL;
2272 	if (cmdlen > sizeof (sp->cmd_cdb)) {
2273 		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
2274 			failure++;
2275 		} else {
2276 			sp->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
2277 			sp->cmd_flags |= CFLAG_CDBEXTERN;
2278 		}
2279 	}
2280 	if (tgtlen > PKT_PRIV_LEN) {
2281 		if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
2282 			failure++;
2283 		} else {
2284 			sp->cmd_flags |= CFLAG_PRIVEXTERN;
2285 			sp->cmd_pkt->pkt_private = tgt;
2286 		}
2287 	}
2288 	if (statuslen > EXTCMDS_STATUS_SIZE) {
2289 		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
2290 			failure++;
2291 		} else {
2292 			sp->cmd_flags |= CFLAG_SCBEXTERN;
2293 			sp->cmd_pkt->pkt_scbp = (opaque_t)scbp;
2294 		}
2295 	}
2296 	if (failure) {
2297 		fas_pkt_destroy_extern(fas, sp);
2298 	}
2299 	return (failure);
2300 }
2301 
2302 /*
2303  * deallocate external pkt space and deallocate the pkt
2304  */
2305 static void
2306 fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp)
2307 {
2308 	if (sp->cmd_flags & CFLAG_FREE) {
2309 		panic("fas_pkt_destroy_extern: freeing free packet");
2310 		_NOTE(NOT_REACHED)
2311 		/* NOTREACHED */
2312 	}
2313 	if (sp->cmd_flags & CFLAG_CDBEXTERN) {
2314 		kmem_free((caddr_t)sp->cmd_pkt->pkt_cdbp,
2315 		    (size_t)sp->cmd_cdblen);
2316 	}
2317 	if (sp->cmd_flags & CFLAG_SCBEXTERN) {
2318 		kmem_free((caddr_t)sp->cmd_pkt->pkt_scbp,
2319 		    (size_t)sp->cmd_scblen);
2320 	}
2321 	if (sp->cmd_flags & CFLAG_PRIVEXTERN) {
2322 		kmem_free((caddr_t)sp->cmd_pkt->pkt_private,
2323 		    (size_t)sp->cmd_privlen);
2324 	}
2325 	sp->cmd_flags = CFLAG_FREE;
2326 	kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2327 }
2328 
2329 /*
2330  * kmem cache constructor and destructor:
2331  * When constructing, we bzero the cmd and allocate the dma handle
2332  * When destructing, just free the dma handle
2333  */
2334 static int
2335 fas_kmem_cache_constructor(void	*buf, void *cdrarg, int kmflags)
2336 {
2337 	struct fas_cmd *cmd = buf;
2338 	struct fas *fas = cdrarg;
2339 	int  (*callback)(caddr_t) = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP:
2340 				DDI_DMA_DONTWAIT;
2341 
2342 	bzero(buf, EXTCMD_SIZE);
2343 
2344 	/*
2345 	 * allocate a dma handle
2346 	 */
2347 	if ((ddi_dma_alloc_handle(fas->f_dev, fas->f_dma_attr, callback,
2348 	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
2349 		return (-1);
2350 	}
2351 	return (0);
2352 }
2353 
2354 /*ARGSUSED*/
2355 static void
2356 fas_kmem_cache_destructor(void *buf, void *cdrarg)
2357 {
2358 	struct fas_cmd *cmd = buf;
2359 	if (cmd->cmd_dmahandle) {
2360 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
2361 	}
2362 }
2363 
2364 /*
2365  * fas_scsi_start - Accept commands for transport
2366  */
static int
fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fas_cmd *sp = PKT2CMD(pkt);
	struct fas *fas = ADDR2FAS(ap);
	int rval;
	int intr = 0;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_START, "fas_scsi_start_start");

#ifdef FAS_TEST
	/* fault-injection knobs: fake busy/reject returns for testing */
	if (fas_transport_busy > 0) {
		fas_transport_busy--;
		return (TRAN_BUSY);
	}
	if ((fas_transport_busy_rqs > 0) &&
	    (*(sp->cmd_pkt->pkt_cdbp) == SCMD_REQUEST_SENSE)) {
		fas_transport_busy_rqs--;
		return (TRAN_BUSY);
	}
	if (fas_transport_reject > 0) {
		fas_transport_reject--;
		return (TRAN_BADPKT);
	}
#endif
	/*
	 * prepare packet before taking the mutex
	 */
	rval = fas_prepare_pkt(fas, sp);
	if (rval != TRAN_ACCEPT) {
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_PREPARE_PKT_END,
		    "fas_scsi_start_end (prepare_pkt)");
		return (rval);
	}

	/*
	 * fas mutex can be held for a long time; therefore, if the mutex is
	 * held, we queue the packet in a waitQ; we now should check
	 * the waitQ on every mutex_exit(FAS_MUTEX(fas)) but we really only
	 * need to do this when the bus is free
	 * don't put NOINTR cmds including proxy cmds in waitQ! These
	 * cmds are handled by fas_runpoll()
	 * if the waitQ is non-empty, queue the pkt anyway to preserve
	 * order
	 * the goal is to queue in waitQ as much as possible so at
	 * interrupt time, we can move the packets to readyQ or start
	 * a packet immediately. It helps to do this at interrupt
	 * time because we can then field more interrupts
	 */
	if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {

		/*
		 * if the bus is not free, we will get an interrupt shortly
		 * so we don't want to take the fas mutex but queue up
		 * the packet in the waitQ
		 * also, if the waitQ is non-empty or there is an interrupt
		 * pending then queue up the packet in the waitQ and let the
		 * interrupt handler empty the waitQ
		 */
		mutex_enter(&fas->f_waitQ_mutex);

		if ((fas->f_state != STATE_FREE) ||
		    fas->f_waitf || (intr = INTPENDING(fas))) {
			goto queue_in_waitQ;
		}

		/*
		 * we didn't queue up in the waitQ, so now try to accept
		 * the packet. if we fail to get the fas mutex, go back to
		 * the waitQ again
		 * do not release the waitQ mutex yet because that
		 * leaves a window where the interrupt handler has
		 * emptied the waitQ but not released the fas mutex yet
		 *
		 * the interrupt handler gets the locks in opposite order
		 * but because we do a tryenter, there is no deadlock
		 *
		 * if another thread has the fas mutex then either this
		 * thread or the other may find the bus free and
		 * empty the waitQ
		 */
		if (mutex_tryenter(FAS_MUTEX(fas))) {
			mutex_exit(&fas->f_waitQ_mutex);
			rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
		} else {
			/*
			 * we didn't get the fas mutex so
			 * the packet has to go in the waitQ now
			 */
			goto queue_in_waitQ;
		}
	} else {
		/*
		 * for polled cmds, we have to take the mutex and
		 * start the packet using fas_runpoll()
		 */
		mutex_enter(FAS_MUTEX(fas));
		rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
	}

	/*
	 * if the bus is free then empty waitQ and release the mutex
	 * (this should be unlikely that the bus is still free after
	 * accepting the packet. it may be the relatively unusual case
	 * that we are throttling)
	 */
	if (fas->f_state == STATE_FREE) {
		FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
	} else {
		mutex_exit(FAS_MUTEX(fas));
	}

	/*
	 * NOTE(review): no goto in this function targets this label;
	 * it is only reached by falling through from the code above.
	 */
done:
	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
		"fas_scsi_start_end: fas 0x%p", fas);
	return (rval);

queue_in_waitQ:
	/* append sp to the tail of the waitQ (f_waitb is the tail) */
	if (fas->f_waitf == NULL) {
		fas->f_waitb = fas->f_waitf = sp;
		sp->cmd_forw = NULL;
	} else {
		struct fas_cmd *dp = fas->f_waitb;
		dp->cmd_forw = fas->f_waitb = sp;
		sp->cmd_forw = NULL;
	}

	/*
	 * check again the fas mutex
	 * if there was an interrupt then the interrupt
	 * handler will eventually empty the waitQ
	 */
	if ((intr == 0) && (fas->f_state == STATE_FREE) &&
	    mutex_tryenter(FAS_MUTEX(fas))) {
		/*
		 * double check if the bus is still free
		 * (this actually reduced mutex contention a bit)
		 */
		if (fas->f_state == STATE_FREE) {
			fas_empty_waitQ(fas);
		}
		mutex_exit(FAS_MUTEX(fas));
	}
	mutex_exit(&fas->f_waitQ_mutex);

	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
		"fas_scsi_start_end: fas 0x%p", fas);
	return (rval);
}
2516 
2517 /*
2518  * prepare the pkt:
2519  * the pkt may have been resubmitted or just reused so
2520  * initialize some fields, reset the dma window, and do some checks
2521  */
static int
fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp)
{
	struct scsi_pkt *pkt = CMD2PKT(sp);

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state	= 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid	= 0;
	sp->cmd_age	= 0;
	sp->cmd_pkt_flags = pkt->pkt_flags;

	/*
	 * Copy the cdb pointer to the pkt wrapper area as we
	 * might modify this pointer. Zero status byte
	 */
	sp->cmd_cdbp = pkt->pkt_cdbp;
	*(pkt->pkt_scbp) = 0;

	if (sp->cmd_flags & CFLAG_DMAVALID) {
		/* full transfer is pending until data phase completes */
		pkt->pkt_resid	= sp->cmd_dmacount;

		/*
		 * if the pkt was resubmitted then the
		 * windows may be at the wrong number
		 */
		if (sp->cmd_cur_win) {
			sp->cmd_cur_win = 0;
			if (fas_set_new_window(fas, sp)) {
				IPRINTF("cannot reset window\n");
				return (TRAN_BADPKT);
			}
		}
		sp->cmd_saved_cur_addr =
		    sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;

		/*
		 * the common case is just one window, we worry
		 * about multiple windows when we run out of the
		 * current window
		 */
		sp->cmd_nwin = sp->cmd_saved_win = 0;
		sp->cmd_data_count = sp->cmd_saved_data_count = 0;

		/*
		 * consistent packets need to be sync'ed first
		 * (only for data going out)
		 */
		if ((sp->cmd_flags & (CFLAG_CMDIOPB | CFLAG_DMASEND)) ==
				(CFLAG_CMDIOPB | CFLAG_DMASEND)) {
			(void) ddi_dma_sync(sp->cmd_dmahandle,	0, (uint_t)0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	sp->cmd_actual_cdblen = sp->cmd_cdblen;

#ifdef FAS_TEST
#ifndef __lock_lint
	/* test hook: force one cmd to run untagged on a tagged target */
	if (fas_test_untagged > 0) {
		if (TAGGED(Tgt(sp))) {
			int slot = sp->cmd_slot;
			sp->cmd_pkt_flags &= ~FLAG_TAGMASK;
			sp->cmd_pkt_flags &= ~FLAG_NODISCON;
			/*
			 * NOTE(review): 0x80000000 appears to be a private
			 * marker bit in cmd_pkt_flags — confirm it does not
			 * collide with a defined FLAG_* value
			 */
			sp->cmd_pkt_flags |= 0x80000000;
			fas_log(fas, CE_NOTE,
			    "starting untagged cmd, target=%d,"
			    " tcmds=%d, sp=0x%p, throttle=%d\n",
			    Tgt(sp), fas->f_tcmds[slot], (void *)sp,
			    fas->f_throttle[slot]);
			fas_test_untagged = -10;
		}
	}
#endif
#endif

#ifdef FASDEBUG
	if (NOTAG(Tgt(sp)) && (pkt->pkt_flags & FLAG_TAGMASK)) {
		IPRINTF2("tagged packet for non-tagged target %d.%d\n",
		    Tgt(sp), Lun(sp));
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
		    "fas_prepare_pkt_end (tran_badpkt)");
		return (TRAN_BADPKT);
	}

	/*
	 * the SCSA spec states that it is an error to have no
	 * completion function when FLAG_NOINTR is not set
	 */
	if ((pkt->pkt_comp == NULL) &&
	    ((pkt->pkt_flags & FLAG_NOINTR) == 0)) {
		IPRINTF("intr packet with pkt_comp == 0\n");
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
		    "fas_prepare_pkt_end (tran_badpkt)");
		return (TRAN_BADPKT);
	}
#endif /* FASDEBUG */

	if ((fas->f_target_scsi_options[Tgt(sp)] & SCSI_OPTIONS_DR) == 0) {
		/*
		 * no need to reset tag bits since tag queueing will
		 * not be enabled if disconnects are disabled
		 */
		sp->cmd_pkt_flags |= FLAG_NODISCON;
	}

	/* mark the cmd as prepared and in transport */
	sp->cmd_flags = (sp->cmd_flags & ~CFLAG_TRANFLAG) |
		CFLAG_PREPARED | CFLAG_IN_TRANSPORT;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_ACCEPT_END,
	    "fas_prepare_pkt_end (tran_accept)");
	return (TRAN_ACCEPT);
}
2639 
2640 /*
2641  * emptying the waitQ just before releasing FAS_MUTEX is a bit
2642  * tricky; if we release the waitQ mutex and then the FAS_MUTEX,
2643  * another thread could queue a cmd in the waitQ, just before
2644  * the FAS_MUTEX is released. This cmd is then stuck in the waitQ unless
2645  * another cmd comes in or fas_intr() or fas_watch() checks the waitQ.
2646  * Therefore, by releasing the FAS_MUTEX before releasing the waitQ mutex,
2647  * we prevent fas_scsi_start() filling the waitQ
2648  *
2649  * By setting NO_TRAN_BUSY, we force fas_accept_pkt() to queue up
2650  * the waitQ pkts in the readyQ.
2651  * If a QFull condition occurs, the target driver may set its throttle
2652  * too high because of the requests queued up in the readyQ but this
2653  * is not a big problem. The throttle should be periodically reset anyway.
2654  */
static void
fas_empty_waitQ(struct fas *fas)
{
	struct fas_cmd *sp;
	int rval;
	struct fas_cmd *waitf, *waitb;

	/*
	 * caller holds the waitQ mutex; FAS_MUTEX must also be held
	 * since fas_accept_pkt() (called below) asserts ownership
	 */
	ASSERT(mutex_owned(&fas->f_waitQ_mutex));
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_START,
	    "fas_empty_waitQ_start");

	while (fas->f_waitf) {

		/* copy waitQ, zero the waitQ and release the mutex */
		waitf = fas->f_waitf;
		waitb = fas->f_waitb;
		fas->f_waitf = fas->f_waitb = NULL;
		mutex_exit(&fas->f_waitQ_mutex);

		do {
			/* detach the head of the private copy */
			sp = waitf;
			waitf = sp->cmd_forw;
			if (waitb == sp)	{
				waitb = NULL;
			}

			rval = fas_accept_pkt(fas, sp, NO_TRAN_BUSY);

			/*
			 * If the  packet was rejected for other reasons then
			 * complete it here
			 */
			if (rval != TRAN_ACCEPT) {
				ASSERT(rval != TRAN_BUSY);
				fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
				if (sp->cmd_pkt->pkt_comp) {
					sp->cmd_flags |= CFLAG_FINISHED;
					fas_call_pkt_comp(fas, sp);
				}
			}

			if (INTPENDING(fas)) {
				/*
				 * stop processing the waitQ and put back
				 * the remaining packets on the waitQ
				 */
				mutex_enter(&fas->f_waitQ_mutex);
				if (waitf) {
					ASSERT(waitb != NULL);
					waitb->cmd_forw = fas->f_waitf;
					fas->f_waitf = waitf;
					if (fas->f_waitb == NULL) {
						fas->f_waitb = waitb;
					}
				}
				/* returns with waitQ mutex held again */
				return;
			}
		} while (waitf);

		mutex_enter(&fas->f_waitQ_mutex);
	}
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_END,
	    "fas_empty_waitQ_end");
}
2719 
static void
fas_move_waitQ_to_readyQ(struct fas *fas)
{
	/*
	 * this may actually start cmds but it is most likely
	 * that if waitQ is not empty that the bus is not free
	 */
	/* caller holds FAS_MUTEX; take the waitQ mutex around the drain */
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	mutex_enter(&fas->f_waitQ_mutex);
	fas_empty_waitQ(fas);
	mutex_exit(&fas->f_waitQ_mutex);
}
2732 
2733 
2734 /*
2735  * function wrapper for two frequently used macros. for the non-critical
2736  * path we use the function
2737  */
static void
fas_check_waitQ_and_mutex_exit(struct fas *fas)
{
	_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(fas->f_mutex))
	/*
	 * releases f_mutex as a side effect (see _NOTE above), then
	 * FAS_EMPTY_CALLBACKQ runs queued completion work outside the
	 * mutex — see the macro definitions for the exact behavior
	 */
	FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
	FAS_EMPTY_CALLBACKQ(fas);
}
2745 
2746 /*
2747  * fas_accept_pkt():
2748  * the flag argument is to force fas_accept_pkt to accept the pkt;
2749  * the caller cannot take the pkt back and it has to be queued up in
2750  * the readyQ
2751  */
static int
fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag)
{
	short slot = sp->cmd_slot;
	int rval = TRAN_ACCEPT;

	TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_START, "fas_accept_pkt_start");
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	ASSERT(fas->f_ncmds >= 0 && fas->f_ndisc >= 0);
	ASSERT(fas->f_ncmds >= fas->f_ndisc);
	ASSERT(fas->f_tcmds[slot] >= 0);

	/*
	 * prepare packet for transport if this hasn't been done yet and
	 * do some checks
	 */
	if ((sp->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = fas_prepare_pkt(fas, sp);
		if (rval != TRAN_ACCEPT) {
			IPRINTF1("prepare pkt failed, slot=%x\n", slot);
			sp->cmd_flags &= ~CFLAG_TRANFLAG;
			goto done;
		}
	}

	/* non-zero luns force per-lun slot scanning (f_dslot = 1) */
	if (Lun(sp)) {
		EPRINTF("fas_accept_pkt: switching target and lun slot scan\n");
		fas->f_dslot = 1;

		if ((fas->f_active[slot] == NULL) ||
		    ((fas->f_active[slot]->f_n_slots != NTAGS) &&
		    TAGGED(Tgt(sp)))) {
			(void) fas_alloc_active_slots(fas, slot, KM_NOSLEEP);
		}
		if ((fas->f_active[slot] == NULL) ||
		    (NOTAG(Tgt(sp)) && (sp->cmd_pkt_flags & FLAG_TAGMASK))) {
			IPRINTF("fatal error on non-zero lun pkt\n");
			/*
			 * NOTE(review): early return — skips the done:
			 * restart below; f_ncmds has not been incremented
			 * yet on this path
			 */
			return (TRAN_FATAL_ERROR);
		}
	}

	/*
	 * we accepted the command; increment the count
	 * (we may still reject later if TRAN_BUSY_OK)
	 */
	fas_check_ncmds(fas);
	fas->f_ncmds++;

	/*
	 * if it is a nointr packet, start it now
	 * (NO_INTR pkts are not queued in the waitQ)
	 */
	if (sp->cmd_pkt_flags & FLAG_NOINTR) {
		EPRINTF("starting a nointr cmd\n");
		fas_runpoll(fas, slot, sp);
		sp->cmd_flags &= ~CFLAG_TRANFLAG;
		goto done;
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((fas->f_tcmds[slot] == 0) &&
	    (fas->f_throttle[slot] == DRAIN_THROTTLE)) {
		DPRINTF("reset throttle\n");
		ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
		fas_full_throttle(fas, slot);
	}

	/*
	 * accept the command:
	 * If no readyQ and no bus free, and throttle is OK,
	 * run cmd immediately.
	 */
#ifdef FASDEBUG
	fas->f_total_cmds++;
#endif

	if ((fas->f_readyf[slot] == NULL) && (fas->f_state == STATE_FREE) &&
	    (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
		ASSERT(fas->f_current_sp == 0);
		(void) fas_startcmd(fas, sp);
		goto exit;
	} else {
		/*
		 * If FLAG_HEAD is set, run cmd if target and bus are
		 * available. if first cmd in ready Q is request sense
		 * then insert after this command, there shouldn't be more
		 * than one request sense.
		 */
		if (sp->cmd_pkt_flags & FLAG_HEAD) {
			struct fas_cmd *ssp = fas->f_readyf[slot];
			EPRINTF("que head\n");
			if (ssp &&
			    *(ssp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
				fas_head_of_readyQ(fas, sp);
			} else if (ssp) {
				/* insert right after the request sense cmd */
				struct fas_cmd *dp = ssp->cmd_forw;
				ssp->cmd_forw = sp;
				sp->cmd_forw = dp;
				if (fas->f_readyb[slot] == ssp) {
					fas->f_readyb[slot] = sp;
				}
			} else {
				fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
				sp->cmd_forw = NULL;
			}

		/*
		 * for tagged targets, check for qfull condition and
		 * return TRAN_BUSY (if permitted), if throttle has been
		 * exceeded
		 */
		} else if (TAGGED(Tgt(sp)) &&
			    (fas->f_tcmds[slot] >= fas->f_throttle[slot]) &&
			    (fas->f_throttle[slot] > HOLD_THROTTLE) &&
			    (flag == TRAN_BUSY_OK)) {
				IPRINTF2(
				    "transport busy, slot=%x, ncmds=%x\n",
				    slot, fas->f_ncmds);
				rval = TRAN_BUSY;
				/* undo the accept: count and cmd flags */
				fas->f_ncmds--;
				sp->cmd_flags &=
				    ~(CFLAG_PREPARED | CFLAG_IN_TRANSPORT);
				goto done;
		/*
		 * append to readyQ or start a new readyQ
		 */
		} else if (fas->f_readyf[slot]) {
			struct fas_cmd *dp = fas->f_readyb[slot];
			ASSERT(dp != 0);
			fas->f_readyb[slot] = sp;
			sp->cmd_forw = NULL;
			dp->cmd_forw = sp;
		} else {
			fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
			sp->cmd_forw = NULL;
		}

	}

done:
	/*
	 * just in case that the bus is free and we haven't
	 * been able to restart for some reason
	 */
	if (fas->f_state == STATE_FREE) {
		(void) fas_istart(fas);
	}

exit:
	fas_check_ncmds(fas);
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_END,	"fas_accept_pkt_end");
	return (rval);
}
2908 
2909 /*
2910  * allocate a tag byte and check for tag aging
2911  */
/*
 * fas_tag_lookup is indexed with (cmd_pkt_flags & FLAG_TAGMASK) >> 12
 * (see fas_alloc_tag) to translate the pkt tag flag into the queue tag
 * message byte; the unused index values (0 and 3) map to 0.
 */
static char fas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
2914 
/*
 * fas_alloc_tag: allocate a tag for a tagged cmd.
 * On success, stores the tag message in sp->cmd_tag[], records sp in the
 * active slot array and returns 0.  Returns -1 when all tags are in use,
 * after requeueing sp at the head of the readyQ.
 * Called with FAS_MUTEX held (asserted below).
 */
static int
fas_alloc_tag(struct fas *fas, struct fas_cmd *sp)
{
	struct f_slots *tag_slots;
	int tag;
	short slot = sp->cmd_slot;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_START, "fas_alloc_tag_start");
	ASSERT(mutex_owned(FAS_MUTEX(fas)));

	tag_slots = fas->f_active[slot];
	ASSERT(tag_slots->f_n_slots == NTAGS);

alloc_tag:
	/* f_tags is a per-slot rotating cursor for the next candidate tag */
	tag = (fas->f_active[slot]->f_tags)++;
	if (fas->f_active[slot]->f_tags >= NTAGS) {
		/*
		 * we reserve tag 0 for non-tagged cmds
		 */
		fas->f_active[slot]->f_tags = 1;
	}
	EPRINTF1("tagged cmd, tag = %d\n", tag);

	/* Validate tag, should never fail. */
	if (tag_slots->f_slot[tag] == 0) {
		/*
		 * Store assigned tag and tag queue type.
		 * Note, in case of multiple choice, default to simple queue.
		 */
		ASSERT(tag < NTAGS);
		sp->cmd_tag[1] = (uchar_t)tag;
		sp->cmd_tag[0] = fas_tag_lookup[((sp->cmd_pkt_flags &
			FLAG_TAGMASK) >> 12)];
		EPRINTF1("tag= %d\n", tag);
		tag_slots->f_slot[tag] = sp;
		(fas->f_tcmds[slot])++;
		ASSERT(mutex_owned(FAS_MUTEX(fas)));
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
		    "fas_alloc_tag_end");
		return (0);

	} else {
		int age, i;

		/*
		 * Check tag age.  If timeouts enabled and
		 * tag age greater than 1, print warning msg.
		 * If timeouts enabled and tag age greater than
		 * age limit, begin draining tag que to check for
		 * lost tag cmd.
		 */
		age = tag_slots->f_slot[tag]->cmd_age++;
		if (age >= fas->f_scsi_tag_age_limit &&
		    tag_slots->f_slot[tag]->cmd_pkt->pkt_time) {
			IPRINTF2("tag %d in use, age= %d\n", tag, age);
			DPRINTF("draining tag queue\n");
			if (fas->f_reset_delay[Tgt(sp)] == 0) {
				fas->f_throttle[slot] = DRAIN_THROTTLE;
			}
		}

		/* If tag in use, scan until a free one is found. */
		for (i = 1; i < NTAGS; i++) {
			tag = fas->f_active[slot]->f_tags;
			if (!tag_slots->f_slot[tag]) {
				EPRINTF1("found free tag %d\n", tag);
				break;
			}
			if (++(fas->f_active[slot]->f_tags) >= NTAGS) {
				/*
				 * we reserve tag 0 for non-tagged cmds
				 */
				fas->f_active[slot]->f_tags = 1;
			}
			EPRINTF1("found in use tag %d\n", tag);
		}

		/*
		 * If no free tags, we're in serious trouble.
		 * the target driver submitted more than 255
		 * requests
		 */
		if (tag_slots->f_slot[tag]) {
			IPRINTF1("slot %x: All tags in use!!!\n", slot);
			goto fail;
		}
		/* retry with the cursor now pointing at the free tag */
		goto alloc_tag;
	}

fail:
	fas_head_of_readyQ(fas, sp);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
	    "fas_alloc_tag_end");
	return (-1);
}
3011 
3012 /*
3013  * Internal Search Routine.
3014  *
3015  * Search for a command to start.
3016  */
3017 static int
3018 fas_istart(struct fas *fas)
3019 {
3020 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_START,
3021 	    "fas_istart_start");
3022 	EPRINTF("fas_istart:\n");
3023 
3024 	if (fas->f_state == STATE_FREE && fas->f_ncmds > fas->f_ndisc) {
3025 		(void) fas_ustart(fas);
3026 	}
3027 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_END,
3028 	    "fas_istart_end");
3029 	return (ACTION_RETURN);
3030 }
3031 
/*
 * fas_ustart: round-robin over the slots starting at f_next_slot and
 * start the first ready cmd whose slot is below its throttle.
 * Returns the result of fas_startcmd() when a cmd was started,
 * FALSE when nothing was startable.
 */
static int
fas_ustart(struct fas *fas)
{
	struct fas_cmd *sp;
	short slot = fas->f_next_slot;
	short start_slot = slot;
	short dslot = fas->f_dslot;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_START, "fas_ustart_start");
	EPRINTF1("fas_ustart: start_slot=%x\n", fas->f_next_slot);
	ASSERT(fas->f_current_sp == NULL);
	ASSERT(dslot != 0);
	if (dslot == NLUNS_PER_TARGET) {
		/* per-target scanning must stay aligned on lun-0 slots */
		ASSERT((slot % NLUNS_PER_TARGET) == 0);
	}

	/*
	 * if readyQ not empty and we are not draining, then we
	 * can start another cmd
	 */
	do {
		/*
		 * If all cmds drained from tag Q, back to full throttle and
		 * start queueing up new cmds again.
		 */
		if (fas->f_throttle[slot] == DRAIN_THROTTLE &&
		    fas->f_tcmds[slot] == 0) {
			fas_full_throttle(fas, slot);
		}

		if (fas->f_readyf[slot] &&
		    (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
			/* dequeue the head of this slot's readyQ */
			sp = fas->f_readyf[slot];
			fas->f_readyf[slot] = sp->cmd_forw;
			if (sp->cmd_forw == NULL) {
				fas->f_readyb[slot] = NULL;
			}
			/* next search resumes at the following slot */
			fas->f_next_slot = NEXTSLOT(slot, dslot);
			ASSERT((sp->cmd_pkt_flags & FLAG_NOINTR) == 0);
			TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_END,
			    "fas_ustart_end");
			return (fas_startcmd(fas, sp));
		} else {
			slot = NEXTSLOT(slot, dslot);
		}
	} while (slot != start_slot);

	EPRINTF("fas_ustart: no cmds to start\n");
	fas->f_next_slot = NEXTSLOT(slot, dslot);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_NOT_FOUND_END,
	    "fas_ustart_end (not_found)");
	return (FALSE);
}
3085 
3086 /*
3087  * Start a command off
3088  */
/*
 * Start the given command on the bus: stage the identify message (and
 * tag bytes or CDB, depending on the selection flavor) into f_cmdarea,
 * program the chip's bus id / period / offset registers and fire off a
 * DMA'd selection.  Returns TRUE if the selection was started, FALSE if
 * the command was deferred (untagged cmd draining a tag queue, or no
 * free tag).
 */
static int
fas_startcmd(struct fas *fas, struct fas_cmd *sp)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	ushort_t  nstate;
	uchar_t cmd, target, lun;
	ushort_t tshift;
	volatile uchar_t *tp = fas->f_cmdarea;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int slot = sp->cmd_slot;
	struct f_slots *slots = fas->f_active[slot];
	int i, cdb_len;

/* append one byte to the command staging area; tp walks f_cmdarea */
#define	LOAD_CMDP	*(tp++)

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_START, "fas_startcmd_start");

	EPRINTF2("fas_startcmd: sp=0x%p flags=%x\n",
	    (void *)sp, sp->cmd_pkt_flags);
	ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
	ASSERT((sp->cmd_flags & CFLAG_COMPLETED) == 0);
	ASSERT(fas->f_current_sp == NULL && fas->f_state == STATE_FREE);
	if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(fas->f_throttle[slot] > 0);
		ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
	}

	target		= Tgt(sp);
	lun		= Lun(sp);

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    TAGGED(target) && fas->f_tcmds[slot] &&
	    ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) &&
	    (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			struct fas_cmd *dp;

			IPRINTF("untagged cmd, start draining\n");

			if (fas->f_reset_delay[Tgt(sp)] == 0) {
				fas->f_throttle[slot] = DRAIN_THROTTLE;
			}
			/* push the cmd back on the head of the ready queue */
			dp = fas->f_readyf[slot];
			fas->f_readyf[slot] = sp;
			sp->cmd_forw = dp;
			if (fas->f_readyb[slot] == NULL) {
				fas->f_readyb[slot] = sp;
			}
		}
		return (FALSE);
	}

	/*
	 * allocate a tag; if no tag available then put request back
	 * on the ready queue and return; eventually a cmd returns and we
	 * get going again or we timeout
	 */
	if (TAGGED(target) && (sp->cmd_pkt_flags & FLAG_TAGMASK)) {
		if (fas_alloc_tag(fas, sp)) {
			return (FALSE);
		}
	} else {
		/*
		 * tag slot 0 is reserved for non-tagged cmds
		 * and should be empty because we have drained
		 */
		if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
			ASSERT(fas->f_active[slot]->f_slot[0] == NULL);
			fas->f_active[slot]->f_slot[0] = sp;
			sp->cmd_tag[1] = 0;
			if (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
				ASSERT(fas->f_tcmds[slot] == 0);
				/*
				 * don't start any other cmd until this
				 * one is finished. The throttle is reset
				 * later in fas_watch()
				 */
				fas->f_throttle[slot] = 1;
			}
			(fas->f_tcmds[slot])++;

		}
	}

	fas->f_current_sp = sp;
	fas->f_omsglen	= 0;
	tshift		= 1<<target;	/* per-target bit for bitmask tests */
	fas->f_sdtr_sent = fas->f_wdtr_sent =	0;
	cdb_len 	= sp->cmd_actual_cdblen;

	if (sp->cmd_pkt_flags & FLAG_RENEGOTIATE_WIDE_SYNC) {
		fas_force_renegotiation(fas, Tgt(sp));
	}

	/*
	 * first send identify message, with or without disconnect priv.
	 */
	if (sp->cmd_pkt_flags & FLAG_NODISCON) {
		LOAD_CMDP = fas->f_last_msgout = MSG_IDENTIFY | lun;
		ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
	} else {
		LOAD_CMDP = fas->f_last_msgout = MSG_DR_IDENTIFY | lun;
	}

	/*
	 * normal case, tagQ and we have negotiated wide and sync
	 * or we don't need to renegotiate because wide and sync
	 * have been disabled
	 * (proxy msg's don't have tag flag set)
	 */
	if ((sp->cmd_pkt_flags & FLAG_TAGMASK) &&
	    ((fas->f_wide_known | fas->f_nowide) &
	    (fas->f_sync_known | fas->f_nosync) & tshift)) {

		EPRINTF("tag cmd\n");
		ASSERT((sp->cmd_pkt_flags & FLAG_NODISCON) == 0);

		/* tag msg bytes follow the identify in the dma area */
		fas->f_last_msgout = LOAD_CMDP = sp->cmd_tag[0];
		LOAD_CMDP = sp->cmd_tag[1];

		nstate = STATE_SELECT_NORMAL;
		cmd = CMD_SEL_ATN3 | CMD_DMA;

	/*
	 * is this a proxy message
	 */
	} else if (sp->cmd_flags & CFLAG_CMDPROXY) {

		IPRINTF2("proxy cmd, len=%x, msg=%x\n",
		    sp->cmd_cdb[FAS_PROXY_DATA],
		    sp->cmd_cdb[FAS_PROXY_DATA+1]);
		/*
		 * This is a proxy command. It will have
		 * a message to send as part of post-selection
		 * (e.g, MSG_ABORT or MSG_DEVICE_RESET)
		 */
		fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
		for (i = 0; i < (uint_t)fas->f_omsglen; i++) {
			fas->f_cur_msgout[i] =
			    sp->cmd_cdb[FAS_PROXY_DATA+1+i];
		}
		sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
		cdb_len = 0;	/* proxy cmds carry no CDB */
		cmd = CMD_SEL_STOP | CMD_DMA;
		nstate = STATE_SELECT_N_SENDMSG;

	/*
	 * always negotiate wide first and sync after wide
	 */
	} else if (((fas->f_wide_known | fas->f_nowide) & tshift) == 0) {
		int i = 0;

		/* First the tag message bytes */
		if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
			fas->f_cur_msgout[i++] = sp->cmd_tag[0];
			fas->f_cur_msgout[i++] = sp->cmd_tag[1];
		}

		/*
		 * Set up to send wide negotiating message.  This is getting
		 * a bit tricky as we dma out the identify message and
		 * send the other messages via the fifo buffer.
		 */
		EPRINTF1("cmd with wdtr msg, tag=%x\n", sp->cmd_tag[1]);

		fas_make_wdtr(fas, i, target, FAS_XFER_WIDTH);

		cdb_len = 0;	/* CDB goes out after negotiation completes */
		nstate = STATE_SELECT_N_SENDMSG;
		cmd = CMD_SEL_STOP | CMD_DMA;

	/*
	 * negotiate sync xfer rate
	 */
	} else if (((fas->f_sync_known | fas->f_nosync) & tshift) == 0) {
		int i = 0;
		/*
		 * Set up to send sync negotiating message.  This is getting
		 * a bit tricky as we dma out the identify message and
		 * send the other messages via the fifo buffer.
		 */
		if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
			fas->f_cur_msgout[i++] = sp->cmd_tag[0];
			fas->f_cur_msgout[i++] = sp->cmd_tag[1];
		}

		fas_make_sdtr(fas, i, target);

		cdb_len = 0;	/* CDB goes out after negotiation completes */
		cmd = CMD_SEL_STOP | CMD_DMA;
		nstate = STATE_SELECT_N_SENDMSG;

	/*
	 * normal cmds, no negotiations and not a proxy and no TQ
	 */
	} else {

		ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
		EPRINTF("std. cmd\n");

		nstate = STATE_SELECT_NORMAL;
		cmd = CMD_SEL_ATN | CMD_DMA;
	}

	/*
	 * Now load cdb (if any)
	 */
	for (i = 0; i < cdb_len; i++) {
		LOAD_CMDP = sp->cmd_cdbp[i];
	}

	/*
	 * calculate total dma amount:
	 */
	fas->f_lastcount = (uintptr_t)tp - (uintptr_t)fas->f_cmdarea;

	/*
	 * load target id and enable bus id encoding and 32 bit counter
	 */
	fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
	    (target & 0xf) | FAS_BUSID_ENCODID | FAS_BUSID_32BIT_COUNTER);

	FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);

	/* clear any stale bytes out of the chip fifo before selecting */
	fas_reg_cmd_write(fas, CMD_FLUSH);

	FAS_DMA_READ(fas, fas->f_lastcount,
	    fas->f_dmacookie.dmac_address, 16, cmd);

	New_state(fas, (int)nstate);

#ifdef FASDEBUG
	if (DDEBUGGING) {
		fas_dump_cmd(fas, sp);
	}
#endif /* FASDEBUG */

	/*
	 * if timeout == 0, then it has no effect on the timeout
	 * handling; we deal with this when an actual timeout occurs.
	 */
	if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
		ASSERT(fas->f_tcmds[slot] >= 1);
	}
	/* i < 0: pkt_time is below the slot's current timebase */
	i = pkt->pkt_time - slots->f_timebase;

	if (i == 0) {
		EPRINTF("dup timeout\n");
		(slots->f_dups)++;
		slots->f_timeout = slots->f_timebase;
	} else if (i > 0) {
		EPRINTF("new timeout\n");
		slots->f_timeout = slots->f_timebase = pkt->pkt_time;
		slots->f_dups = 1;
	}

	fas_check_ncmds(fas);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_END, "fas_startcmd_end");

	return (TRUE);
}
3356 
3357 /*
3358  * Interrupt Entry Point.
3359  * Poll interrupts until they go away
3360  */
/*
 * arg is the per-instance soft state (struct fas *).  Loops as long as
 * interrupts are pending, servicing each under FAS_MUTEX, and always
 * drains the waitQ and callbackQ before returning.  Returns
 * DDI_INTR_CLAIMED if any serviced interrupt was ours, else
 * DDI_INTR_UNCLAIMED.
 */
static uint_t
fas_intr(caddr_t arg)
{
	struct fas *fas = (struct fas *)arg;
	int rval = DDI_INTR_UNCLAIMED;
	int kstat_updated = 0;	/* bump the intr kstat at most once */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_START, "fas_intr_start");

	do {
		mutex_enter(FAS_MUTEX(fas));

		do {
			if (fas_intr_svc(fas)) {
				/*
				 * do not return immediately here because
				 * we have to guarantee to always empty
				 * the waitQ and callbackQ in the interrupt
				 * handler
				 */
				if (fas->f_polled_intr) {
					/* claim intrs consumed by polling */
					rval = DDI_INTR_CLAIMED;
					fas->f_polled_intr = 0;
				}
			} else {
				rval = DDI_INTR_CLAIMED;
			}
		} while (INTPENDING(fas));

		if (!kstat_updated && fas->f_intr_kstat &&
					rval == DDI_INTR_CLAIMED) {
			FAS_KSTAT_INTR(fas);
			kstat_updated++;
		}

		/*
		 * check and empty the waitQ and the callbackQ
		 * (FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT also drops the mutex)
		 */
		FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
		FAS_EMPTY_CALLBACKQ(fas);

	} while (INTPENDING(fas));

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_END, "fas_intr_end");

	return (rval);
}
3408 
3409 /*
3410  * General interrupt service routine.
3411  */
3412 static char *dma_bits	= DMA_BITS;
3413 
/*
 * Service one interrupt: latch status, read (and thereby clear) the
 * interrupt register, pull any fifo residue, decode the interrupt bits
 * and run the resulting ACTION_* handlers until one returns
 * ACTION_RETURN.  Returns -1 if the interrupt was not ours, 0 otherwise.
 */
static int
fas_intr_svc(struct fas *fas)
{
	/* handler table indexed by the ACTION_* codes (0..ACTION_SELECT) */
	static int (*evec[])(struct fas *fas) = {
		fas_finish_select,
		fas_reconnect,
		fas_phasemanage,
		fas_finish,
		fas_reset_recovery,
		fas_istart,
		fas_abort_curcmd,
		fas_reset_bus,
		fas_reset_bus,
		fas_handle_selection
	};
	int action;
	uchar_t intr, stat;
	volatile struct fasreg *fasreg = fas->f_reg;
	int i = 0;	/* iteration count, for tracing only */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_START, "fas_intr_svc_start");

	/*
	 * A read of FAS interrupt register clears interrupt,
	 * so any other volatile information needs to be latched
	 * up prior to reading the interrupt register.
	 */
	fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);

	EPRINTF2("fas_intr_svc: state=%x stat=%x\n", fas->f_state,
		fas->f_stat);

	/*
	 * this wasn't our interrupt?
	 */
	if ((fas->f_stat & FAS_STAT_IPEND) == 0) {
		if (fas_check_dma_error(fas)) {
			action = ACTION_RESET;
			goto start_action;
		}
		return (-1);
	}

	/*
	 * if we are reset state, handle this first
	 */
	if (fas->f_state == ACTS_RESET) {
		action = ACTION_FINRST;
		goto start_action;
	}

	/*
	 * check for gross error.  fas366 hardware seems to register
	 * the gross error bit when a parity error is found.  Make sure
	 * to ignore the gross error bit when a parity error is detected.
	 */
	if ((fas->f_stat & FAS_STAT_GERR) &&
	    (fas->f_stat & FAS_STAT_PERR) == 0) {
		action = fas_handle_gross_err(fas);
		goto start_action;
	}

	/*
	 * now it is finally safe to read the interrupt register
	 * if we haven't done so yet
	 * Note: we don't read step register here but only in
	 * fas_finish_select(). It is not entirely safe but saves
	 * redundant PIOs or extra code in this critical path
	 */
	fas->f_intr =
		intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);

	/*
	 * read the fifo if there is something there or still in the
	 * input shuttle
	 */
	stat = fas->f_stat & FAS_PHASE_MASK;

	if ((intr & FAS_INT_RESEL) ||
	    ((stat != FAS_PHASE_DATA_IN) && (stat != FAS_PHASE_DATA_OUT) &&
	    ((fas->f_state & STATE_SELECTING) == 0) &&
	    (fas->f_state != ACTS_DATA_DONE) &&
	    (fas->f_state != ACTS_C_CMPLT))) {

		fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);

		if (((fas->f_stat2 & FAS_STAT2_EMPTY) == 0) ||
			(fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
			fas_read_fifo(fas);
		}
	}

	EPRINTF2("fas_intr_svc: intr=%x, stat=%x\n", fas->f_intr, fas->f_stat);
	EPRINTF2("dmacsr=%b\n", fas->f_dma->dma_csr, dma_bits);

	/*
	 * Based upon the current state of the host adapter driver
	 * we should be able to figure out what to do with an interrupt.
	 *
	 * The FAS asserts an interrupt with one or more of 8 possible
	 * bits set in its interrupt register. These conditions are
	 * SCSI bus reset detected, an illegal command fed to the FAS,
	 * one of DISCONNECT, BUS SERVICE, FUNCTION COMPLETE conditions
	 * for the FAS, a Reselection interrupt, or one of Selection
	 * or Selection with Attention.
	 *
	 * Of these possible interrupts, we can deal with some right
	 * here and now, irrespective of the current state of the driver.
	 *
	 * take care of the most likely interrupts first and call the action
	 * immediately
	 *
	 * note: the four branches below are exhaustive over the tested
	 * interrupt bits, so action is always assigned before start_action
	 */
	if ((intr & (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN|
	    FAS_INT_RESEL)) == 0) {
		/*
		 * The rest of the reasons for an interrupt can
		 * be handled based purely on the state that the driver
		 * is currently in now.
		 */
		if (fas->f_state & STATE_SELECTING) {
			action = fas_finish_select(fas);

		} else if (fas->f_state & STATE_ITPHASES) {
			action = fas_phasemanage(fas);

		} else {
			fas_log(fas, CE_WARN, "spurious interrupt");
			action = ACTION_RETURN;
		}

	} else if ((intr & FAS_INT_RESEL) && ((intr &
	    (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN)) == 0)) {

		if ((fas->f_state & STATE_SELECTING) == 0) {
			ASSERT(fas->f_state == STATE_FREE);
			action = fas_reconnect(fas);
		} else {
			/* a reselection beat our selection attempt */
			action = fas_reselect_preempt(fas);
		}

	} else if (intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
		action = fas_illegal_cmd_or_bus_reset(fas);

	} else if (intr & (FAS_INT_SEL|FAS_INT_SELATN)) {
		action = ACTION_SELECT;
	}

start_action:
	while (action != ACTION_RETURN) {
		ASSERT((action >= 0) && (action <= ACTION_SELECT));
		TRACE_3(TR_FAC_SCSI_FAS, TR_FASSVC_ACTION_CALL,
			"fas_intr_svc call: fas 0x%p, action %d (%d)",
			fas, action, i);
		i++;
		action = (*evec[action])(fas);
	}
exit:	/* nothing currently jumps here; label kept for future use */
	TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_END, "fas_intr_svc_end");

	return (0);
}
3575 
3576 /*
3577  * Manage phase transitions.
3578  */
/*
 * Dispatch the handler for the current bus phase state, repeating while
 * handlers return ACTION_PHASEMANAGE.  Returns the final ACTION_* code.
 */
static int
fas_phasemanage(struct fas *fas)
{
	ushort_t state;
	int action;
	/*
	 * phase handler table, indexed by (f_state - 1); f_state values
	 * 1..ACTS_ENDVEC map onto these entries
	 */
	static int (*pvecs[])(struct fas *fas) = {
		fas_handle_cmd_start,
		fas_handle_cmd_done,
		fas_handle_msg_out_start,
		fas_handle_msg_out_done,
		fas_handle_msg_in_start,
		fas_handle_more_msgin,
		fas_handle_msg_in_done,
		fas_handle_clearing,
		fas_handle_data_start,
		fas_handle_data_done,
		fas_handle_c_cmplt,
		fas_reconnect,
		fas_handle_unknown,
		fas_reset_recovery
	};
	int i = 0;	/* iteration count, for tracing only */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_START,
		"fas_phasemanage_start");

	do {
		EPRINTF1("fas_phasemanage: %s\n",
		    fas_state_name(fas->f_state & STATE_ITPHASES));

		TRACE_2(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_CALL,
			"fas_phasemanage_call: fas 0x%p (%d)", fas, i++);

		state = fas->f_state;

		/* i.e. state != STATE_FREE && state <= ACTS_ENDVEC */
		if (!(state == STATE_FREE || state > ACTS_ENDVEC)) {
			ASSERT(pvecs[state-1] != NULL);
			action = (*pvecs[state-1]) (fas);
		} else {
			fas_log(fas, CE_WARN, "lost state in phasemanage");
			action = ACTION_ABORT_ALLCMDS;
		}

	} while (action == ACTION_PHASEMANAGE);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_END,
		"fas_phasemanage_end");
	return (action);
}
3628 
3629 /*
3630  * remove a cmd from active list and if timeout flag is set, then
3631  * adjust timeouts; if a the same cmd will be resubmitted soon, don't
3632  * bother to adjust timeouts (ie. don't set this flag)
3633  */
3634 static void
3635 fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int new_timeout_flag)
3636 {
3637 	int tag = sp->cmd_tag[1];
3638 	int slot = sp->cmd_slot;
3639 	struct f_slots *tag_slots = fas->f_active[slot];
3640 
3641 	ASSERT(sp != NULL);
3642 	EPRINTF4("remove tag %d slot %d for target %d.%d\n",
3643 	    tag, slot, Tgt(sp), Lun(sp));
3644 
3645 	if (sp == tag_slots->f_slot[tag]) {
3646 		tag_slots->f_slot[tag] = NULL;
3647 		fas->f_tcmds[slot]--;
3648 	}
3649 	if (fas->f_current_sp == sp) {
3650 		fas->f_current_sp = NULL;
3651 	}
3652 
3653 	ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
3654 
3655 	if (new_timeout_flag != NEW_TIMEOUT) {
3656 		return;
3657 	}
3658 
3659 	/*
3660 	 * Figure out what to set tag Q timeout for...
3661 	 *
3662 	 * Optimize: If we have duplicate's of same timeout
3663 	 * we're using, then we'll use it again until we run
3664 	 * out of duplicates.  This should be the normal case
3665 	 * for block and raw I/O.
3666 	 * If no duplicates, we have to scan through tag que and
3667 	 * find the longest timeout value and use it.  This is
3668 	 * going to take a while...
3669 	 */
3670 	if (sp->cmd_pkt->pkt_time == tag_slots->f_timebase) {
3671 		if (--(tag_slots->f_dups) <= 0) {
3672 			if (fas->f_tcmds[slot]) {
3673 				struct fas_cmd *ssp;
3674 				uint_t n = 0;
3675 				ushort_t t = tag_slots->f_n_slots;
3676 				ushort_t i;
3677 				/*
3678 				 * This crude check assumes we don't do
3679 				 * this too often which seems reasonable
3680 				 * for block and raw I/O.
3681 				 */
3682 				for (i = 0; i < t; i++) {
3683 					ssp = tag_slots->f_slot[i];
3684 					if (ssp &&
3685 					    (ssp->cmd_pkt->pkt_time > n)) {
3686 						n = ssp->cmd_pkt->pkt_time;
3687 						tag_slots->f_dups = 1;
3688 					} else if (ssp &&
3689 					    (ssp->cmd_pkt->pkt_time == n)) {
3690 						tag_slots->f_dups++;
3691 					}
3692 				}
3693 				tag_slots->f_timebase = n;
3694 				EPRINTF1("searching, new_timeout= %d\n", n);
3695 			} else {
3696 				tag_slots->f_dups = 0;
3697 				tag_slots->f_timebase = 0;
3698 			}
3699 		}
3700 	}
3701 	tag_slots->f_timeout = tag_slots->f_timebase;
3702 
3703 	ASSERT(fas->f_ncmds >= fas->f_ndisc);
3704 }
3705 
3706 /*
3707  * decrement f_ncmds and f_ndisc for this cmd before completing
3708  */
3709 static void
3710 fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp)
3711 {
3712 	ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3713 	if ((sp->cmd_flags & CFLAG_FINISHED) == 0) {
3714 		fas->f_ncmds--;
3715 		if (sp->cmd_flags & CFLAG_CMDDISC) {
3716 			fas->f_ndisc--;
3717 		}
3718 		sp->cmd_flags |= CFLAG_FINISHED;
3719 		sp->cmd_flags &= ~CFLAG_CMDDISC;
3720 	}
3721 	ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
3722 	ASSERT(fas->f_ncmds >= fas->f_ndisc);
3723 }
3724 
3725 /*
3726  * Most commonly called phase handlers:
3727  *
3728  * Finish routines
3729  */
/*
 * Complete the current command: re-enable reselection, clean up the
 * active-list/timeout bookkeeping, try to start the next command, and
 * run the completion (or start ARQ / qfull handling based on status).
 * Returns the next ACTION_* code for the interrupt action loop.
 */
static int
fas_finish(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int action = ACTION_SEARCH;
	struct scsi_status *status =
	    (struct  scsi_status *)sp->cmd_pkt->pkt_scbp;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_START,
	    "fas_finish_start");
	EPRINTF("fas_finish\n");

#ifdef FAS_TEST
	if (fas_test_stop && (sp->cmd_pkt_flags & 0x80000000)) {
		debug_enter("untagged cmd completed");
	}
#endif

	/*
	 * immediately enable reselects
	 */
	fas_reg_cmd_write(fas, CMD_EN_RESEL);
	if (status->sts_chk) {
		/*
		 * In the case that we are getting a check condition
		 * clear our knowledge of synchronous capabilities.
		 * This will unambiguously force a renegotiation
		 * prior to any possible data transfer (we hope),
		 * including the data transfer for a UNIT ATTENTION
		 * condition generated by somebody powering on and
		 * off a target.
		 */
		fas_force_renegotiation(fas, Tgt(sp));
	}

	/*
	 * backoff sync/wide if there were parity errors
	 */
	if (sp->cmd_pkt->pkt_statistics & STAT_PERR) {
		fas_sync_wide_backoff(fas, sp, sp->cmd_slot);
#ifdef FAS_TEST
		if (fas_test_stop) {
			debug_enter("parity error");
		}
#endif
	}

	/*
	 * Free from active list and update counts
	 * We need to clean up this cmd now, just in case fas_ustart()
	 * hits a reset or other fatal transport error
	 */
	fas_check_ncmds(fas);
	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_check_ncmds(fas);

	/*
	 * go to state free and try to start a new cmd now
	 */
	New_state(fas, STATE_FREE);

	/* only restart when status is clean and no interrupt is pending */
	if ((fas->f_ncmds > fas->f_ndisc) && (*((char *)status) == 0) &&
	    (INTPENDING(fas) == 0)) {
		if (fas_ustart(fas)) {
			action = ACTION_RETURN;
		}
	}

	/*
	 * if there was a data xfer then calculate residue and
	 * sync data for consistent memory xfers
	 */
	if (pkt->pkt_state & STATE_XFERRED_DATA) {
		pkt->pkt_resid = sp->cmd_dmacount - sp->cmd_data_count;
		if (sp->cmd_flags & CFLAG_CMDIOPB) {
			(void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
			    DDI_DMA_SYNC_FORCPU);
		}
		if (pkt->pkt_resid) {
			IPRINTF3("%d.%d finishes with %ld resid\n",
			    Tgt(sp), Lun(sp), pkt->pkt_resid);
		}
	}

	if (sp->cmd_pkt_flags & FLAG_NOINTR) {
		fas_call_pkt_comp(fas, sp);
		action = ACTION_RETURN;
	} else {
		/*
		 * start an autorequest sense if there was a check condition.
		 * if arq has not been enabled, fas_handle_sts_chk will
		 * do the callback
		 */
		if (status->sts_chk) {
			if (fas_handle_sts_chk(fas, sp)) {
				/*
				 * we can't start an arq because one is
				 * already in progress. the target is
				 * probably confused
				 */
				action = ACTION_ABORT_CURCMD;
			}
		} else if ((*((char *)status) & STATUS_MASK) ==
		    STATUS_QFULL) {
			fas_handle_qfull(fas, sp);
		} else {
#ifdef FAS_TEST
			/* fault injection: fake an ARQ failure once */
			if (fas_arqs_failure && (status->sts_chk == 0)) {
				struct scsi_arq_status *arqstat;
				status->sts_chk = 1;
				arqstat = (struct scsi_arq_status *)
					(sp->cmd_pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_TRAN_ERR;
				sp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
				fas_arqs_failure = 0;
			}
			if (fas_tran_err) {
				sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
				fas_tran_err = 0;
			}
#endif
			fas_call_pkt_comp(fas, sp);
		}
	}

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_END, "fas_finish_end");
	return (action);
}
3860 
3861 /*
3862  * Complete the process of selecting a target
3863  */
/*
 * Selection has completed (or failed); decode the interrupt and sequence
 * step registers to decide what happened and return the next ACTION_* /
 * handler result for the action loop.
 */
static int
fas_finish_select(struct fas *fas)
{
	volatile struct dma *dmar = fas->f_dma;
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t intr = fas->f_intr;
	uchar_t step;

	/* sequence step tells how far the chip got through selection */
	step = fas_reg_read(fas, &fas->f_reg->fas_step) & FAS_STEP_MASK;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_START,
	    "fas_finish_select_start");
	EPRINTF("fas_finish_select:\n");
	ASSERT(sp != 0);

	/*
	 * Check for DMA gate array errors
	 */
	if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr))
	    & DMA_ERRPEND) {
		/*
		 * It would be desirable to set the ATN* line and attempt to
		 * do the whole schmear of INITIATOR DETECTED ERROR here,
		 * but that is too hard to do at present.
		 */
		fas_log(fas, CE_WARN,
		    "Unrecoverable DMA error during selection");
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET1_END,
		    "fas_finish_select_end (ACTION_RESET1)");
		return (ACTION_RESET);
	}

	/*
	 * Shut off DMA gate array
	 */
	FAS_FLUSH_DMA(fas);

	/*
	 * Did something respond to selection?
	 */
	if (intr == (FAS_INT_BUS|FAS_INT_FCMP)) {
		/*
		 * We successfully selected a target (we think).
		 * Now we figure out how botched things are
		 * based upon the kind of selection we were
		 * doing and the state of the step register.
		 */
		switch (step) {
		case FAS_STEP_ARBSEL:
			/*
			 * In this case, we selected the target, but went
			 * neither into MESSAGE OUT nor COMMAND phase.
			 * However, this isn't a fatal error, so we just
			 * drive on.
			 *
			 * This might be a good point to note that we have
			 * a target that appears to not accommodate
			 * disconnecting,
			 * but it really isn't worth the effort to distinguish
			 * such targets specially from others.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_SENTID:
			/*
			 * In this case, we selected the target and sent
			 * message byte and have stopped with ATN* still on.
			 * This case should only occur if we use the SELECT
			 * AND STOP command.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_NOTCMD:
			/*
			 * In this case, we either didn't transition to command
			 * phase, or,
			 * if we were using the SELECT WITH ATN3 command,
			 * we possibly didn't send all message bytes.
			 */
			break;

		case FAS_STEP_PCMD:
			/*
			 * In this case, not all command bytes transferred.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_DONE:
			/*
			 * This is the usual 'good' completion point.
			 * If we we sent message byte(s), we subtract
			 * off the number of message bytes that were
			 * ahead of the command.
			 */
			sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
			break;

		default:
			fas_log(fas, CE_WARN,
			    "bad sequence step (0x%x) in selection", step);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_FINISH_SELECT_RESET3_END,
			    "fas_finish_select_end (ACTION_RESET3)");
			return (ACTION_RESET);
		}

		/*
		 * OR in common state...
		 */
		sp->cmd_pkt->pkt_state |= (STATE_GOT_BUS|STATE_GOT_TARGET);

		/*
		 * data pointer initialization has already been done
		 */
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_ACTION3_END,
		    "fas_finish_select_end (action3)");
		return (fas_handle_unknown(fas));

	} else if (intr == FAS_INT_DISCON) {
		/* selection timed out: nothing responded */
		/*
		 * make sure we negotiate when this target comes
		 * on line later on
		 */
		fas_force_renegotiation(fas, Tgt(sp));

		fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
		sp->cmd_pkt->pkt_state |= STATE_GOT_BUS;

		/*
		 * Set the throttle to DRAIN_THROTTLE to make
		 * sure any disconnected commands will get timed out
		 * in case the drive dies
		 */

		if (fas->f_reset_delay[Tgt(sp)] == 0) {
			fas->f_throttle[sp->cmd_slot] = DRAIN_THROTTLE;
		}

		fas_set_pkt_reason(fas, sp, CMD_INCOMPLETE, 0);

		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_FINISH_END,
		    "fas_finish_select_end (ACTION_FINISH)");
		return (ACTION_FINISH);
	} else	{
		fas_printstate(fas, "undetermined selection failure");
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET2_END,
		    "fas_finish_select_end (ACTION_RESET2)");
		return (ACTION_RESET);
	}
	_NOTE(NOT_REACHED)
	/* NOTREACHED */
}
4019 
4020 /*
4021  * a selection got preempted by a reselection; shut down dma
4022  * and put back cmd in the ready queue unless NOINTR
4023  */
/*
 * Our selection attempt lost arbitration to a reselecting target.
 * Service the reconnect, undo the partially-started command's state
 * (tag/timeout bookkeeping, any in-flight negotiation), and put the
 * command back at the head of the ready queue.  Returns the action
 * code produced by fas_reconnect().
 */
static int
fas_reselect_preempt(struct fas *fas)
{
	int rval;

	/*
	 * A reselection attempt glotzed our selection attempt.
	 * we put request back in the ready queue
	 */
	struct fas_cmd *sp = fas->f_current_sp;

	/*
	 * Shut off DMA gate array
	 */
	FAS_FLUSH_DMA(fas);

	/*
	 * service the reconnect now and clean up later
	 */
	New_state(fas, STATE_FREE);
	rval = fas_reconnect(fas);

	/*
	 * If selection for a non-tagged command is preempted, the
	 * command could be stuck because throttle was set to DRAIN,
	 * and a disconnected command timeout follows.
	 */
	if ((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0)
		fas->f_throttle[sp->cmd_slot] = 1;

	if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
		fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	}

	/*
	 * if we attempted to renegotiate on this cmd, undo this now
	 * so the negotiation is retried when the cmd is restarted
	 */
	if (fas->f_wdtr_sent) {
		fas->f_wide_known &= ~(1<<Tgt(sp));
		fas->f_wdtr_sent = 0;
	}
	if (fas->f_sdtr_sent) {
		fas->f_sync_known &= ~(1<<Tgt(sp));
		fas->f_sdtr_sent = 0;
	}

	fas_head_of_readyQ(fas, sp);

	return (rval);
}
4074 
4075 /*
4076  * Handle the reconnection of a target
4077  */
4078 static int
4079 fas_reconnect(struct fas *fas)
4080 {
4081 	volatile struct fasreg *fasreg = fas->f_reg;
4082 	struct fas_cmd *sp = NULL;
4083 	uchar_t target, lun;
4084 	uchar_t tmp;
4085 	uchar_t slot;
4086 	char *bad_reselect = NULL;
4087 
4088 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_START,
4089 	    "fas_reconnect_start");
4090 	EPRINTF("fas_reconnect:\n");
4091 
4092 	fas_check_ncmds(fas);
4093 
4094 	switch (fas->f_state) {
4095 	default:
4096 		/*
4097 		 * Pick up target id from fifo
4098 		 *
4099 		 * There should only be the reselecting target's id
4100 		 * and an identify message in the fifo.
4101 		 */
4102 		target = fas->f_fifo[0];
4103 
4104 		/*
4105 		 * we know the target so update period, conf3,
4106 		 * offset reg, if necessary, and accept the msg
4107 		 */
4108 		FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);
4109 
4110 		/*
4111 		 * now we can accept the message. an untagged
4112 		 * target will go immediately into data phase so
4113 		 * the period/offset/conf3 registers need to be
4114 		 * updated before accepting the message
4115 		 */
4116 		fas_reg_cmd_write(fas, CMD_MSG_ACPT);
4117 
4118 		if (fas->f_fifolen != 2) {
4119 			bad_reselect = "bad reselect bytes";
4120 			break;
4121 		}
4122 
4123 		/*
4124 		 * normal initial reconnect; we get another interrupt later
4125 		 * for the tag
4126 		 */
4127 		New_state(fas, ACTS_RESEL);
4128 
4129 		if (fas->f_stat & FAS_STAT_PERR) {
4130 			break;
4131 		}
4132 
4133 		/*
4134 		 * Check sanity of message.
4135 		 */
4136 		tmp = fas->f_fifo[1];
4137 		fas->f_last_msgin = tmp;
4138 
4139 		if (!(IS_IDENTIFY_MSG(tmp)) || (tmp & INI_CAN_DISCON)) {
4140 			bad_reselect = "bad identify msg";
4141 			break;
4142 		}
4143 
4144 		lun = tmp & (NLUNS_PER_TARGET-1);
4145 
4146 		EPRINTF2("fas_reconnect: target=%x, idmsg=%x\n",
4147 			target, tmp);
4148 
4149 		fas->f_resel_slot = slot = (target * NLUNS_PER_TARGET) | lun;
4150 
4151 		fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
4152 			(target & 0xf) | FAS_BUSID_ENCODID |
4153 			FAS_BUSID_32BIT_COUNTER);
4154 
4155 		/*
4156 		 * If tag queueing in use, DMA in tag.
4157 		 * Otherwise, we're ready to go.
4158 		 * if tag 0 slot is non-empty, a non-tagged cmd is
4159 		 * reconnecting
4160 		 */
4161 		if (TAGGED(target) && fas->f_tcmds[slot] &&
4162 		    (fas->f_active[slot]->f_slot[0] == NULL)) {
4163 			volatile uchar_t *c =
4164 					(uchar_t *)fas->f_cmdarea;
4165 
4166 			/*
4167 			 * If we've been doing tagged queueing and this
4168 			 * request doesn't  do it,
4169 			 * maybe it was disabled for this one.	This is rather
4170 			 * dangerous as it blows all pending tagged cmds away.
4171 			 * But if target is confused, then we'll blow up
4172 			 * shortly.
4173 			 */
4174 			*c++ = INVALID_MSG;
4175 			*c   = INVALID_MSG;
4176 
4177 			FAS_DMA_WRITE_SETUP(fas, 2,
4178 				fas->f_dmacookie.dmac_address);
4179 
4180 			/*
4181 			 * For tagged queuing, we should still be in msgin
4182 			 * phase.
4183 			 * If not, then either we aren't running tagged
4184 			 * queueing like we thought or the target died.
4185 			 */
4186 			if (INTPENDING(fas) == 0) {
4187 				EPRINTF1("slow reconnect, slot=%x\n", slot);
4188 				TRACE_0(TR_FAC_SCSI_FAS,
4189 				    TR_FAS_RECONNECT_RETURN1_END,
4190 				    "fas_reconnect_end (_RETURN1)");
4191 				return (ACTION_RETURN);
4192 			}
4193 
4194 			fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
4195 			fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
4196 			if (fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET)) {
4197 				return (fas_illegal_cmd_or_bus_reset(fas));
4198 			}
4199 
4200 			if ((fas->f_stat & FAS_PHASE_MASK) !=
4201 			    FAS_PHASE_MSG_IN) {
4202 				bad_reselect = "not in msgin phase";
4203 				break;
4204 			}
4205 
4206 			if (fas->f_intr & FAS_INT_DISCON) {
4207 				bad_reselect = "unexpected bus free";
4208 				break;
4209 			}
4210 		} else {
4211 			fas->f_current_sp = sp = fas->f_active[slot]->f_slot[0];
4212 			break;
4213 		}
4214 		/*FALLTHROUGH*/
4215 
4216 	case ACTS_RESEL:
4217 		{
4218 			volatile uchar_t *c =
4219 					(uchar_t *)fas->f_cmdarea;
4220 			struct f_slots *tag_slots;
4221 			int id, tag;
4222 			uint_t i;
4223 
4224 			slot = fas->f_resel_slot;
4225 			target = slot/NLUNS_PER_TARGET;
4226 
4227 			if ((fas->f_stat & FAS_PHASE_MASK) !=
4228 			    FAS_PHASE_MSG_IN) {
4229 				IPRINTF1("no tag for slot %x\n", slot);
4230 				if (fas->f_intr & ~(FAS_INT_BUS |
4231 				    FAS_INT_FCMP)) {
4232 					New_state(fas, ACTS_UNKNOWN);
4233 					TRACE_0(TR_FAC_SCSI_FAS,
4234 					    TR_FAS_RECONNECT_PHASEMANAGE_END,
4235 					    "fas_reconnect_end (_PHASEMANAGE)");
4236 					return (ACTION_PHASEMANAGE);
4237 				} else {
4238 					bad_reselect = "not in msgin phase";
4239 					break;
4240 				}
4241 			}
4242 			fas_reg_cmd_write(fas, CMD_TRAN_INFO|CMD_DMA);
4243 			fas_dma_reg_write(fas, &fas->f_dma->dma_csr,
4244 				fas->f_dma_csr);
4245 
4246 			fas_reg_cmd_write(fas, CMD_MSG_ACPT);
4247 
4248 			for (i = 0; i < (uint_t)RECONNECT_TAG_RCV_TIMEOUT;
4249 			    i++) {
4250 				/*
4251 				 * timeout is not very accurate but this
4252 				 * should take no time at all
4253 				 */
4254 				if (INTPENDING(fas)) {
4255 					fas->f_stat = fas_reg_read(fas,
4256 					(uchar_t *)&fas->f_reg->fas_stat);
4257 					fas->f_intr = fas_reg_read(fas,
4258 					(uchar_t *)&fas->f_reg->fas_intr);
4259 					if (fas->f_intr & (FAS_INT_RESET |
4260 					    FAS_INT_ILLEGAL)) {
4261 					    return (
4262 					    fas_illegal_cmd_or_bus_reset(fas));
4263 					}
4264 					if (fas->f_intr & FAS_INT_FCMP) {
4265 						break;
4266 					}
4267 				}
4268 			}
4269 
4270 			if (i == (uint_t)RECONNECT_TAG_RCV_TIMEOUT) {
4271 				bad_reselect = "timeout on receiving tag msg";
4272 				break;
4273 			}
4274 
4275 			FAS_FLUSH_DMA(fas);
4276 
4277 			/*
4278 			 * we should really do a sync here but that
4279 			 * hurts performance too much; we'll just hang
4280 			 * around till the tag byte flips
4281 			 * This is necessary on any system with an
4282 			 * XBox
4283 			 */
4284 			if (*c == INVALID_MSG) {
4285 				EPRINTF(
4286 				    "fas_reconnect: invalid msg, polling\n");
4287 				for (i = 0; i < 1000000; i++) {
4288 					if (*c != INVALID_MSG)
4289 						break;
4290 				}
4291 			}
4292 
4293 			if (fas->f_stat & FAS_STAT_PERR) {
4294 				break;
4295 			}
4296 
4297 			if ((fas->f_stat & FAS_STAT_XZERO) == 0 ||
4298 			    (id = *c++) < MSG_SIMPLE_QTAG ||
4299 			    id > MSG_ORDERED_QTAG) {
4300 				/*
4301 				 * Target agreed to do tagged queueing
4302 				 * and lied!
4303 				 * This problem implies the drive firmware is
4304 				 * broken.
4305 				 */
4306 				bad_reselect = "botched tag";
4307 				break;
4308 			}
4309 			tag = *c;
4310 
4311 			/* Set ptr to reconnecting scsi pkt */
4312 			tag_slots = fas->f_active[slot];
4313 			if (tag_slots != NULL) {
4314 				sp = tag_slots->f_slot[tag];
4315 			} else {
4316 				bad_reselect = "Invalid tag";
4317 				break;
4318 			}
4319 
4320 			fas->f_current_sp = sp;
4321 		}
4322 	}
4323 
4324 	if (fas->f_stat & FAS_STAT_PERR) {
4325 		sp = NULL;
4326 		bad_reselect = "Parity error in reconnect msg's";
4327 	}
4328 
4329 	if ((sp == NULL ||
4330 #ifdef FAS_TEST
4331 	    (fas_atest_reconn & (1<<Tgt(sp))) ||
4332 #endif
4333 	    (sp->cmd_flags & (CFLAG_CMDDISC|CFLAG_CMDPROXY)) == 0)) {
4334 		/*
4335 		 * this shouldn't really happen, so it is better
4336 		 * to reset the bus; some disks accept the abort
4337 		 * and then still reconnect
4338 		 */
4339 		if (bad_reselect == NULL) {
4340 			bad_reselect = "no command";
4341 		}
4342 #ifdef FAS_TEST
4343 		if (sp && !(fas_atest_reconn & (1<<Tgt(sp))) &&
4344 			fas_test_stop) {
4345 			debug_enter("bad reconnect");
4346 		} else {
4347 			fas_atest_reconn = 0;
4348 		}
4349 #endif
4350 		goto bad;
4351 
4352 	/*
4353 	 *  XXX remove this case or make it an ASSERT
4354 	 */
4355 	} else if (sp->cmd_flags & CFLAG_CMDPROXY) {
4356 		/*
4357 		 * If we got here, we were already attempting to
4358 		 * run a polled proxy command for this target.
4359 		 * Set ATN and, copy in the message, and drive
4360 		 * on (ignoring any parity error on the identify).
4361 		 */
4362 		IPRINTF1("fas_reconnect: fielding proxy cmd for %d\n",
4363 		    target);
4364 		fas_assert_atn(fas);
4365 		fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
4366 		tmp = 0;
4367 		while (tmp < fas->f_omsglen) {
4368 			fas->f_cur_msgout[tmp] =
4369 			    sp->cmd_cdb[FAS_PROXY_DATA+1+tmp];
4370 			tmp++;
4371 		}
4372 		sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
4373 
4374 		/*
4375 		 * pretend that the disconnected cmd is still disconnected
4376 		 * (this prevents ndisc from going negative)
4377 		 */
4378 		fas->f_ndisc++;
4379 		ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
4380 		ASSERT(fas->f_ncmds >= fas->f_ndisc);
4381 	}
4382 
4383 	ASSERT(fas->f_resel_slot == slot);
4384 	ASSERT(fas->f_ndisc > 0);
4385 	fas->f_ndisc--;
4386 	sp->cmd_flags &= ~CFLAG_CMDDISC;
4387 	New_state(fas, ACTS_UNKNOWN);
4388 
4389 	/*
4390 	 * A reconnect may imply a restore pointers operation
4391 	 * Note that some older disks (Micropolis in Pbox) do not
4392 	 * send a save data ptr on disconnect if all data has been
4393 	 * xferred. So, we cannot restore ptrs yet here.
4394 	 */
4395 	if ((sp->cmd_flags & CFLAG_DMAVALID) &&
4396 	    (sp->cmd_data_count != sp->cmd_saved_data_count)) {
4397 		sp->cmd_flags |= CFLAG_RESTORE_PTRS;
4398 	}
4399 
4400 	/*
4401 	 * Return to await the FUNCTION COMPLETE interrupt we
4402 	 * should get out of accepting the IDENTIFY message.
4403 	 */
4404 	EPRINTF2("Reconnecting %d.%d\n", target, slot % NLUNS_PER_TARGET);
4405 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RETURN2_END,
4406 	    "fas_reconnect_end (_RETURN2)");
4407 	return (ACTION_RETURN);
4408 
4409 bad:
4410 	if (sp && (fas->f_stat	& FAS_STAT_PERR)) {
4411 		sp->cmd_pkt->pkt_statistics |= STAT_PERR;
4412 	}
4413 	fas_log(fas, CE_WARN, "target %x: failed reselection (%s)",
4414 		target, bad_reselect);
4415 
4416 #ifdef FASDEBUG
4417 	fas_printstate(fas, "failed reselection");
4418 #endif
4419 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RESET5_END,
4420 	    "fas_reconnect_end (_RESET5)");
4421 	return (ACTION_RESET);
4422 }
4423 
4424 /*
4425  * handle unknown bus phase
4426  * we don't know what to expect so check status register for current
4427  * phase
4428  */
4429 int
4430 fas_handle_unknown(struct fas *fas)
4431 {
4432 	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_START,
4433 	    "fas_handle_unknown_start: fas 0x%p", fas);
4434 	EPRINTF("fas_handle_unknown:\n");
4435 
4436 	if ((fas->f_intr & FAS_INT_DISCON) == 0) {
4437 		/*
4438 		 * we call actions here rather than returning to phasemanage
4439 		 * (this is the most frequently called action)
4440 		 */
4441 		switch (fas->f_stat & FAS_PHASE_MASK) {
4442 		case FAS_PHASE_DATA_IN:
4443 		case FAS_PHASE_DATA_OUT:
4444 			New_state(fas, ACTS_DATA);
4445 			TRACE_0(TR_FAC_SCSI_FAS,
4446 			    TR_FAS_HANDLE_UNKNOWN_PHASE_DATA_END,
4447 			    "fas_handle_unknown_end (phase_data)");
4448 			return (fas_handle_data_start(fas));
4449 
4450 		case FAS_PHASE_MSG_OUT:
4451 			New_state(fas, ACTS_MSG_OUT);
4452 			TRACE_0(TR_FAC_SCSI_FAS,
4453 			    TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_OUT_END,
4454 			    "fas_handle_unknown_end (phase_msg_out)");
4455 			return (fas_handle_msg_out_start(fas));
4456 
4457 		case FAS_PHASE_MSG_IN:
4458 			New_state(fas, ACTS_MSG_IN);
4459 			TRACE_0(TR_FAC_SCSI_FAS,
4460 			    TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_IN_END,
4461 			    "fas_handle_unknown_end (phase_msg_in)");
4462 			return (fas_handle_msg_in_start(fas));
4463 
4464 		case FAS_PHASE_STATUS:
4465 			fas_reg_cmd_write(fas, CMD_FLUSH);
4466 #ifdef	FAS_TEST
4467 			if (fas_ptest_status & (1<<Tgt(fas->f_current_sp))) {
4468 				fas_assert_atn(fas);
4469 			}
4470 #endif	/* FAS_TEST */
4471 
4472 			fas_reg_cmd_write(fas, CMD_COMP_SEQ);
4473 			New_state(fas, ACTS_C_CMPLT);
4474 
4475 			TRACE_0(TR_FAC_SCSI_FAS,
4476 			    TR_FAS_HANDLE_UNKNOWN_PHASE_STATUS_END,
4477 			    "fas_handle_unknown_end (phase_status)");
4478 			return (fas_handle_c_cmplt(fas));
4479 
4480 		case FAS_PHASE_COMMAND:
4481 			New_state(fas, ACTS_CMD_START);
4482 			TRACE_0(TR_FAC_SCSI_FAS,
4483 			    TR_FAS_HANDLE_UNKNOWN_PHASE_CMD_END,
4484 			    "fas_handle_unknown_end (phase_cmd)");
4485 			return (fas_handle_cmd_start(fas));
4486 		}
4487 
4488 		fas_printstate(fas, "Unknown bus phase");
4489 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_RESET_END,
4490 		    "fas_handle_unknown_end (reset)");
4491 		return (ACTION_RESET);
4492 
4493 	} else {
4494 		/*
4495 		 * Okay. What to do now? Let's try (for the time being)
4496 		 * assuming that the target went south and dropped busy,
4497 		 * as a disconnect implies that either we received
4498 		 * a completion or a disconnect message, or that we
4499 		 * had sent an ABORT OPERATION or BUS DEVICE RESET
4500 		 * message. In either case, we expected the disconnect
4501 		 * and should have fielded it elsewhere.
4502 		 *
4503 		 * If we see a chip disconnect here, this is an unexpected
4504 		 * loss of BSY*. Clean up the state of the chip and return.
4505 		 *
4506 		 */
4507 		int msgout = fas->f_cur_msgout[0];
4508 		struct fas_cmd *sp = fas->f_current_sp;
4509 		int target = Tgt(sp);
4510 
4511 		if (msgout == MSG_HEAD_QTAG || msgout == MSG_SIMPLE_QTAG) {
4512 			msgout = fas->f_cur_msgout[2];
4513 		}
4514 		EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
4515 			fas->f_cur_msgout[0], fas->f_cur_msgout[1],
4516 			fas->f_cur_msgout[2], fas->f_last_msgout);
4517 
4518 		if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG ||
4519 		    msgout == MSG_DEVICE_RESET) {
4520 			IPRINTF2("Successful %s message to target %d\n",
4521 			    scsi_mname(msgout), Tgt(sp));
4522 			if (sp->cmd_flags & CFLAG_CMDPROXY) {
4523 				sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
4524 			}
4525 			if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
4526 				fas->f_abort_msg_sent++;
4527 				if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4528 					fas_set_pkt_reason(fas, sp,
4529 					    CMD_ABORTED, STAT_ABORTED);
4530 				}
4531 			} else if (msgout == MSG_DEVICE_RESET) {
4532 				fas->f_reset_msg_sent++;
4533 				if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4534 					fas_set_pkt_reason(fas, sp,
4535 					    CMD_RESET, STAT_DEV_RESET);
4536 				}
4537 				fas_force_renegotiation(fas, target);
4538 			}
4539 		} else {
4540 			if ((fas->f_last_msgout == MSG_EXTENDED) &&
4541 			    (fas->f_last_msgin == MSG_REJECT)) {
4542 				/*
4543 				 * the target rejected the negotiations,
4544 				 * so resubmit again (no_sync/no_wide
4545 				 * is now set)
4546 				 */
4547 				New_state(fas, STATE_FREE);
4548 				fas_reg_cmd_write(fas, CMD_EN_RESEL);
4549 				fas_remove_cmd(fas, sp, NEW_TIMEOUT);
4550 				fas_decrement_ncmds(fas, sp);
4551 				fas_check_ncmds(fas);
4552 				sp->cmd_flags &= ~CFLAG_TRANFLAG;
4553 				(void) fas_accept_pkt(fas, sp,	NO_TRAN_BUSY);
4554 				fas_check_ncmds(fas);
4555 				TRACE_0(TR_FAC_SCSI_FAS,
4556 				    TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
4557 				    "fas_handle_unknown_end (int_discon)");
4558 				return (ACTION_SEARCH);
4559 
4560 			} else if (fas->f_last_msgout == MSG_EXTENDED)	{
4561 				/*
4562 				 * target dropped off the bus during
4563 				 * negotiations
4564 				 */
4565 				fas_reset_sync_wide(fas);
4566 				fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
4567 			}
4568 
4569 			fas_set_pkt_reason(fas, sp, CMD_UNX_BUS_FREE, 0);
4570 #ifdef FASDEBUG
4571 			fas_printstate(fas, "unexpected bus free");
4572 #endif
4573 		}
4574 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
4575 		    "fas_handle_unknown_end (int_discon)");
4576 		return (ACTION_FINISH);
4577 	}
4578 	_NOTE(NOT_REACHED)
4579 	/* NOTREACHED */
4580 }
4581 
4582 /*
4583  * handle target disconnecting
4584  */
4585 static int
4586 fas_handle_clearing(struct fas *fas)
4587 {
4588 	struct fas_cmd *sp = fas->f_current_sp;
4589 
4590 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_START,
4591 	    "fas_handle_clearing_start");
4592 	EPRINTF("fas_handle_clearing:\n");
4593 
4594 	if (fas->f_laststate == ACTS_C_CMPLT ||
4595 	    fas->f_laststate == ACTS_MSG_IN_DONE) {
4596 		if (INTPENDING(fas)) {
4597 			volatile struct fasreg *fasreg = fas->f_reg;
4598 
4599 			fas->f_stat = fas_reg_read(fas,
4600 				(uchar_t *)&fasreg->fas_stat);
4601 			fas->f_intr = fas_reg_read(fas,
4602 				(uchar_t *)&fasreg->fas_intr);
4603 			if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
4604 				return (fas_illegal_cmd_or_bus_reset(fas));
4605 			}
4606 		} else {
4607 			/*
4608 			 * change e_laststate for the next time around
4609 			 */
4610 			fas->f_laststate = ACTS_CLEARING;
4611 			TRACE_0(TR_FAC_SCSI_FAS,
4612 			    TR_FAS_HANDLE_CLEARING_RETURN1_END,
4613 			    "fas_handle_clearing_end (ACTION_RETURN1)");
4614 			return (ACTION_RETURN);
4615 		}
4616 	}
4617 
4618 	if (fas->f_intr == FAS_INT_DISCON) {
4619 		/*
4620 		 * At this point the FAS chip has disconnected. The bus should
4621 		 * be either quiet or someone may be attempting a reselection
4622 		 * of us (or somebody else). Call the routine that sets the
4623 		 * chip back to a correct and known state.
4624 		 * If the last message in was a disconnect, search
4625 		 * for new work to do, else return to call fas_finish()
4626 		 */
4627 		fas->f_last_msgout = 0xff;
4628 		fas->f_omsglen = 0;
4629 		if (fas->f_last_msgin == MSG_DISCONNECT) {
4630 
4631 			fas_reg_cmd_write(fas, CMD_EN_RESEL);
4632 
4633 			New_state(fas, STATE_FREE);
4634 
4635 			ASSERT(fas->f_current_sp != NULL);
4636 			EPRINTF2("disconnecting %d.%d\n", Tgt(sp), Lun(sp));
4637 
4638 			sp->cmd_pkt->pkt_statistics |= STAT_DISCON;
4639 			sp->cmd_flags |= CFLAG_CMDDISC;
4640 			if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4641 				fas->f_ndisc++;
4642 			}
4643 			ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
4644 			ASSERT(fas->f_ncmds >= fas->f_ndisc);
4645 
4646 			fas->f_current_sp = NULL;
4647 
4648 			/*
4649 			 * start a cmd here to save time
4650 			 */
4651 			if ((fas->f_ncmds > fas->f_ndisc) && fas_ustart(fas)) {
4652 				TRACE_0(TR_FAC_SCSI_FAS,
4653 				    TR_FAS_HANDLE_CLEARING_RETURN2_END,
4654 				    "fas_handle_clearing_end (ACTION_RETURN2)");
4655 				return (ACTION_RETURN);
4656 			}
4657 
4658 
4659 			TRACE_0(TR_FAC_SCSI_FAS,
4660 			    TR_FAS_HANDLE_CLEARING_RETURN3_END,
4661 			    "fas_handle_clearing_end (ACTION_RETURN3)");
4662 			return (ACTION_RETURN);
4663 		} else {
4664 			TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_END,
4665 			    "fas_handle_clearing_end");
4666 			return (fas_finish(fas));
4667 		}
4668 	} else {
4669 		/*
4670 		 * If the target didn't disconnect from the
4671 		 * bus, that is a gross fatal error.
4672 		 * XXX this can be caused by asserting ATN
4673 		 * XXX check bus phase and if msgout, send a message
4674 		 */
4675 		fas_log(fas, CE_WARN,
4676 		    "Target %d didn't disconnect after sending %s",
4677 		    Tgt(sp), scsi_mname(fas->f_last_msgin));
4678 
4679 		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4680 
4681 #ifdef FASDEBUG
4682 		IPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
4683 			fas->f_cur_msgout[0], fas->f_cur_msgout[1],
4684 			fas->f_cur_msgout[2], fas->f_last_msgout);
4685 		IPRINTF1("last msgin=%x\n", fas->f_last_msgin);
4686 #endif
4687 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_ABORT_END,
4688 		    "fas_handle_clearing_end (ACTION_ABORT_CURCMD)");
4689 		return (ACTION_ABORT_ALLCMDS);
4690 	}
4691 }
4692 
4693 /*
4694  * handle data phase start
4695  */
4696 static int
4697 fas_handle_data_start(struct fas *fas)
4698 {
4699 	uint64_t end;
4700 	uint32_t amt;
4701 	struct fas_cmd *sp = fas->f_current_sp;
4702 	int sending, phase;
4703 
4704 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_START,
4705 	    "fas_handle_data_start");
4706 	EPRINTF("fas_handle_data_start:\n");
4707 
4708 	if ((sp->cmd_flags & CFLAG_DMAVALID) == 0) {
4709 		fas_printstate(fas, "unexpected data phase");
4710 bad:
4711 		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4712 
4713 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT1_END,
4714 		    "fas_handle_data_end (ACTION_ABORT_CURCMD1)");
4715 		return (ACTION_ABORT_CURCMD);
4716 	} else {
4717 		sending = (sp->cmd_flags & CFLAG_DMASEND)? 1 : 0;
4718 	}
4719 
4720 	if (sp->cmd_flags & CFLAG_RESTORE_PTRS) {
4721 		if (fas_restore_pointers(fas, sp)) {
4722 			return (ACTION_ABORT_CURCMD);
4723 		}
4724 		sp->cmd_flags &= ~CFLAG_RESTORE_PTRS;
4725 	}
4726 
4727 	/*
4728 	 * And make sure our DMA pointers are in good shape.
4729 	 *
4730 	 * Because SCSI is SCSI, the current DMA pointer has got to be
4731 	 * greater than or equal to our DMA base address. All other cases
4732 	 * that might have affected this always set curaddr to be >=
4733 	 * to the DMA base address.
4734 	 */
4735 	ASSERT(sp->cmd_cur_addr >= sp->cmd_dmacookie.dmac_address);
4736 	end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4737 		(uint64_t)sp->cmd_dmacookie.dmac_size;
4738 
4739 	DPRINTF5(
4740 	    "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%"
4741 	    PRIx64 ", nwin=%x\n",
4742 	    sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
4743 	    sp->cmd_nwin);
4744 	DPRINTF2("dmac_address = %x, dmac_size=%lx\n",
4745 	    sp->cmd_dmacookie.dmac_address, sp->cmd_dmacookie.dmac_size);
4746 
4747 	if (sp->cmd_cur_addr >= end) {
4748 		if (fas_next_window(fas, sp, end)) {
4749 			goto bad;
4750 		}
4751 		end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4752 			(uint64_t)sp->cmd_dmacookie.dmac_size;
4753 		DPRINTF2("dmac_address=%x, dmac_size=%lx\n",
4754 		    sp->cmd_dmacookie.dmac_address,
4755 		    sp->cmd_dmacookie.dmac_size);
4756 	}
4757 
4758 	amt = end - sp->cmd_cur_addr;
4759 	if (fas->f_dma_attr->dma_attr_count_max < amt) {
4760 		amt = fas->f_dma_attr->dma_attr_count_max;
4761 	}
4762 	DPRINTF3("amt=%x, end=%lx, cur_addr=%x\n", amt, end, sp->cmd_cur_addr);
4763 
4764 #ifdef FASDEBUG
4765 	/*
4766 	 * Make sure that we don't cross a boundary we can't handle
4767 	 */
4768 	end = (uint64_t)sp->cmd_cur_addr + (uint64_t)amt - 1;
4769 	if ((end & ~fas->f_dma_attr->dma_attr_seg) !=
4770 	    (sp->cmd_cur_addr & ~fas->f_dma_attr->dma_attr_seg)) {
4771 		EPRINTF3("curaddr %x curaddr+amt %" PRIx64
4772 		    " cntr_max %" PRIx64 "\n",
4773 		    sp->cmd_cur_addr, end, fas->f_dma_attr->dma_attr_seg);
4774 		amt = (end & ~fas->f_dma_attr->dma_attr_seg) - sp->cmd_cur_addr;
4775 		if (amt == 0 || amt > fas->f_dma_attr->dma_attr_count_max) {
4776 			fas_log(fas, CE_WARN, "illegal dma boundary? %x", amt);
4777 			goto bad;
4778 		}
4779 	}
4780 #endif
4781 
4782 	end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4783 		(uint64_t)sp->cmd_dmacookie.dmac_size -
4784 		(uint64_t)sp->cmd_cur_addr;
4785 	if (amt > end) {
4786 		EPRINTF4("ovflow amt %x s.b. %" PRIx64 " curaddr %x count %x\n",
4787 		    amt, end, sp->cmd_cur_addr, sp->cmd_dmacount);
4788 		amt = (uint32_t)end;
4789 	}
4790 
4791 	fas->f_lastcount = amt;
4792 
4793 	EPRINTF4("%d.%d cmd 0x%x to xfer %x\n", Tgt(sp), Lun(sp),
4794 	    sp->cmd_pkt->pkt_cdbp[0], amt);
4795 
4796 	phase = fas->f_stat & FAS_PHASE_MASK;
4797 
4798 	if ((phase == FAS_PHASE_DATA_IN) && !sending) {
4799 		FAS_DMA_WRITE(fas, amt, sp->cmd_cur_addr,
4800 		    CMD_TRAN_INFO|CMD_DMA);
4801 	} else if ((phase == FAS_PHASE_DATA_OUT) && sending) {
4802 		FAS_DMA_READ(fas, amt, sp->cmd_cur_addr, amt,
4803 		    CMD_TRAN_INFO|CMD_DMA);
4804 	} else {
4805 		fas_log(fas, CE_WARN,
4806 		    "unwanted data xfer direction for Target %d", Tgt(sp));
4807 		fas_set_pkt_reason(fas, sp, CMD_DMA_DERR, 0);
4808 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT2_END,
4809 		    "fas_handle_data_end (ACTION_ABORT_CURCMD2)");
4810 		return (ACTION_ABORT_CURCMD);
4811 	}
4812 
4813 #ifdef	FAS_TEST
4814 	if (!sending && (fas_ptest_data_in & (1<<Tgt(sp)))) {
4815 		fas_assert_atn(fas);
4816 	}
4817 #endif	/* FAS_TEST */
4818 
4819 	New_state(fas, ACTS_DATA_DONE);
4820 
4821 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_END,
4822 	    "fas_handle_data_end (ACTION_RETURN)");
4823 	return (ACTION_RETURN);
4824 }
4825 
/*
 * Data phase has (partially) completed: check for DMA and parity
 * errors, reconcile how many bytes actually moved using the chip's
 * transfer counter and fifo residue, advance the command's data
 * pointers, and dispatch on the new bus phase (possibly starting
 * another data transfer immediately).
 */
static int
fas_handle_data_done(struct fas *fas)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	volatile struct dma *dmar = fas->f_dma;
	struct fas_cmd *sp = fas->f_current_sp;
	uint32_t xfer_amt;
	char was_sending;
	uchar_t stat, fifoamt, tgt;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_START,
	    "fas_handle_data_done_start");
	EPRINTF("fas_handle_data_done\n");

	/* snapshot state; stat is modified locally below */
	tgt = Tgt(sp);
	stat = fas->f_stat;
	was_sending = (sp->cmd_flags & CFLAG_DMASEND) ? 1 : 0;

	/*
	 * Check for DMA errors (parity or memory fault)
	 */
	if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr)) &
	    DMA_ERRPEND) {
		/*
		 * It would be desirable to set the ATN* line and attempt to
		 * do the whole schmear of INITIATOR DETECTED ERROR here,
		 * but that is too hard to do at present.
		 */
		fas_log(fas, CE_WARN, "Unrecoverable DMA error on dma %s",
		    (was_sending) ? "send" : "receive");
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
		    "fas_handle_data_done_end (ACTION_RESET)");
		return (ACTION_RESET);
	}

	/*
	 * Data Receive conditions:
	 *
	 * Check for parity errors. If we have a parity error upon
	 * receive, the FAS chip has asserted ATN* for us already.
	 */
	if (!was_sending) {
#ifdef	FAS_TEST
		if (fas_ptest_data_in & (1<<tgt)) {
			fas_ptest_data_in = 0;
			stat |= FAS_STAT_PERR;
			if (fas_test_stop > 1) {
				debug_enter("ptest_data_in");
			}
		}
#endif	/* FAS_TEST */
		if (stat & FAS_STAT_PERR) {
			fas_log(fas, CE_WARN,
			    "SCSI bus DATA IN phase parity error");
			/* queue INITIATOR DETECTED ERROR for next msgout */
			fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
			fas->f_omsglen = 1;
			sp->cmd_pkt->pkt_statistics |= STAT_PERR;
			sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
		}
	}

	FAS_FLUSH_DMA(fas);

	/*
	 * Check to make sure we're still connected to the target.
	 * If the target dropped the bus, that is a fatal error.
	 * We don't even attempt to count what we were transferring
	 * here. Let fas_handle_unknown clean up for us.
	 */
	if (fas->f_intr != FAS_INT_BUS) {
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_DATA_DONE_PHASEMANAGE_END,
		    "fas_handle_data_done_end (ACTION_PHASEMANAGE)");
		return (ACTION_PHASEMANAGE);
	}

	/*
	 * Figure out how far we got.
	 * Latch up fifo amount first and double if wide has been enabled
	 */
	fifoamt = FIFO_CNT(fas);
	if (fas->f_wide_enabled & (1<<tgt)) {
		/* fifo counts 16-bit words on a wide bus; convert to bytes */
		fifoamt = fifoamt << 1;
	}

	if (stat & FAS_STAT_XZERO) {
		/* counter expired: the full programmed count moved */
		xfer_amt = fas->f_lastcount;
	} else {
		GET_FAS_COUNT(fasreg, xfer_amt);
		xfer_amt = fas->f_lastcount - xfer_amt;
	}
	DPRINTF4("fifoamt=%x, xfer_amt=%x, lastcount=%x, stat=%x\n",
	    fifoamt, xfer_amt, fas->f_lastcount, stat);


	/*
	 * Unconditionally knock off by the amount left
	 * in the fifo if we were sending out the SCSI bus.
	 *
	 * If we were receiving from the SCSI bus, believe
	 * what the chip told us (either XZERO or by the
	 * value calculated from the counter register).
	 * The reason we don't look at the fifo for
	 * incoming data is that in synchronous mode
	 * the fifo may have further data bytes, and
	 * for async mode we assume that all data in
	 * the fifo will have been transferred before
	 * the fas asserts an interrupt.
	 */
	if (was_sending) {
		xfer_amt -= fifoamt;
	}

#ifdef FASDEBUG
{
	int phase = stat & FAS_PHASE_MASK;
	fas->f_stat2 = fas_reg_read(fas,
				(uchar_t *)&fasreg->fas_stat2);

	if (((fas->f_stat & FAS_STAT_XZERO) == 0) &&
	    (phase != FAS_PHASE_DATA_IN) &&
	    (phase != FAS_PHASE_DATA_OUT) &&
	    (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
		fas_log(fas, CE_WARN,
		    "input shuttle not empty at end of data phase");
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
		"fas_handle_data_done_end (ACTION_RESET)");
		return (ACTION_RESET);
	}
}
#endif /* FASDEBUG */

	/*
	 * If this was a synchronous transfer, flag it.
	 * Also check for the errata condition of long
	 * last REQ/ pulse for some synchronous targets
	 */
	if (fas->f_offset[tgt]) {
		/*
		 * flag that a synchronous data xfer took place
		 */
		sp->cmd_pkt->pkt_statistics |= STAT_SYNC;

		if (was_sending)
			fas_reg_cmd_write(fas, CMD_FLUSH);
	} else {
		/*
		 * If we aren't doing Synchronous Data Transfers,
		 * definitely offload the fifo.
		 */
		fas_reg_cmd_write(fas, CMD_FLUSH);
	}

	/*
	 * adjust pointers...
	 */
	DPRINTF3("before:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
	    sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
	sp->cmd_data_count += xfer_amt;
	sp->cmd_cur_addr += xfer_amt;
	sp->cmd_pkt->pkt_state |= STATE_XFERRED_DATA;
	New_state(fas, ACTS_UNKNOWN);
	DPRINTF3("after:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
	    sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);

	/* still in a data phase? go straight back to another transfer */
	stat &= FAS_PHASE_MASK;
	if (stat == FAS_PHASE_DATA_IN || stat == FAS_PHASE_DATA_OUT) {
		fas->f_state = ACTS_DATA;
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_DATA_DONE_ACTION1_END,
		    "fas_handle_data_done_end (action1)");
		return (fas_handle_data_start(fas));
	}

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_ACTION2_END,
	    "fas_handle_data_done_end (action2)");
	return (fas_handle_unknown(fas));
}
5007 
/* shared warning text for MESSAGE IN phase parity errors */
static char msginperr[] = "SCSI bus MESSAGE IN phase parity error";
5009 
5010 static int
5011 fas_handle_c_cmplt(struct fas *fas)
5012 {
5013 	struct fas_cmd *sp = fas->f_current_sp;
5014 	volatile struct fasreg *fasreg = fas->f_reg;
5015 	uchar_t sts, msg, intr, perr;
5016 
5017 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_START,
5018 	    "fas_handle_c_cmplt_start");
5019 	EPRINTF("fas_handle_c_cmplt:\n");
5020 
5021 
5022 	/*
5023 	 * if target is fast, we can get cmd. completion by the time we get
5024 	 * here. Otherwise, we'll have to taken an interrupt.
5025 	 */
5026 	if (fas->f_laststate == ACTS_UNKNOWN) {
5027 		if (INTPENDING(fas)) {
5028 			fas->f_stat = fas_reg_read(fas,
5029 				(uchar_t *)&fasreg->fas_stat);
5030 			intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
5031 			fas->f_intr = intr;
5032 			if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
5033 				return (fas_illegal_cmd_or_bus_reset(fas));
5034 			}
5035 		} else {
5036 			/*
5037 			 * change f_laststate for the next time around
5038 			 */
5039 			fas->f_laststate = ACTS_C_CMPLT;
5040 			TRACE_0(TR_FAC_SCSI_FAS,
5041 			    TR_FAS_HANDLE_C_CMPLT_RETURN1_END,
5042 			    "fas_handle_c_cmplt_end (ACTION_RETURN1)");
5043 			return (ACTION_RETURN);
5044 		}
5045 	} else {
5046 		intr = fas->f_intr;
5047 	}
5048 
5049 #ifdef	FAS_TEST
5050 	if (fas_ptest_status & (1<<Tgt(sp))) {
5051 		fas_ptest_status = 0;
5052 		fas->f_stat |= FAS_STAT_PERR;
5053 		if (fas_test_stop > 1) {
5054 			debug_enter("ptest_status");
5055 		}
5056 	} else if ((fas_ptest_msgin & (1<<Tgt(sp))) && fas_ptest_msg == 0) {
5057 		fas_ptest_msgin = 0;
5058 		fas_ptest_msg = -1;
5059 		fas->f_stat |= FAS_STAT_PERR;
5060 		if (fas_test_stop > 1) {
5061 			debug_enter("ptest_completion");
5062 		}
5063 	}
5064 #endif	/* FAS_TEST */
5065 
5066 	if (intr == FAS_INT_DISCON) {
5067 		New_state(fas, ACTS_UNKNOWN);
5068 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION1_END,
5069 		    "fas_handle_c_cmplt_end (action1)");
5070 		return (fas_handle_unknown(fas));
5071 	}
5072 
5073 	if ((perr = (fas->f_stat & FAS_STAT_PERR)) != 0) {
5074 		fas_assert_atn(fas);
5075 		sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5076 	}
5077 
5078 	/*
5079 	 * do a msg accept now and read the fifo data
5080 	 */
5081 	if (intr & FAS_INT_FCMP) {
5082 		/*
5083 		 * The FAS manuals state that this sequence completes
5084 		 * with a BUS SERVICE interrupt if just the status
5085 		 * byte was received, else a FUNCTION COMPLETE interrupt
5086 		 * if both status and a message was received.
5087 		 *
5088 		 * if we give the MSG_ACT before reading the msg byte
5089 		 * we get the status byte again and if the status is zero
5090 		 * then we won't detect a failure
5091 		 */
5092 		*(sp->cmd_pkt->pkt_scbp) =
5093 		    sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5094 		fas->f_last_msgin = fas->f_imsgarea[0] =
5095 		    msg = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5096 
5097 		fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5098 		sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
5099 
5100 		/*
5101 		 * The manuals also state that ATN* is asserted if
5102 		 * bad parity is detected.
5103 		 *
5104 		 * The one case that we cannot handle is where we detect
5105 		 * bad parity for the status byte, but the target refuses
5106 		 * to go to MESSAGE OUT phase right away. This means that
5107 		 * if that happens, we will misconstrue the parity error
5108 		 * to be for the completion message, not the status byte.
5109 		 */
5110 		if (perr) {
5111 			fas_log(fas, CE_WARN, msginperr);
5112 			sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5113 
5114 			fas->f_cur_msgout[0] = MSG_MSG_PARITY;
5115 			fas->f_omsglen = 1;
5116 			New_state(fas, ACTS_UNKNOWN);
5117 			TRACE_0(TR_FAC_SCSI_FAS,
5118 				TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
5119 				"fas_handle_c_cmplt_end (action5)");
5120 			return (ACTION_RETURN);
5121 		}
5122 
5123 	} else if (intr == FAS_INT_BUS) {
5124 		/*
5125 		 * We only got the status byte.
5126 		 */
5127 		sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5128 		sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
5129 		*(sp->cmd_pkt->pkt_scbp) = sts;
5130 		msg = INVALID_MSG;
5131 
5132 		IPRINTF1("fas_handle_cmd_cmplt: sts=%x, no msg byte\n", sts);
5133 
5134 		if (perr) {
5135 			/*
5136 			 * If we get a parity error on a status byte
5137 			 * assume that it was a CHECK CONDITION
5138 			 */
5139 			sts = STATUS_CHECK;
5140 			fas_log(fas, CE_WARN,
5141 			    "SCSI bus STATUS phase parity error");
5142 			fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
5143 			fas->f_omsglen = 1;
5144 			New_state(fas, ACTS_UNKNOWN);
5145 			TRACE_0(TR_FAC_SCSI_FAS,
5146 				TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
5147 				"fas_handle_c_cmplt_end (action5)");
5148 			return (fas_handle_unknown(fas));
5149 		}
5150 
5151 	} else {
5152 		msg = sts = INVALID_MSG;
5153 		IPRINTF("fas_handle_cmd_cmplt: unexpected intr\n");
5154 		New_state(fas, ACTS_UNKNOWN);
5155 		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION2_END,
5156 		    "fas_handle_c_cmplt_end (action2)");
5157 		return (fas_handle_unknown(fas));
5158 	}
5159 
5160 	EPRINTF2("fas_handle_c_cmplt: status=%x, msg=%x\n", sts, msg);
5161 
5162 	EPRINTF1("Completion Message=%s\n", scsi_mname(msg));
5163 	if (msg == MSG_COMMAND_COMPLETE) {
5164 		/*
5165 		 * Actually, if the message was a 'linked command
5166 		 * complete' message, the target isn't going to be
5167 		 * clearing the bus.
5168 		 */
5169 		New_state(fas, ACTS_CLEARING);
5170 		TRACE_0(TR_FAC_SCSI_FAS,
5171 		    TR_FAS_HANDLE_C_CMPLT_ACTION4_END,
5172 		    "fas_handle_c_cmplt_end (action4)");
5173 		return (fas_handle_clearing(fas));
5174 	} else {
5175 		fas->f_imsglen = 1;
5176 		fas->f_imsgindex = 1;
5177 		New_state(fas, ACTS_MSG_IN_DONE);
5178 		TRACE_0(TR_FAC_SCSI_FAS,
5179 		    TR_FAS_HANDLE_C_CMPLT_ACTION3_END,
5180 		    "fas_handle_c_cmplt_end (action3)");
5181 		return (fas_handle_msg_in_done(fas));
5182 	}
5183 }
5184 
5185 /*
5186  * prepare for accepting a message byte from the fifo
5187  */
static int
fas_handle_msg_in_start(struct fas *fas)
{
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_START,
	    "fas_handle_msg_in_start");
	EPRINTF("fas_handle_msg_in_start\n");

	/*
	 * Pick up a message byte.
	 * Clear the FIFO so we
	 * don't get confused.
	 */
	if (!FIFO_EMPTY(fas)) {
		fas_reg_cmd_write(fas, CMD_FLUSH);
	}
	/* start a one-byte information transfer into the FIFO */
	fas_reg_cmd_write(fas, CMD_TRAN_INFO);
	/* arm fas_handle_msg_in_done() to consume exactly one byte */
	fas->f_imsglen = 1;
	fas->f_imsgindex = 0;
	New_state(fas, ACTS_MSG_IN_DONE);

	/*
	 * give a little extra time by returning to phasemanage
	 */
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_END,
	    "fas_handle_msg_in_end (ACTION_PHASEMANAGE)");
	return (ACTION_PHASEMANAGE);
}
5215 
5216 /*
5217  * We come here after issuing a MSG_ACCEPT
5218  * command and are expecting more message bytes.
5219  * The FAS should be asserting a BUS SERVICE
5220  * interrupt status, but may have asserted
5221  * a different interrupt in the case that
5222  * the target disconnected and dropped BSY*.
5223  *
5224  * In the case that we are eating up message
5225  * bytes (and throwing them away unread) because
5226  * we have ATN* asserted (we are trying to send
5227  * a message), we do not consider it an error
5228  * if the phase has changed out of MESSAGE IN.
5229  */
static int
fas_handle_more_msgin(struct fas *fas)
{
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_START,
	    "fas_handle_more_msgin_start");
	EPRINTF("fas_handle_more_msgin\n");

	if (fas->f_intr & FAS_INT_BUS) {
		if ((fas->f_stat & FAS_PHASE_MASK) == FAS_PHASE_MSG_IN) {
			/*
			 * Fetch another byte of a message in.
			 */
			fas_reg_cmd_write(fas, CMD_TRAN_INFO);
			New_state(fas, ACTS_MSG_IN_DONE);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MORE_MSGIN_RETURN1_END,
			    "fas_handle_more_msgin_end (ACTION_RETURN)");
			return (ACTION_RETURN);
		}

		/*
		 * If we were gobbling up a message and we have
		 * changed phases, handle this silently, else
		 * complain. In either case, we return to let
		 * fas_phasemanage() handle things.
		 *
		 * If it wasn't a BUS SERVICE interrupt,
		 * let fas_phasemanage() find out if the
		 * chip disconnected.
		 */
		if (fas->f_imsglen != 0) {
			fas_log(fas, CE_WARN,
			    "Premature end of extended message");
		}
	}
	/* phase changed or no BUS SERVICE intr: resolve via unknown state */
	New_state(fas, ACTS_UNKNOWN);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_RETURN2_END,
	    "fas_handle_more_msgin_end (action)");
	return (fas_handle_unknown(fas));
}
5270 
/*
 * Process the message byte(s) that the chip has fetched.
 *
 * Reached after a CMD_TRAN_INFO started by fas_handle_msg_in_start()/
 * fas_handle_more_msgin(), or directly from fas_handle_c_cmplt() with
 * the message byte already stored in f_imsgarea[0].  Complete messages
 * are dispatched to fas_onebyte_msg()/fas_twobyte_msg()/
 * fas_multibyte_msg(); if one of those handlers requests a reply
 * message (sndmsg > 0) we queue it and assert ATN, and if it requests
 * an action (sndmsg < 0) we return that action to the caller.
 *
 * Returns an ACTION_* code or the result of a chained phase handler.
 */
static int
fas_handle_msg_in_done(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	volatile struct fasreg *fasreg = fas->f_reg;
	int sndmsg = 0;		/* reply message to send back, if any */
	uchar_t msgin;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_START,
	    "fas_handle_msg_in_done_start");
	EPRINTF("fas_handle_msg_in_done:\n");
	if (fas->f_laststate == ACTS_MSG_IN) {
		/* the transfer may already have completed; poll for it */
		if (INTPENDING(fas)) {
			fas->f_stat = fas_reg_read(fas,
				(uchar_t *)&fasreg->fas_stat);
			fas->f_stat2 = fas_reg_read(fas,
				(uchar_t *)&fasreg->fas_stat2);

			fas_read_fifo(fas);

			fas->f_intr = fas_reg_read(fas,
				(uchar_t *)&fasreg->fas_intr);
			if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
				return (fas_illegal_cmd_or_bus_reset(fas));
			}
		} else {
			/*
			 * change f_laststate for the next time around
			 */
			fas->f_laststate = ACTS_MSG_IN_DONE;
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_RETURN1_END,
			    "fas_handle_msg_in_done_end (ACTION_RETURN1)");
			return (ACTION_RETURN);
		}
	}

	/*
	 * the most common case is a disconnect message. we do
	 * a fast path for this condition and if it fails then
	 * we go for the detailed error handling
	 */
#ifndef  FAS_TEST
	if (((fas->f_laststate == ACTS_MSG_IN) ||
	    (fas->f_laststate == ACTS_MSG_IN_DONE)) &&
	    ((fas->f_intr & FAS_INT_DISCON) == 0) &&
	    ((fas->f_stat & FAS_STAT_PERR) == 0) &&
	    ((sp->cmd_pkt_flags & FLAG_NODISCON) == 0)) {

		if ((fas->f_fifolen == 1) &&
		    (fas->f_imsglen == 1) &&
		    (fas->f_fifo[0] == MSG_DISCONNECT)) {

			fas_reg_cmd_write(fas, CMD_MSG_ACPT);
			fas->f_imsgarea[fas->f_imsgindex++] = fas->f_fifo[0];
			fas->f_last_msgin = MSG_DISCONNECT;
			New_state(fas, ACTS_CLEARING);

			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
			    "fas_handle_msg_in_done_end (action)");

			return (fas_handle_clearing(fas));
		}
	}
#endif	/* not FAS_TEST */

	/*
	 * We can be called here for both the case where
	 * we had requested the FAS chip to fetch a message
	 * byte from the target (at the target's request).
	 * We can also be called in the case where we had
	 * been using the CMD_COMP_SEQ command to pick up
	 * both a status byte and a completion message from
	 * a target, but where the message wasn't one of
	 * COMMAND COMPLETE, LINKED COMMAND COMPLETE, or
	 * LINKED COMMAND COMPLETE (with flag). This is a
	 * legal (albeit extremely unusual) SCSI bus trans-
	 * -ition, so we have to handle it.
	 */
	if (fas->f_laststate != ACTS_C_CMPLT) {
#ifdef	FAS_TEST
reloop:
#endif	/* FAS_TEST */

		if (fas->f_intr & FAS_INT_DISCON) {
			fas_log(fas, CE_WARN,
			    "premature end of input message");
			New_state(fas, ACTS_UNKNOWN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_PHASEMANAGE_END,
			    "fas_handle_msg_in_done_end (ACTION_PHASEMANAGE)");
			return (ACTION_PHASEMANAGE);
		}

		/*
		 * Note that if f_imsglen is zero, then we are skipping
		 * input message bytes, so there is no reason to look for
		 * parity errors.
		 */
		if (fas->f_imsglen != 0 && (fas->f_stat & FAS_STAT_PERR)) {
			fas_log(fas, CE_WARN, msginperr);
			sndmsg = MSG_MSG_PARITY;
			sp->cmd_pkt->pkt_statistics |= STAT_PERR;
			fas_reg_cmd_write(fas, CMD_FLUSH);

		} else if ((msgin = fas->f_fifolen) != 1) {

			/*
			 * If we have got more than one or 0 bytes in the fifo,
			 * that is a gross screwup, and we should let the
			 * target know that we have completely fouled up.
			 */
			fas_printf(fas, "fifocount=%x", msgin);
			fas_printstate(fas, "input message botch");
			sndmsg = MSG_INITIATOR_ERROR;
			fas_reg_cmd_write(fas, CMD_FLUSH);
			fas_log(fas, CE_WARN, "input message botch");

		} else if (fas->f_imsglen == 0) {
			/*
			 * If we are in the middle of gobbling up and throwing
			 * away a message (due to a previous message input
			 * error), drive on.
			 */
			msgin = fas_reg_read(fas,
				(uchar_t *)&fasreg->fas_fifo_data);
			New_state(fas, ACTS_MSG_IN_MORE);

		} else {
			/* normal case: store the byte just received */
			msgin = fas->f_fifo[0];
			fas->f_imsgarea[fas->f_imsgindex++] = msgin;
		}

	} else {
		/*
		 * In this case, we have been called (from
		 * fas_handle_c_cmplt()) with the message
		 * already stored in the message array.
		 */
		msgin = fas->f_imsgarea[0];
	}

	/*
	 * Process this message byte (but not if we are
	 * going to be trying to send back some error
	 * anyway)
	 */
	if (sndmsg == 0 && fas->f_imsglen != 0) {

		if (fas->f_imsgindex < fas->f_imsglen) {
			/* message not complete yet; fetch more bytes */

			EPRINTF2("message byte %d: 0x%x\n",
			    fas->f_imsgindex-1,
			    fas->f_imsgarea[fas->f_imsgindex-1]);

			New_state(fas, ACTS_MSG_IN_MORE);

		} else if (fas->f_imsglen == 1) {

#ifdef	FAS_TEST
			if ((fas_ptest_msgin & (1<<Tgt(sp))) &&
			    fas_ptest_msg == msgin) {
				fas_ptest_msgin = 0;
				fas_ptest_msg = -1;
				fas_assert_atn(fas);
				fas->f_stat |= FAS_STAT_PERR;
				fas->f_imsgindex -= 1;
				if (fas_test_stop > 1) {
					debug_enter("ptest msgin");
				}
				goto reloop;
			}
#endif	/* FAS_TEST */

			sndmsg = fas_onebyte_msg(fas);

		} else if (fas->f_imsglen == 2) {
#ifdef	FAS_TEST
			if (fas_ptest_emsgin & (1<<Tgt(sp))) {
				fas_ptest_emsgin = 0;
				fas_assert_atn(fas);
				fas->f_stat |= FAS_STAT_PERR;
				fas->f_imsgindex -= 1;
				if (fas_test_stop > 1) {
					debug_enter("ptest emsgin");
				}
				goto reloop;
			}
#endif	/* FAS_TEST */

			if (fas->f_imsgarea[0] ==  MSG_EXTENDED) {
				static char *tool =
				    "Extended message 0x%x is too long";

				/*
				 * Is the incoming message too long
				 * to be stored in our local array?
				 */
				if ((int)(msgin+2) > IMSGSIZE) {
					fas_log(fas, CE_WARN,
					    tool, fas->f_imsgarea[0]);
					sndmsg = MSG_REJECT;
				} else {
					/* second byte is the extended msg length */
					fas->f_imsglen = msgin + 2;
					New_state(fas, ACTS_MSG_IN_MORE);
				}
			} else {
				sndmsg = fas_twobyte_msg(fas);
			}

		} else {
			sndmsg = fas_multibyte_msg(fas);
		}
	}

	if (sndmsg < 0) {
		/*
		 * If sndmsg is less than zero, one of the subsidiary
		 * routines needs to return some other state than
		 * ACTION_RETURN.
		 */
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_SNDMSG_END,
		    "fas_handle_msg_in_done_end (-sndmsg)");
		return (-sndmsg);

	} else if (sndmsg > 0) {
		if (IS_1BYTE_MSG(sndmsg)) {
			fas->f_omsglen = 1;
		}
		fas->f_cur_msgout[0] = (uchar_t)sndmsg;

		/*
		 * The target is not guaranteed to go to message out
		 * phase, period. Moreover, until the entire incoming
		 * message is transferred, the target may (and likely
		 * will) continue to transfer message bytes (which
		 * we will have to ignore).
		 *
		 * In order to do this, we'll go to 'infinite'
		 * message in handling by setting the current input
		 * message length to a sentinel of zero.
		 *
		 * This works regardless of the message we are trying
		 * to send out. At the point in time which we want
		 * to send a message in response to an incoming message
		 * we do not care any more about the incoming message.
		 *
		 * If we are sending a message in response to detecting
		 * a parity error on input, the FAS chip has already
		 * set ATN* for us, but it doesn't hurt to set it here
		 * again anyhow.
		 */
		fas_assert_atn(fas);
		New_state(fas, ACTS_MSG_IN_MORE);
		fas->f_imsglen = 0;
	}

	fas_reg_cmd_write(fas, CMD_FLUSH);

	/* release the bus: let the target continue past MESSAGE IN */
	fas_reg_cmd_write(fas, CMD_MSG_ACPT);

	if ((fas->f_laststate == ACTS_MSG_IN_DONE) &&
	    (fas->f_state == ACTS_CLEARING)) {
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
		    "fas_handle_msg_in_done_end (action)");
		return (fas_handle_clearing(fas));
	}
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_RETURN2_END,
	    "fas_handle_msg_in_done_end (ACTION_RETURN2)");
	return (ACTION_RETURN);
}
5543 
/*
 * Handle a complete one-byte incoming message.
 *
 * Returns 0 when no reply is required, a positive message byte to be
 * sent back to the target, or the negative of an ACTION_* code when
 * the caller (fas_handle_msg_in_done()) must take that action instead.
 */
static int
fas_onebyte_msg(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	int msgout = 0;
	uchar_t msgin = fas->f_last_msgin = fas->f_imsgarea[0];
	int tgt = Tgt(sp);

	EPRINTF("fas_onebyte_msg\n");

	if (msgin & MSG_IDENTIFY) {
		/*
		 * How did we get here? We should only see identify
		 * messages on a reconnection, but we'll handle this
		 * fine here (just in case we get this) as long as
		 * we believe that this is a valid identify message.
		 *
		 * For this to be a valid incoming message,
		 * bits 6-4 must be zero. Also, the
		 * bit that says that I'm an initiator and
		 * can support disconnection cannot possibly
		 * be set here.
		 */

		char garbled = ((msgin & (BAD_IDENTIFY|INI_CAN_DISCON)) != 0);

		fas_log(fas, CE_WARN, "%s message 0x%x from Target %d",
		    garbled ? "Garbled" : "Identify", msgin, tgt);

		if (garbled) {
			/*
			 * If it's a garbled message,
			 * try and tell the target...
			 */
			msgout = MSG_INITIATOR_ERROR;
		} else {
			New_state(fas, ACTS_UNKNOWN);
		}
		return (msgout);

	} else if (IS_2BYTE_MSG(msgin) || IS_EXTENDED_MSG(msgin)) {
		/* first byte of a longer message; go collect the rest */
		fas->f_imsglen = 2;
		New_state(fas, ACTS_MSG_IN_MORE);
		return (0);
	}

	New_state(fas, ACTS_UNKNOWN);

	switch (msgin) {
	case MSG_DISCONNECT:
		/*
		 * If we 'cannot' disconnect- reject this message.
		 * Note that we only key off of the pkt_flags here-
		 * the FLAG_NODISCON was set in fas_accept_pkt() if
		 * no disconnect was enabled in scsi_options
		 */
		if (sp->cmd_pkt_flags & FLAG_NODISCON) {
			msgout = MSG_REJECT;
			break;
		}
		/* FALLTHROUGH */
	case MSG_COMMAND_COMPLETE:
		fas->f_state = ACTS_CLEARING;
		break;

	case MSG_NOP:
		break;

	/* XXX Make it a MSG_REJECT handler */
	case MSG_REJECT:
	{
		uchar_t reason = 0;
		uchar_t lastmsg = fas->f_last_msgout;
		/*
		 * The target is rejecting the last message we sent.
		 *
		 * If the last message we attempted to send out was an
		 * extended message, we were trying to negotiate sync
		 * xfers- and we're okay.
		 *
		 * Otherwise, a target has rejected a message that
		 * it should have handled. We will abort the operation
		 * in progress and set the pkt_reason value here to
		 * show why we have completed. The process of aborting
		 * may be via a message or may be via a bus reset (as
		 * a last resort).
		 */
		msgout = (TAGGED(tgt)? MSG_ABORT_TAG : MSG_ABORT);

		switch (lastmsg) {
		case MSG_EXTENDED:
			if (fas->f_wdtr_sent) {
				/*
				 * Disable wide, Target rejected
				 * our WDTR message
				 */
				fas_set_wide_conf3(fas, tgt, 0);
				fas->f_nowide |= (1<<tgt);
				fas->f_wdtr_sent = 0;
				/*
				 * we still want to negotiate sync
				 */
				if ((fas->f_nosync & (1<<tgt)) == 0) {
					fas_assert_atn(fas);
					fas_make_sdtr(fas, 0, tgt);
				}
			} else if (fas->f_sdtr_sent) {
				/* SDTR rejected: fall back to async */
				fas_reg_cmd_write(fas, CMD_CLR_ATN);
				fas_revert_to_async(fas, tgt);
				fas->f_nosync |= (1<<tgt);
				fas->f_sdtr_sent = 0;
			}
			msgout = 0;
			break;
		case MSG_NOP:
			reason = CMD_NOP_FAIL;
			break;
		case MSG_INITIATOR_ERROR:
			reason = CMD_IDE_FAIL;
			break;
		case MSG_MSG_PARITY:
			reason = CMD_PER_FAIL;
			break;
		case MSG_REJECT:
			reason = CMD_REJECT_FAIL;
			break;
		/* XXX - abort not good, queue full handling or drain (?) */
		case MSG_SIMPLE_QTAG:
		case MSG_ORDERED_QTAG:
		case MSG_HEAD_QTAG:
			msgout = MSG_ABORT;
			reason = CMD_TAG_REJECT;
			break;
		case MSG_DEVICE_RESET:
			reason = CMD_BDR_FAIL;
			msgout = -ACTION_ABORT_CURCMD;
			break;
		case MSG_ABORT:
		case MSG_ABORT_TAG:
			/*
			 * If an RESET/ABORT OPERATION message is rejected
			 * it is time to yank the chain on the bus...
			 */
			reason = CMD_ABORT_FAIL;
			msgout = -ACTION_ABORT_CURCMD;
			break;
		default:
			if (IS_IDENTIFY_MSG(lastmsg)) {
				if (TAGGED(tgt)) {
					/*
					 * this often happens when the
					 * target rejected our tag
					 */
					reason = CMD_TAG_REJECT;
				} else {
					reason = CMD_ID_FAIL;
				}
			} else {
				reason = CMD_TRAN_ERR;
				msgout = -ACTION_ABORT_CURCMD;
			}

			break;
		}

		if (msgout) {
			fas_log(fas, CE_WARN,
			    "Target %d rejects our message '%s'",
			    tgt, scsi_mname(lastmsg));
			fas_set_pkt_reason(fas, sp, reason, 0);
		}

		break;
	}
	case MSG_RESTORE_PTRS:
		sp->cmd_cdbp = sp->cmd_pkt->pkt_cdbp;
		if (sp->cmd_data_count != sp->cmd_saved_data_count) {
			if (fas_restore_pointers(fas, sp)) {
				msgout = -ACTION_ABORT_CURCMD;
			} else if ((sp->cmd_pkt->pkt_reason & CMD_TRAN_ERR) &&
				(sp->cmd_pkt->pkt_statistics & STAT_PERR) &&
				(sp->cmd_cur_win == 0) &&
				(sp->cmd_data_count == 0)) {
				/* retried from the start; clear the old error */
				sp->cmd_pkt->pkt_reason &= ~CMD_TRAN_ERR;
			}
		}
		break;

	case MSG_SAVE_DATA_PTR:
		sp->cmd_saved_data_count = sp->cmd_data_count;
		sp->cmd_saved_win = sp->cmd_cur_win;
		sp->cmd_saved_cur_addr = sp->cmd_cur_addr;
		break;

	/* These don't make sense for us, and	*/
	/* will be rejected			*/
	/*	case MSG_INITIATOR_ERROR	*/
	/*	case MSG_ABORT			*/
	/*	case MSG_MSG_PARITY		*/
	/*	case MSG_DEVICE_RESET		*/
	default:
		msgout = MSG_REJECT;
		fas_log(fas, CE_WARN,
		    "Rejecting message '%s' from Target %d",
		    scsi_mname(msgin), tgt);
		break;
	}

	EPRINTF1("Message in: %s\n", scsi_mname(msgin));

	return (msgout);
}
5756 
5757 /*
5758  * phase handlers that are rarely used
5759  */
/*
 * Send the SCSI command (cdb) to the target: copy the cdb bytes into
 * the dma command area and start a dma transfer into the chip.
 * Completion is handled by fas_handle_cmd_done().
 */
static int
fas_handle_cmd_start(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	volatile uchar_t *tp = fas->f_cmdarea;
	int i;
	int amt = sp->cmd_cdblen;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_START,
	    "fas_handle_cmd_start_start");
	EPRINTF("fas_handle_cmd: send cmd\n");

	/* byte-copy the cdb into the (volatile) dma command area */
	for (i = 0; i < amt; i++) {
		*tp++ = sp->cmd_cdbp[i];
	}
	fas_reg_cmd_write(fas, CMD_FLUSH);

	FAS_DMA_READ(fas, amt, fas->f_dmacookie.dmac_address, amt,
	    CMD_TRAN_INFO|CMD_DMA);
	fas->f_lastcount = amt;

	New_state(fas, ACTS_CMD_DONE);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_END,
	    "fas_handle_cmd_start_end");
	return (ACTION_RETURN);
}
5787 
/*
 * The dma transfer of the cdb has ended; decode the resulting
 * interrupt and hand off to fas_handle_unknown() to figure out
 * the next bus phase.
 */
static int
fas_handle_cmd_done(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t intr = fas->f_intr;
	volatile struct dma *dmar = fas->f_dma;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_START,
	    "fas_handle_cmd_done_start");
	EPRINTF("fas_handle_cmd_done\n");

	/*
	 * We should have gotten a BUS SERVICE interrupt.
	 * If it isn't that, and it isn't a DISCONNECT
	 * interrupt, we have a "cannot happen" situation.
	 */
	if ((intr & FAS_INT_BUS) == 0) {
		if ((intr & FAS_INT_DISCON) == 0) {
			fas_printstate(fas, "cmd transmission error");
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_CMD_DONE_ABORT1_END,
			    "fas_handle_cmd_done_end (abort1)");
			return (ACTION_ABORT_CURCMD);
		}
	} else {
		sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
	}

	/* snapshot the dma status, then quiesce the dma engine */
	fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr);
	FAS_FLUSH_DMA(fas);

	New_state(fas, ACTS_UNKNOWN);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_END,
	    "fas_handle_cmd_done_end");
	return (fas_handle_unknown(fas));
}
5824 
5825 /*
5826  * Begin to send a message out
5827  */
static int
fas_handle_msg_out_start(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t *msgout = fas->f_cur_msgout;
	uchar_t amt = fas->f_omsglen;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_START,
	    "fas_handle_msg_out_start");
	EPRINTF("fas_handle_msg_out_start\n");

	/*
	 * Check to make *sure* that we are really
	 * in MESSAGE OUT phase. If the last state
	 * was ACTS_MSG_OUT_DONE, then we are trying
	 * to resend a message that the target stated
	 * had a parity error in it.
	 *
	 * If this is the case, mark the completion reason as CMD_NOMSGOUT.
	 * XXX: Right now, we just *drive* on. Should we abort the command?
	 */
	if ((fas->f_stat & FAS_PHASE_MASK) != FAS_PHASE_MSG_OUT &&
	    fas->f_laststate == ACTS_MSG_OUT_DONE) {
		fas_log(fas, CE_WARN,
		    "Target %d refused message resend", Tgt(sp));
		fas_set_pkt_reason(fas, sp, CMD_NOMSGOUT, 0);
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_PHASEMANAGE_END,
		    "fas_handle_msg_out_end (ACTION_PHASEMANAGE)");
		return (ACTION_PHASEMANAGE);
	}

	/*
	 * Clean the fifo.
	 */
	fas_reg_cmd_write(fas, CMD_FLUSH);

	if (amt == 0) {
		/*
		 * no msg to send
		 */
		*msgout = MSG_NOP;
		amt = fas->f_omsglen = 1;
	}

	/*
	 * If msg only 1 byte, just dump it in the fifo and go.  For
	 * multi-byte msgs, dma them to save time.  If we have no
	 * msg to send and we're in msg out phase, send a NOP.
	 */
	fas->f_last_msgout = *msgout;

	/*
	 * There is a bug in the fas366 that occasionally
	 * deasserts the ATN signal prematurely when we send
	 * the sync/wide negotiation bytes out using DMA. The
	 * workaround here is to send the negotiation bytes out
	 * using PIO
	 */
	fas_write_fifo(fas, msgout, fas->f_omsglen, 1);
	fas_reg_cmd_write(fas, CMD_TRAN_INFO);

	EPRINTF2("amt=%x, last_msgout=%x\n", amt, fas->f_last_msgout);

	New_state(fas, ACTS_MSG_OUT_DONE);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_END,
	    "fas_handle_msg_out_end");
	return (ACTION_RETURN);
}
5897 
/*
 * The message-out transfer has ended; decode what the target did
 * with it.  A disconnect after ABORT/ABORT TAG/DEVICE RESET means
 * the message succeeded (ACTION_FINISH); staying in MESSAGE OUT
 * phase means the target saw a parity error and wants a resend;
 * anything else is resolved through fas_handle_unknown().
 */
static int
fas_handle_msg_out_done(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t msgout, phase;
	int target = Tgt(sp);
	int	amt = fas->f_omsglen;
	int action;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_START,
	    "fas_handle_msg_out_done_start");
	msgout = fas->f_cur_msgout[0];
	/* for tagged msgs the real message follows the two tag bytes */
	if ((msgout == MSG_HEAD_QTAG) || (msgout == MSG_SIMPLE_QTAG)) {
		msgout = fas->f_cur_msgout[2];
	}
	EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
		fas->f_cur_msgout[0], fas->f_cur_msgout[1],
		fas->f_cur_msgout[2], fas->f_last_msgout);

	EPRINTF1("fas_handle_msgout_done: msgout=%x\n", msgout);

	/*
	 * flush fifo, just in case some bytes were not sent
	 */
	fas_reg_cmd_write(fas, CMD_FLUSH);

	/*
	 * If the FAS disconnected, then the message we sent caused
	 * the target to decide to drop BSY* and clear the bus.
	 */
	if (fas->f_intr == FAS_INT_DISCON) {
		if (msgout == MSG_DEVICE_RESET || msgout == MSG_ABORT ||
		    msgout == MSG_ABORT_TAG) {
			/*
			 * If we sent a device reset msg, then we need to do
			 * a synch negotiate again unless we have already
			 * inhibited synch.
			 */
			if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
				fas->f_abort_msg_sent++;
				if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
					fas_set_pkt_reason(fas, sp,
					    CMD_ABORTED, STAT_ABORTED);
				}
			} else if (msgout == MSG_DEVICE_RESET) {
				fas->f_reset_msg_sent++;
				if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
					fas_set_pkt_reason(fas, sp,
					    CMD_RESET, STAT_DEV_RESET);
				}
				fas_force_renegotiation(fas, Tgt(sp));
			}
			EPRINTF2("Successful %s message to target %d\n",
			    scsi_mname(msgout), target);

			if (sp->cmd_flags & CFLAG_CMDPROXY) {
				sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
			}
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_OUT_DONE_FINISH_END,
			    "fas_handle_msg_out_done_end (ACTION_FINISH)");
			return (ACTION_FINISH);
		}
		/*
		 * If the target dropped busy on any other message, it
		 * wasn't expected. We will let the code in fas_phasemanage()
		 * handle this unexpected bus free event.
		 */
		goto out;
	}

	/*
	 * What phase have we transitioned to?
	 */
	phase = fas->f_stat & FAS_PHASE_MASK;

	/*
	 * If we finish sending a message out, and we are
	 * still in message out phase, then the target has
	 * detected one or more parity errors in the message
	 * we just sent and it is asking us to resend the
	 * previous message.
	 */
	if ((fas->f_intr & FAS_INT_BUS) && phase == FAS_PHASE_MSG_OUT) {
		/*
		 * As per SCSI-2 specification, if the message to
		 * be re-sent is greater than one byte, then we
		 * have to set ATN*.
		 */
		if (amt > 1) {
			fas_assert_atn(fas);
		}
		fas_log(fas, CE_WARN,
		    "SCSI bus MESSAGE OUT phase parity error");
		sp->cmd_pkt->pkt_statistics |= STAT_PERR;
		New_state(fas, ACTS_MSG_OUT);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_MSG_OUT_DONE_PHASEMANAGE_END,
		    "fas_handle_msg_out_done_end (ACTION_PHASEMANAGE)");
		return (ACTION_PHASEMANAGE);
	}


out:
	fas->f_last_msgout = msgout;
	fas->f_omsglen = 0;
	New_state(fas, ACTS_UNKNOWN);
	action = fas_handle_unknown(fas);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_END,
	    "fas_handle_msg_out_done_end");
	return (action);
}
6010 
6011 static int
6012 fas_twobyte_msg(struct fas *fas)
6013 {
6014 	struct fas_cmd *sp = fas->f_current_sp;
6015 
6016 	if ((fas->f_imsgarea[0] == MSG_IGNORE_WIDE_RESID) &&
6017 	    (fas->f_imsgarea[1] == 1)) {
6018 		int xfer_amt;
6019 
6020 		/*
6021 		 * Knock off one byte if there
6022 		 * is a last transfer and is even number of bytes
6023 		 */
6024 		xfer_amt = sp->cmd_data_count - sp->cmd_saved_data_count;
6025 		if (xfer_amt && (!(xfer_amt & 1))) {
6026 			ASSERT(sp->cmd_data_count > 0);
6027 			sp->cmd_data_count--;
6028 			sp->cmd_cur_addr--;
6029 		}
6030 		IPRINTF1("ignore wide resid %d\n", fas->f_imsgarea[1]);
6031 		New_state(fas, ACTS_UNKNOWN);
6032 		return (0);
6033 	}
6034 
6035 	fas_log(fas, CE_WARN,
6036 	    "Two byte message '%s' 0x%x rejected",
6037 	    scsi_mname(fas->f_imsgarea[0]), fas->f_imsgarea[1]);
6038 	return (MSG_REJECT);
6039 }
6040 
6041 /*
6042  * handle receiving extended messages
6043  */
static int
fas_multibyte_msg(struct fas *fas)
{
#ifdef FASDEBUG
	static char *mbs =
	    "Target %d now Synchronous at %d.%d MB/s max transmit rate\n";
	static char *mbs1 =
	    "Target %d now Synchronous at %d.0%d MB/s max transmit rate\n";
	static char *mbs2 =
	    "Target %d now Synchronous at %d.00%d MB/s max transmit rate\n";
#endif
	struct fas_cmd *sp = fas->f_current_sp;
	volatile struct fasreg *fasreg = fas->f_reg;
	uchar_t emsg = fas->f_imsgarea[2];	/* extended message code */
	int tgt = Tgt(sp);
	int msgout = 0;		/* response msg to send, 0 = none */

	EPRINTF("fas_multibyte_msg:\n");

	if (emsg == MSG_SYNCHRONOUS) {
		uint_t period, offset, regval;
		uint_t minsync, maxsync, clockval;
		uint_t xfer_freq, xfer_div, xfer_mod, xfer_rate;

		/* period is in units of 4 ns, offset in REQ/ACK pairs */
		period = fas->f_imsgarea[3] & 0xff;
		offset = fas->f_imsgarea[4] & 0xff;
		minsync = MIN_SYNC_PERIOD(fas);
		maxsync = MAX_SYNC_PERIOD(fas);
		DPRINTF5("sync msg received: %x %x %x %x %x\n",
		    fas->f_imsgarea[0], fas->f_imsgarea[1],
		    fas->f_imsgarea[2], fas->f_imsgarea[3],
		    fas->f_imsgarea[4]);
		DPRINTF3("received period %d offset %d from tgt %d\n",
		    period, offset, tgt);
		DPRINTF3("calculated minsync %d, maxsync %d for tgt %d\n",
		    minsync, maxsync, tgt);
		DPRINTF2("sync period %d, neg period %d\n",
		    fas->f_sync_period[tgt], fas->f_neg_period[tgt]);

		/* odd f_sdtr_sent count means the target initiated the sdtr */
		if ((++(fas->f_sdtr_sent)) & 1) {
			/*
			 * In cases where the target negotiates synchronous
			 * mode before we do, and we either have sync mode
			 * disabled, or this target is known to be a weak
			 * signal target, we send back a message indicating
			 * a desire to stay in asynchronous mode (the SCSI-2
			 * spec states that if we have synchronous capability
			 * then we cannot reject a SYNCHRONOUS DATA TRANSFER
			 * REQUEST message).
			 */
			IPRINTF1("SYNC negotiation initiated by target %d\n",
			    tgt);

			msgout = MSG_EXTENDED;

			/* clamp the target's period/offset to our limits */
			period =
			    period ? max(period, MIN_SYNC_PERIOD(fas)) : 0;

			if (fas->f_backoff & (1<<tgt)) {
				period = period ?
				    max(period, fas->f_neg_period[tgt]) : 0;
			}
			offset = min(offset, fas_default_offset);
		}
		xfer_freq = regval = 0;

		/*
		 * If the target's offset is bigger than ours,
		 * the target has violated the scsi protocol.
		 */
		if (offset > fas_default_offset) {
			period = offset = 0;
			msgout = MSG_REJECT;
		}

		if (offset && (period > maxsync)) {
			/*
			 * We cannot transmit data in synchronous
			 * mode this slow, so convert to asynchronous
			 * mode.
			 */
			msgout = MSG_EXTENDED;
			period = offset = 0;

		} else if (offset && (period < minsync)) {
			/*
			 * If the target's period is less than ours,
			 * the target has violated the scsi protocol.
			 */
			period = offset = 0;
			msgout = MSG_REJECT;

		} else if (offset) {
			/*
			 * Conversion method for received PERIOD value
			 * to the number of input clock ticks to the FAS.
			 *
			 * We adjust the input period value such that
			 * we always will transmit data *not* faster
			 * than the period value received.
			 */

			clockval = fas->f_clock_cycle / 1000;
			regval = (((period << 2) + clockval - 1) / clockval);

			/*
			 * correction if xfer rate <= 5MB/sec
			 * XXX do we need this?
			 */
			if (regval && (period >= FASTSCSI_THRESHOLD)) {
				regval--;
			}
		}

		/* remember the (possibly zeroed) agreement per target */
		fas->f_offset[tgt] = offset;
		fas->f_neg_period[tgt] = period;

		/*
		 * It is now safe to produce a response to a target
		 * initiated sdtr.  period and offset have been checked.
		 */
		if (msgout == MSG_EXTENDED) {
			fas_make_sdtr(fas, 0, tgt);
			period = fas->f_neg_period[tgt];
			offset = (fas->f_offset[tgt] & 0xf);
		}

		if (offset) {
			/* non-zero offset: program the chip for sync mode */
			fas->f_sync_period[tgt] = regval & SYNC_PERIOD_MASK;
			fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period,
			    fas->f_sync_period[tgt]);

			fas->f_offset[tgt] = offset | fas->f_req_ack_delay;
			fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset,
				fas->f_offset[tgt]);

			/*
			 * if transferring > 5 MB/sec then enable
			 * fastscsi in conf3
			 */
			if (period < FASTSCSI_THRESHOLD) {
				fas->f_fasconf3[tgt] |= FAS_CONF3_FASTSCSI;
			} else {
				fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
			}

			fas_reg_write(fas, (uchar_t *)&fasreg->fas_conf3,
			    fas->f_fasconf3[tgt]);

			DPRINTF4("period %d (%d), offset %d to tgt %d\n",
				period,
				fas->f_sync_period[tgt] & SYNC_PERIOD_MASK,
				fas->f_offset[tgt] & 0xf, tgt);
			DPRINTF1("req/ack delay = %x\n", fas->f_req_ack_delay);
			DPRINTF1("conf3 = %x\n", fas->f_fasconf3[tgt]);
#ifdef FASDEBUG
			/*
			 * Convert input clock cycle per
			 * byte to nanoseconds per byte.
			 * (ns/b), and convert that to
			 * k-bytes/second.
			 */
			xfer_freq = FAS_SYNC_KBPS((regval *
				fas->f_clock_cycle) / 1000);
			xfer_rate = ((fas->f_nowide & (1<<tgt))? 1 : 2) *
						xfer_freq;
			xfer_div = xfer_rate / 1000;
			xfer_mod = xfer_rate % 1000;


			/* pick the format string that pads the fraction */
			if (xfer_mod > 99) {
				IPRINTF3(mbs, tgt, xfer_div, xfer_mod);
			} else if (xfer_mod > 9) {
				IPRINTF3(mbs1, tgt, xfer_div, xfer_mod);
			} else {
				IPRINTF3(mbs2, tgt, xfer_div, xfer_mod);
			}
#endif
			fas->f_sync_enabled |= (1<<tgt);

		} else {
			/*
			 * We are converting back to async mode.
			 */
			fas_revert_to_async(fas, tgt);
		}

		/*
		 * If this target violated the scsi spec, reject the
		 * sdtr msg and don't negotiate sdtr again.
		 */
		if (msgout == MSG_REJECT) {
			fas->f_nosync |= (1<<tgt);
		}

		fas->f_props_update |= (1<<tgt);

	} else	if (emsg == MSG_WIDE_DATA_XFER) {
		uchar_t width = fas->f_imsgarea[3] & 0xff;

		DPRINTF4("wide msg received: %x %x %x %x\n",
		    fas->f_imsgarea[0], fas->f_imsgarea[1],
		    fas->f_imsgarea[2], fas->f_imsgarea[3]);

		/* always renegotiate sync after wide */
		msgout = MSG_EXTENDED;

		/* odd f_wdtr_sent count means the target initiated the wdtr */
		if ((++(fas->f_wdtr_sent)) &	1) {
			IPRINTF1("Wide negotiation initiated by target %d\n",
			    tgt);
			/*
			 * allow wide neg even if the target driver hasn't
			 * enabled wide yet.
			 */
			fas->f_nowide &= ~(1<<tgt);
			fas_make_wdtr(fas, 0, tgt, width);
			IPRINTF1("sending wide sync %d back\n", width);
			/*
			 * Let us go back to async mode(SCSI spec)
			 * and depend on target to do sync
			 * after wide negotiations.
			 * If target does not do a sync neg and enters
			 * async mode we will negotiate sync on next command
			 */
			fas_revert_to_async(fas, tgt);
			fas->f_sync_known &= ~(1<<tgt);
		} else {
			/*
			 * renegotiate sync after wide
			 */
			fas_set_wide_conf3(fas, tgt, width);
			ASSERT(width <= 1);
			fas->f_wdtr_sent = 0;
			if ((fas->f_nosync & (1<<tgt)) == 0) {
				fas_make_sdtr(fas, 0, tgt);
			} else {
				msgout = 0;
			}
		}

		fas->f_props_update |= (1<<tgt);

	} else if (emsg == MSG_MODIFY_DATA_PTR) {
		msgout = MSG_REJECT;
	} else {
		fas_log(fas, CE_WARN,
		    "Rejecting message %s 0x%x from Target %d",
		    scsi_mname(MSG_EXTENDED), emsg, tgt);
		msgout = MSG_REJECT;
	}
/* NOTE(review): label currently has no goto referencing it */
out:
	New_state(fas, ACTS_UNKNOWN);
	return (msgout);
}
6298 
6299 /*
6300  * Back off sync negotiation
6301  * and got to async mode
6302  */
6303 static void
6304 fas_revert_to_async(struct fas *fas, int tgt)
6305 {
6306 	volatile struct fasreg *fasreg = fas->f_reg;
6307 
6308 	fas->f_sync_period[tgt] = 0;
6309 	fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period, 0);
6310 	fas->f_offset[tgt] = 0;
6311 	fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset, 0);
6312 	fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
6313 	fas_reg_write(fas, &fasreg->fas_conf3, fas->f_fasconf3[tgt]);
6314 	fas->f_sync_enabled &= ~(1<<tgt);
6315 }
6316 
6317 /*
6318  * handle an unexpected selection attempt
6319  * XXX look for better way: msg reject, drop off the bus
6320  */
static int
fas_handle_selection(struct fas *fas)
{
	/*
	 * Drop off the bus, flush the fifo, and re-arm the chip for
	 * reselection; the command order matters to the hardware.
	 */
	fas_reg_cmd_write(fas, CMD_DISCONNECT);
	fas_reg_cmd_write(fas, CMD_FLUSH);
	fas_reg_cmd_write(fas, CMD_EN_RESEL);
	return (ACTION_RETURN);
}
6329 
6330 /*
6331  * dma window handling
6332  */
6333 static int
6334 fas_restore_pointers(struct fas *fas, struct fas_cmd *sp)
6335 {
6336 	if (sp->cmd_data_count != sp->cmd_saved_data_count) {
6337 		sp->cmd_data_count = sp->cmd_saved_data_count;
6338 		sp->cmd_cur_addr = sp->cmd_saved_cur_addr;
6339 
6340 		if (sp->cmd_cur_win != sp->cmd_saved_win) {
6341 			sp->cmd_cur_win = sp->cmd_saved_win;
6342 			if (fas_set_new_window(fas, sp)) {
6343 				return (-1);
6344 			}
6345 		}
6346 		DPRINTF1("curaddr=%x\n", sp->cmd_cur_addr);
6347 	}
6348 	return (0);
6349 }
6350 
6351 static int
6352 fas_set_new_window(struct fas *fas, struct fas_cmd *sp)
6353 {
6354 	off_t offset;
6355 	size_t len;
6356 	uint_t count;
6357 
6358 	if (ddi_dma_getwin(sp->cmd_dmahandle, sp->cmd_cur_win,
6359 	    &offset, &len, &sp->cmd_dmacookie, &count) != DDI_SUCCESS) {
6360 		return (-1);
6361 	}
6362 
6363 	DPRINTF4("new window %x: off=%lx, len=%lx, count=%x\n",
6364 	    sp->cmd_cur_win, offset, len, count);
6365 
6366 	ASSERT(count == 1);
6367 	return (0);
6368 }
6369 
static int
fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end)
{

	/* are there more windows? (count them lazily, once per cmd) */
	if (sp->cmd_nwin == 0) {
		uint_t nwin = 0;
		(void) ddi_dma_numwin(sp->cmd_dmahandle, &nwin);
		sp->cmd_nwin = (uchar_t)nwin;
	}

	DPRINTF5(
	    "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%lx, nwin=%x\n",
	    sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
	    sp->cmd_nwin);

	if (sp->cmd_cur_win < sp->cmd_nwin) {
		sp->cmd_cur_win++;
		if (fas_set_new_window(fas, sp)) {
			/* roll back so the window index stays consistent */
			fas_printstate(fas, "cannot set new window");
			sp->cmd_cur_win--;
			return (-1);
		}
	/*
	 * if there are no more windows, we have a data overrun condition
	 */
	} else {
		int slot = sp->cmd_slot;

		fas_printstate(fas, "data transfer overrun");
		fas_set_pkt_reason(fas, sp, CMD_DATA_OVR, 0);

		/*
		 * if we get data transfer overruns, assume we have
		 * a weak scsi bus. Note that this won't catch consistent
		 * underruns or other noise related syndromes.
		 */
		fas_sync_wide_backoff(fas, sp, slot);
		return (-1);
	}
	/* new window established; restart from its first cookie address */
	sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;
	DPRINTF1("cur_addr=%x\n", sp->cmd_cur_addr);
	return (0);
}
6414 
6415 /*
6416  * dma error handler
6417  */
6418 static int
6419 fas_check_dma_error(struct fas *fas)
6420 {
6421 	/*
6422 	 * was there a dma error that	caused fas_intr_svc() to be called?
6423 	 */
6424 	if (fas->f_dma->dma_csr & DMA_ERRPEND) {
6425 		/*
6426 		 * It would be desirable to set the ATN* line and attempt to
6427 		 * do the whole schmear of INITIATOR DETECTED ERROR here,
6428 		 * but that is too hard to do at present.
6429 		 */
6430 		fas_log(fas, CE_WARN, "Unrecoverable DMA error");
6431 		fas_printstate(fas, "dma error");
6432 		fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6433 		return (-1);
6434 	}
6435 	return (0);
6436 }
6437 
6438 /*
6439  * check for gross error or spurious interrupt
6440  */
6441 static int
6442 fas_handle_gross_err(struct fas *fas)
6443 {
6444 	volatile struct fasreg *fasreg = fas->f_reg;
6445 
6446 	fas_log(fas, CE_WARN,
6447 		    "gross error in fas status (%x)", fas->f_stat);
6448 
6449 	IPRINTF5("fas_cmd=%x, stat=%x, intr=%x, step=%x, fifoflag=%x\n",
6450 		    fasreg->fas_cmd, fas->f_stat, fas->f_intr, fasreg->fas_step,
6451 		    fasreg->fas_fifo_flag);
6452 
6453 	fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6454 
6455 	fas_internal_reset(fas, FAS_RESET_FAS);
6456 	return (ACTION_RESET);
6457 }
6458 
6459 
6460 /*
6461  * handle illegal cmd interrupt or (external) bus reset cleanup
6462  */
6463 static int
6464 fas_illegal_cmd_or_bus_reset(struct fas *fas)
6465 {
6466 	/*
6467 	 * If we detect a SCSI reset, we blow away the current
6468 	 * command (if there is one) and all disconnected commands
6469 	 * because we now don't know the state of them at all.
6470 	 */
6471 	ASSERT(fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET));
6472 
6473 	if (fas->f_intr & FAS_INT_RESET) {
6474 		return (ACTION_FINRST);
6475 	}
6476 
6477 	/*
6478 	 * Illegal cmd to fas:
6479 	 * This should not happen. The one situation where
6480 	 * we can get an ILLEGAL COMMAND interrupt is due to
6481 	 * a bug in the FAS366 during reselection which we
6482 	 * should be handling in fas_reconnect().
6483 	 */
6484 	if (fas->f_intr & FAS_INT_ILLEGAL) {
6485 		IPRINTF1("lastcmd=%x\n", fas->f_reg->fas_cmd);
6486 		fas_printstate(fas, "ILLEGAL bit set");
6487 		return (ACTION_RESET);
6488 	}
6489 	/*NOTREACHED*/
6490 }
6491 
6492 /*
6493  * set throttles for all luns of this target
6494  */
static void
fas_set_throttles(struct fas *fas, int slot, int n, int what)
{
	int i;

	/*
	 * if the bus is draining/quiesced, no changes to the throttles
	 * are allowed. Not allowing change of throttles during draining
	 * limits error recovery but will reduce draining time
	 *
	 * all throttles should have been set to HOLD_THROTTLE
	 */
	if (fas->f_softstate & (FAS_SS_QUIESCED | FAS_SS_DRAINING)) {
		return;
	}

	/* n selects the scope: one lun, one target's luns, or all slots */
	ASSERT((n == 1) || (n == N_SLOTS) || (n == NLUNS_PER_TARGET));
	ASSERT((slot + n) <= N_SLOTS);
	if (n == NLUNS_PER_TARGET) {
		/* round down to the target's lun-0 slot */
		slot &= ~(NLUNS_PER_TARGET - 1);
	}

	for (i = slot; i < (slot + n); i++) {
		if (what == HOLD_THROTTLE) {
			fas->f_throttle[i] = HOLD_THROTTLE;
		} else if ((fas->f_reset_delay[i/NLUNS_PER_TARGET]) == 0) {
			/* only touch throttles when no reset delay pending */
			if (what == MAX_THROTTLE) {
				/* non-tagged targets get one cmd at a time */
				int tshift = 1 << (i/NLUNS_PER_TARGET);
				fas->f_throttle[i] = (short)
					((fas->f_notag & tshift)? 1 : what);
			} else {
				fas->f_throttle[i] = what;
			}
		}
	}
}
6531 
static void
fas_set_all_lun_throttles(struct fas *fas, int slot, int what)
{
	/*
	 * set throttles for all luns of the target that owns this slot;
	 * fas_set_throttle will adjust slot to starting at LUN 0
	 */
	fas_set_throttles(fas, slot, NLUNS_PER_TARGET, what);
}
6540 
static void
fas_full_throttle(struct fas *fas, int slot)
{
	/* restore this single slot's throttle to its maximum */
	fas_set_throttles(fas, slot, 1, MAX_THROTTLE);
}
6546 
6547 /*
6548  * run a polled cmd
6549  */
static void
fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp)
{
	/*
	 * Run sp to completion without interrupts: drain any active
	 * commands, start sp, and poll until it finishes or times out.
	 * On failure the cmd is removed, marked CMD_TRAN_ERR, and
	 * (for non-proxy cmds) the bus is reset.
	 */
	int limit, i, n;
	int timeout = 0;	/* usecs spent waiting for the slot to drain */

	DPRINTF4("runpoll: slot=%x, cmd=%x, current_sp=0x%p, tcmds=%x\n",
		slot, *((uchar_t *)sp->cmd_pkt->pkt_cdbp),
		(void *)fas->f_current_sp, fas->f_tcmds[slot]);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_START, "fas_runpoll_start");

	/*
	 * wait for cmd to complete
	 * don't start new cmds so set throttles to HOLD_THROTTLE
	 */
	while ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {
		if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
			fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
		}
		if ((fas->f_state != STATE_FREE) || INTPENDING(fas)) {
			if (fas_dopoll(fas, POLL_TIMEOUT) <= 0) {
				IPRINTF("runpoll: timeout on draining\n");
				goto bad;
			}
		}

		ASSERT(fas->f_state == STATE_FREE);
		ASSERT(fas->f_current_sp == NULL);

		/*
		 * if this is not a proxy cmd, don't start the cmd
		 * without draining the active cmd(s)
		 * for proxy cmds, we zap the active cmd and assume
		 * that the caller will take care of this
		 * For tagged cmds, wait with submitting a non-tagged
		 * cmd until the queue has been drained
		 * If the cmd is a request sense, then draining won't
		 * help since we are in contingence allegiance condition
		 */
		if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
			uchar_t *cmdp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;

			if ((fas->f_tcmds[slot]) &&
			    (NOTAG(Tgt(sp)) ||
			    (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
			    (*cmdp != SCMD_REQUEST_SENSE)))) {
				if (timeout < POLL_TIMEOUT) {
					timeout += 100;
					drv_usecwait(100);
					continue;
				} else {
					fas_log(fas, CE_WARN,
					    "polled cmd failed (target busy)");
					goto cleanup;
				}
			}
		}

		/*
		 * If the draining of active commands killed the
		 * the current polled command, we're done..
		 */
		if (sp->cmd_flags & CFLAG_COMPLETED) {
			break;
		}

		/*
		 * ensure we are not accessing a target too quickly
		 * after a reset. the throttles get set back later
		 * by the reset delay watch; hopefully, we don't go
		 * thru this loop more than once
		 */
		if (fas->f_reset_delay[slot/NLUNS_PER_TARGET]) {
			IPRINTF1("reset delay set for slot %x\n", slot);
			drv_usecwait(fas->f_scsi_reset_delay * 1000);
			for (i = 0; i < NTARGETS_WIDE; i++) {
				if (fas->f_reset_delay[i]) {
					int s = i * NLUNS_PER_TARGET;
					int e = s + NLUNS_PER_TARGET;
					fas->f_reset_delay[i] = 0;
					for (; s < e; s++) {
						fas_full_throttle(fas, s);
					}
				}
			}
		}

		/*
		 * fas_startcmd() will return false if preempted
		 * or draining
		 */
		if (fas_startcmd(fas, sp) != TRUE) {
			IPRINTF("runpoll: cannot start new cmds\n");
			ASSERT(fas->f_current_sp != sp);
			continue;
		}

		/*
		 * We're now 'running' this command.
		 *
		 * fas_dopoll will always return when
		 * fas->f_state is STATE_FREE, and
		 */
		limit = sp->cmd_pkt->pkt_time * 1000000;
		if (limit == 0) {
			limit = POLL_TIMEOUT;
		}

		/*
		 * if the cmd disconnected, the first call to fas_dopoll
		 * will return with bus free; we go thru the loop one more
		 * time and wait limit usec for the target to reconnect
		 */
		for (i = 0; i <= POLL_TIMEOUT; i += 100) {

			if ((n = fas_dopoll(fas, limit)) <= 0) {
				IPRINTF("runpoll: timeout on polling\n");
				goto bad;
			}

			/*
			 * If a preemption occurred that caused this
			 * command to actually not start, go around
			 * the loop again. If CFLAG_COMPLETED is set, the
			 * command completed
			 */
			if ((sp->cmd_flags & CFLAG_COMPLETED) ||
			    (sp->cmd_pkt->pkt_state == 0)) {
				break;
			}

			/*
			 * the bus may have gone free because the target
			 * disconnected; go thru the loop again
			 */
			ASSERT(fas->f_state == STATE_FREE);
			if (n == 0) {
				/*
				 * bump i, we have waited limit usecs in
				 * fas_dopoll
				 */
				i += limit - 100;
			}
		}

		if ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {

			if (i > POLL_TIMEOUT) {
				IPRINTF("polled timeout on disc. cmd\n");
				goto bad;
			}

			if (sp->cmd_pkt->pkt_state) {
				/*
				 * don't go thru the loop again; the cmd
				 * was already started
				 */
				IPRINTF("fas_runpoll: cmd started??\n");
				goto bad;
			}
		}
	}

	/*
	 * blindly restore throttles which is preferable over
	 * leaving throttle hanging at 0 and noone to clear it
	 */
	if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
		fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
	}

	/*
	 * ensure that the cmd is completely removed
	 */
	fas_remove_cmd(fas, sp, 0);

	/*
	 * If we stored up commands to do, start them off now.
	 */
	if ((fas->f_state == STATE_FREE) &&
	    (!(sp->cmd_flags & CFLAG_CMDPROXY))) {
		(void) fas_ustart(fas);
	}
exit:
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_END, "fas_runpoll_end");
	return;

bad:
	fas_log(fas, CE_WARN, "Polled cmd failed");
#ifdef FASDEBUG
	fas_printstate(fas, "fas_runpoll: polled cmd failed");
#endif /* FASDEBUG */

cleanup:
	fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);

	/*
	 * clean up all traces of this sp because fas_runpoll will return
	 * before fas_reset_recovery() cleans up
	 */
	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

	if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
		(void) fas_reset_bus(fas);
	}
	goto exit;
}
6760 
6761 /*
6762  * Poll for command completion (i.e., no interrupts)
6763  * limit is in usec (and will not be very accurate)
6764  *
6765  * the assumption is that we only run polled cmds in interrupt context
6766  * as scsi_transport will filter out FLAG_NOINTR
6767  */
6768 static int
6769 fas_dopoll(struct fas *fas, int limit)
6770 {
6771 	int i, n;
6772 
6773 	/*
6774 	 * timeout is not very accurate since we don't know how
6775 	 * long the poll takes
6776 	 * also if the packet gets started fairly late, we may
6777 	 * timeout prematurely
6778 	 * fas_dopoll always returns if e_state transitions to STATE_FREE
6779 	 */
6780 	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_START, "fas_dopoll_start");
6781 
6782 	if (limit == 0) {
6783 		limit = POLL_TIMEOUT;
6784 	}
6785 
6786 	for (n = i = 0; i < limit; i += 100) {
6787 		if (INTPENDING(fas)) {
6788 			fas->f_polled_intr = 1;
6789 			n++;
6790 			(void) fas_intr_svc(fas);
6791 			if (fas->f_state == STATE_FREE)
6792 				break;
6793 		}
6794 		drv_usecwait(100);
6795 	}
6796 
6797 	if (i >= limit && fas->f_state != STATE_FREE) {
6798 		fas_printstate(fas, "polled command timeout");
6799 		n = -1;
6800 	}
6801 	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_END,
6802 		"fas_dopoll_end: rval %x", n);
6803 	return (n);
6804 }
6805 
6806 /*
6807  * prepare a sync negotiation message
6808  */
static void
fas_make_sdtr(struct fas *fas, int msgout_offset, int target)
{
	/*
	 * Build a 5-byte SDTR extended message in f_cur_msgout at
	 * msgout_offset and set f_omsglen accordingly; period/offset
	 * are sanity checked against per-target scsi-options first.
	 */
	uchar_t *p = fas->f_cur_msgout + msgout_offset;
	ushort_t tshift = 1<<target;
	uchar_t period = MIN_SYNC_PERIOD(fas);
	uchar_t offset = fas_default_offset;

	/*
	 * If this target experienced a sync backoff use the
	 * target's sync speed that was adjusted in
	 * fas_sync_wide_backoff.  For second sync backoff,
	 * offset will be adjusted below in sanity checks.
	 */
	if (fas->f_backoff & tshift) {
		period = fas->f_neg_period[target];
	}

	/*
	 * If this is a response to a target initiated sdtr,
	 * use the agreed upon values.
	 */
	if (fas->f_sdtr_sent & 1) {
		period = fas->f_neg_period[target];
		offset = fas->f_offset[target];
	}

	/*
	 * If the target driver disabled
	 * sync then make offset = 0
	 */
	if (fas->f_force_async & tshift) {
		offset = 0;
	}

	/*
	 * sanity check of period and offset
	 */
	if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_FAST) {
		if (period < (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4)) {
			period = (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4);
		}
	} else if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_SYNC) {
		if (period < (uchar_t)(DEFAULT_SYNC_PERIOD/4)) {
			period = (uchar_t)(DEFAULT_SYNC_PERIOD/4);
		}
	} else {
		/* neither fast nor sync allowed: force async for good */
		fas->f_nosync |= tshift;
	}

	if (fas->f_nosync & tshift) {
		offset = 0;
	}

	if ((uchar_t)(offset & 0xf) > fas_default_offset) {
		offset = fas_default_offset | fas->f_req_ack_delay;
	}

	/* remember what we are about to propose/acknowledge */
	fas->f_neg_period[target] = (uchar_t)period;
	fas->f_offset[target] = (uchar_t)offset;

	/* SDTR message: EXTENDED, length 3, code, period, offset */
	*p++ = (uchar_t)MSG_EXTENDED;
	*p++ = (uchar_t)3;
	*p++ = (uchar_t)MSG_SYNCHRONOUS;
	*p++ = period;
	*p++ = offset & 0xf;
	fas->f_omsglen = 5 + msgout_offset;

	IPRINTF2("fas_make_sdtr: period = %x, offset = %x\n",
		period, offset);
	/*
	 * increment sdtr flag, odd value indicates that we initiated
	 * the negotiation
	 */
	fas->f_sdtr_sent++;

	/*
	 * the target may reject the optional sync message so
	 * to avoid negotiating on every cmd, set sync known here
	 * we should not negotiate wide after sync again
	 */
	fas->f_sync_known |= 1<<target;
	fas->f_wide_known |= 1<<target;
}
6893 
6894 /*
6895  * prepare a wide negotiation message
6896  */
static void
fas_make_wdtr(struct fas *fas, int msgout_offset, int target, int width)
{
	/*
	 * Build a 4-byte WDTR extended message in f_cur_msgout at
	 * msgout_offset; width is clamped by scsi-options, the
	 * force-narrow setting and the chip's maximum transfer width.
	 */
	uchar_t *p = fas->f_cur_msgout + msgout_offset;

	if (((fas->f_target_scsi_options[target] & SCSI_OPTIONS_WIDE) == 0) ||
	    (fas->f_nowide & (1<<target))) {
		fas->f_nowide |= 1<<target;
		width = 0;
	}
	if (fas->f_force_narrow & (1<<target)) {
		width = 0;
	}
	width = min(FAS_XFER_WIDTH, width);

	/* WDTR message: EXTENDED, length 2, code, width */
	*p++ = (uchar_t)MSG_EXTENDED;
	*p++ = (uchar_t)2;
	*p++ = (uchar_t)MSG_WIDE_DATA_XFER;
	*p++ = (uchar_t)width;
	fas->f_omsglen = 4 + msgout_offset;
	IPRINTF1("fas_make_wdtr: width=%x\n", width);

	/*
	 * increment wdtr flag, odd value indicates that we initiated
	 * the negotiation
	 */
	fas->f_wdtr_sent++;

	/*
	 * the target may reject the optional wide message so
	 * to avoid negotiating on every cmd, set wide known here
	 */
	fas->f_wide_known |= 1<<target;

	/* program conf3 to match the width we are asking for */
	fas_set_wide_conf3(fas, target, width);
}
6933 
6934 /*
6935  * auto request sense support
6936  * create or destroy an auto request sense packet
6937  */
static int
fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap)
{
	/*
	 * Allocate a request sense packet using get_pktiopb
	 */
	struct fas_cmd *rqpktp;
	uchar_t slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
	struct buf *bp;
	struct arq_private_data *arq_data;

	/*
	 * if one exists, don't create another
	 */
	if (fas->f_arq_pkt[slot] != 0) {
		return (0);
	}

	/*
	 * it would be nicer if we could allow the target driver
	 * to specify the size but this is easier and OK for most
	 * drivers to use SENSE_LENGTH
	 * Allocate a request sense packet.
	 */
	/*
	 * NOTE(review): return values are not checked here; presumably
	 * SLEEP_FUNC makes these allocations block rather than fail —
	 * confirm against scsi_alloc_consistent_buf(9F)/scsi_init_pkt(9F)
	 */
	bp = scsi_alloc_consistent_buf(ap, (struct buf *)NULL,
	    SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	rqpktp = PKT2CMD(scsi_init_pkt(ap,
	    NULL, bp, CDB_GROUP0, 1, PKT_PRIV_LEN,
	    PKT_CONSISTENT, SLEEP_FUNC, NULL));
	arq_data =
	    (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
	arq_data->arq_save_bp = bp;

	/* build the REQUEST SENSE cdb; run at head of queue, no disconnect */
	RQ_MAKECOM_G0((CMD2PKT(rqpktp)),
	    FLAG_SENSING | FLAG_HEAD | FLAG_NODISCON,
	    (char)SCMD_REQUEST_SENSE, 0, (char)SENSE_LENGTH);
	rqpktp->cmd_flags |= CFLAG_CMDARQ;
	rqpktp->cmd_slot = slot;
	rqpktp->cmd_pkt->pkt_ha_private = rqpktp;
	fas->f_arq_pkt[slot] = rqpktp;

	/*
	 * we need a function ptr here so abort/reset can
	 * defer callbacks; fas_call_pkt_comp() calls
	 * fas_complete_arq_pkt() directly without releasing the lock
	 * However, since we are not calling back directly thru
	 * pkt_comp, don't check this with warlock
	 */
#ifndef __lock_lint
	rqpktp->cmd_pkt->pkt_comp =
		(void (*)(struct scsi_pkt *))fas_complete_arq_pkt;
#endif
	return (0);
}
6992 
6993 static int
6994 fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap)
6995 {
6996 	struct fas_cmd *rqpktp;
6997 	int slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
6998 
6999 	/*
7000 	 * if there is still a pkt saved or no rqpkt
7001 	 * then we cannot deallocate or there is nothing to do
7002 	 */
7003 	if ((rqpktp = fas->f_arq_pkt[slot]) != NULL) {
7004 		struct arq_private_data *arq_data =
7005 		    (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
7006 		struct buf *bp = arq_data->arq_save_bp;
7007 		/*
7008 		 * is arq pkt in use?
7009 		 */
7010 		if (arq_data->arq_save_sp) {
7011 			return (-1);
7012 		}
7013 
7014 		scsi_destroy_pkt(CMD2PKT(rqpktp));
7015 		scsi_free_consistent_buf(bp);
7016 		fas->f_arq_pkt[slot] = 0;
7017 	}
7018 	return (0);
7019 }
7020 
7021 /*
7022  * complete an arq packet by copying over transport info and the actual
7023  * request sense data; called with mutex held from fas_call_pkt_comp()
7024  */
void
fas_complete_arq_pkt(struct scsi_pkt *pkt)
{
	/*
	 * pkt is the request sense packet; recover the original command
	 * (ssp) saved in the arq private data, copy transport state and
	 * sense bytes into its scsi_arq_status, then complete it.
	 */
	struct fas *fas = ADDR2FAS(&pkt->pkt_address);
	struct fas_cmd *sp = pkt->pkt_ha_private;
	struct scsi_arq_status *arqstat;
	struct arq_private_data *arq_data =
		    (struct arq_private_data *)sp->cmd_pkt->pkt_private;
	struct fas_cmd *ssp = arq_data->arq_save_sp;
	struct buf *bp = arq_data->arq_save_bp;
	int	slot = sp->cmd_slot;

	DPRINTF1("completing arq pkt sp=0x%p\n", (void *)sp);
	ASSERT(sp == fas->f_arq_pkt[slot]);
	ASSERT(arq_data->arq_save_sp != NULL);
	ASSERT(ssp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);

	/* copy the sense pkt's transport results into the original pkt */
	arqstat = (struct scsi_arq_status *)(ssp->cmd_pkt->pkt_scbp);
	arqstat->sts_rqpkt_status = *((struct scsi_status *)
		(sp->cmd_pkt->pkt_scbp));
	arqstat->sts_rqpkt_reason = sp->cmd_pkt->pkt_reason;
	arqstat->sts_rqpkt_state  = sp->cmd_pkt->pkt_state;
	arqstat->sts_rqpkt_statistics = sp->cmd_pkt->pkt_statistics;
	arqstat->sts_rqpkt_resid  = sp->cmd_pkt->pkt_resid;
	arqstat->sts_sensedata =
	    *((struct scsi_extended_sense *)bp->b_un.b_addr);
	ssp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
	/* mark the arq pkt free for reuse */
	arq_data->arq_save_sp = NULL;

	/*
	 * ASC=0x47 is parity error
	 */
	if (arqstat->sts_sensedata.es_key == KEY_ABORTED_COMMAND &&
		arqstat->sts_sensedata.es_add_code == 0x47) {
		fas_sync_wide_backoff(fas, sp, slot);
	}

	fas_call_pkt_comp(fas, ssp);
}
7064 
7065 /*
7066  * handle check condition and start an arq packet
7067  */
static int
fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp)
{
	struct fas_cmd *arqsp =	fas->f_arq_pkt[sp->cmd_slot];
	struct arq_private_data *arq_data;
	struct buf *bp;

	/*
	 * bail out (completing sp as-is) if there is no arq pkt for
	 * this slot, if sp is itself the arq pkt, or if the caller's
	 * scb area is too small to hold a full scsi_arq_status
	 */
	if ((arqsp == NULL) || (arqsp == sp) ||
	    (sp->cmd_scblen < sizeof (struct scsi_arq_status))) {
		IPRINTF("no arq packet or cannot arq on arq pkt\n");
		fas_call_pkt_comp(fas, sp);
		return (0);
	}

	arq_data = (struct arq_private_data *)arqsp->cmd_pkt->pkt_private;
	bp = arq_data->arq_save_bp;

	ASSERT(sp->cmd_flags & CFLAG_FINISHED);
	ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
	DPRINTF3("start arq for slot=%x, arqsp=0x%p, rqpkt=0x%p\n",
	    sp->cmd_slot, (void *)arqsp, (void *)fas->f_arq_pkt[sp->cmd_slot]);
	/*
	 * only one arq can be in flight per slot; arq_save_sp records
	 * the original cmd the sense data belongs to
	 */
	if (arq_data->arq_save_sp != NULL) {
		IPRINTF("auto request sense already in progress\n");
		goto fail;
	}

	arq_data->arq_save_sp = sp;

	/* clear out stale sense data before requesting fresh data */
	bzero(bp->b_un.b_addr, sizeof (struct scsi_extended_sense));

	/*
	 * copy the timeout from the original packet by lack of a better
	 * value
	 * we could take the residue of the timeout but that could cause
	 * premature timeouts perhaps
	 */
	arqsp->cmd_pkt->pkt_time = sp->cmd_pkt->pkt_time;
	arqsp->cmd_flags &= ~CFLAG_TRANFLAG;
	ASSERT(arqsp->cmd_pkt->pkt_comp != NULL);

	/*
	 * make sure that auto request sense always goes out
	 * after queue full and after throttle was set to draining
	 */
	fas_full_throttle(fas, sp->cmd_slot);
	(void) fas_accept_pkt(fas, arqsp, NO_TRAN_BUSY);
	return (0);

fail:
	/* could not start the arq; fail the original cmd with CMD_TRAN_ERR */
	fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
	fas_log(fas, CE_WARN, "auto request sense failed\n");
	fas_dump_cmd(fas, sp);
	fas_call_pkt_comp(fas, sp);
	return (-1);
}
7123 
7124 
7125 /*
7126  * handle qfull condition
7127  */
static void
fas_handle_qfull(struct fas *fas, struct fas_cmd *sp)
{
	int slot = sp->cmd_slot;

	if ((++sp->cmd_qfull_retries > fas->f_qfull_retries[Tgt(sp)]) ||
		(fas->f_qfull_retries[Tgt(sp)] == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0. In either case
		 * we want the target driver's QFULL handling
		 * to kick in. We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		IPRINTF2("%d.%d: status queue full, retries over\n",
			Tgt(sp), Lun(sp));
		fas_set_all_lun_throttles(fas, slot, DRAIN_THROTTLE);
		fas_call_pkt_comp(fas, sp);
	} else {
		/*
		 * retry the cmd ourselves: throttle back to just below
		 * the current number of outstanding cmds (unless a reset
		 * delay is pending) and resubmit at the head of the
		 * ready queue
		 */
		if (fas->f_reset_delay[Tgt(sp)] == 0) {
			fas->f_throttle[slot] =
			    max((fas->f_tcmds[slot] - 2), 0);
		}
		IPRINTF3("%d.%d: status queue full, new throttle = %d, "
			"retrying\n", Tgt(sp), Lun(sp), fas->f_throttle[slot]);
		sp->cmd_pkt->pkt_flags |= FLAG_HEAD;
		sp->cmd_flags &= ~CFLAG_TRANFLAG;
		(void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (f_tcmds[] == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling starts
		 * (see psarc/1994/313); if there are commands outstanding,
		 * the throttle is set to (f_tcmds[] - 2)
		 */
		if (fas->f_throttle[slot] == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * fas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			fas_set_all_lun_throttles(fas, slot, QFULL_THROTTLE);
			if (fas->f_restart_cmd_timeid == 0) {
				fas->f_restart_cmd_timeid =
				    timeout(fas_restart_cmd, fas,
				    fas->f_qfull_retry_interval[Tgt(sp)]);
			}
		}
	}
}
7182 
7183 /*
7184  * invoked from timeout() to restart qfull cmds with throttle == 0
7185  */
7186 static void
7187 fas_restart_cmd(void *fas_arg)
7188 {
7189 	struct fas *fas = fas_arg;
7190 	int i;
7191 
7192 	IPRINTF("fas_restart_cmd:\n");
7193 
7194 	mutex_enter(FAS_MUTEX(fas));
7195 	fas->f_restart_cmd_timeid = 0;
7196 
7197 	for (i = 0; i < N_SLOTS; i += NLUNS_PER_TARGET) {
7198 		if (fas->f_reset_delay[i/NLUNS_PER_TARGET] == 0) {
7199 			if (fas->f_throttle[i] == QFULL_THROTTLE) {
7200 				fas_set_all_lun_throttles(fas,
7201 					i, MAX_THROTTLE);
7202 			}
7203 		}
7204 	}
7205 
7206 	(void) fas_ustart(fas);
7207 	mutex_exit(FAS_MUTEX(fas));
7208 }
7209 
7210 /*
7211  * Timeout handling:
7212  * Command watchdog routines
7213  */
7214 
/*ARGSUSED*/
static void
fas_watch(void *arg)
{
	struct fas *fas;
	ushort_t	props_update = 0;

	/* reader lock keeps the global fas list stable while we walk it */
	rw_enter(&fas_global_rwlock, RW_READER);

	for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {

		mutex_enter(FAS_MUTEX(fas));
		IPRINTF2("ncmds=%x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);

#ifdef FAS_PIO_COUNTS
	if (fas->f_total_cmds) {
		int n = fas->f_total_cmds;

		/* dump and reset the per-interval PIO access statistics */
		fas_log(fas, CE_NOTE,
	"total=%d, cmds=%d fas-rd=%d, fas-wrt=%d, dma-rd=%d, dma-wrt=%d\n",
			fas->f_total_cmds,
			fas->f_reg_cmds/n,
			fas->f_reg_reads/n, fas->f_reg_writes/n,
			fas->f_reg_dma_reads/n, fas->f_reg_dma_writes/n);

		fas->f_reg_reads = fas->f_reg_writes =
			fas->f_reg_dma_reads = fas->f_reg_dma_writes =
			fas->f_reg_cmds = fas->f_total_cmds = 0;
	}
#endif
		if (fas->f_ncmds) {
			int i;
			fas_watchsubr(fas);

			/*
			 * reset throttle. the throttle may have been
			 * too low if queue full was caused by
			 * another initiator
			 * Only reset throttle if no cmd active in slot 0
			 * (untagged cmd)
			 */
#ifdef FAS_TEST
			if (fas_enable_untagged) {
				fas_test_untagged++;
			}
#endif
			for (i = 0; i < N_SLOTS; i++) {
				if ((fas->f_throttle[i] > HOLD_THROTTLE) &&
				    (fas->f_active[i] &&
				    (fas->f_active[i]->f_slot[0] == NULL))) {
					fas_full_throttle(fas, i);
				}
			}
		}

		if (fas->f_props_update) {
			int i;
			/*
			 * f_mutex will be released and reentered in
			 * fas_props_update().
			 * Hence we save the fas->f_props_update now and
			 * set to 0 indicating that property has been
			 * updated. This will avoid a race condition with
			 * any thread that runs in interrupt context that
			 * attempts to set the f_props_update to non-zero value
			 */
			props_update = fas->f_props_update;
			fas->f_props_update = 0;
			for (i = 0; i < NTARGETS_WIDE; i++) {
				if (props_update & (1<<i)) {
					fas_update_props(fas, i);
				}
			}
		}
		/* drops FAS_MUTEX after servicing any waitQ entries */
		fas_check_waitQ_and_mutex_exit(fas);

	}
	rw_exit(&fas_global_rwlock);

again:
	/* re-arm the watchdog unless timeout handling was torn down */
	mutex_enter(&fas_global_mutex);
	if (fas_timeout_initted && fas_timeout_id) {
		fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
	}
	mutex_exit(&fas_global_mutex);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_WATCH_END, "fas_watch_end");
}
7302 
static void
fas_watchsubr(struct fas *fas)
{
	short slot;
	int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
	struct f_slots *tag_slots;

	for (slot = 0; slot < N_SLOTS; slot += d)  {

#ifdef FAS_TEST
		if (fas_btest) {
			fas_btest = 0;
			(void) fas_reset_bus(fas);
			return;
		}
		if (fas_force_timeout && fas->f_tcmds[slot]) {
			fas_cmd_timeout(fas, slot);
			fas_force_timeout = 0;
			return;
		}
		fas_test_reset(fas, slot);
		fas_test_abort(fas, slot);
#endif /* FAS_TEST */

		/*
		 * check tagged cmds first
		 */
		tag_slots = fas->f_active[slot];
		DPRINTF3(
		"fas_watchsubr: slot %x: tcmds=%x, timeout=%x\n",
		slot, fas->f_tcmds[slot], tag_slots->f_timeout);

		if ((fas->f_tcmds[slot] > 0) && (tag_slots->f_timebase)) {

			/*
			 * let f_timebase accumulate until it exceeds one
			 * watchdog tick before starting to decrement the
			 * slot's timeout budget below
			 */
			if (tag_slots->f_timebase <=
			    fas_scsi_watchdog_tick) {
				tag_slots->f_timebase +=
				    fas_scsi_watchdog_tick;
				continue;
			}

			tag_slots->f_timeout -= fas_scsi_watchdog_tick;

			/* budget exhausted: run timeout recovery now */
			if (tag_slots->f_timeout < 0) {
				fas_cmd_timeout(fas, slot);
				return;
			}
			/*
			 * timeout is imminent (within one more tick);
			 * drain all queues so recovery starts from a
			 * quiet bus
			 */
			if ((tag_slots->f_timeout) <=
			    fas_scsi_watchdog_tick) {
				IPRINTF1("pending timeout on slot=%x\n",
					slot);
				IPRINTF("draining all queues\n");
				fas_set_throttles(fas, 0, N_SLOTS,
					DRAIN_THROTTLE);
			}
		}
	}
}
7361 
7362 /*
7363  * timeout recovery
7364  */
static void
fas_cmd_timeout(struct fas *fas, int slot)
{
	int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
	int target, lun, i, n, tag, ncmds;
	struct fas_cmd *sp = NULL;
	struct fas_cmd *ssp;

	ASSERT(fas->f_tcmds[slot]);

#ifdef FAS_TEST
	if (fas_test_stop) {
		debug_enter("timeout");
	}
#endif

	/*
	 * set throttle back; no more draining necessary
	 */
	for (i = 0; i < N_SLOTS; i += d) {
		if (fas->f_throttle[i] == DRAIN_THROTTLE) {
			fas_full_throttle(fas, i);
		}
	}

	/* for untagged targets, the cmd sits in the slot's first entry */
	if (NOTAG(slot/NLUNS_PER_TARGET)) {
		sp = fas->f_active[slot]->f_slot[0];
	}

	/*
	 * if no interrupt pending for next second then the current
	 * cmd must be stuck; switch slot and sp to current slot and cmd
	 */
	if (fas->f_current_sp && fas->f_state != STATE_FREE) {
		/* poll up to 10000 x 100us = 1 second for an interrupt */
		for (i = 0; (i < 10000) && (INTPENDING(fas) == 0); i++) {
			drv_usecwait(100);
		}
		if (INTPENDING(fas) == 0) {
			slot = fas->f_current_sp->cmd_slot;
			sp = fas->f_current_sp;
		}
	}

	target = slot / NLUNS_PER_TARGET;
	lun = slot % NLUNS_PER_TARGET;

	/*
	 * update all outstanding  pkts for this slot
	 */
	n = fas->f_active[slot]->f_n_slots;
	for (ncmds = tag = 0; tag < n; tag++) {
		ssp = fas->f_active[slot]->f_slot[tag];
		/* pkt_time == 0 means no timeout requested for this pkt */
		if (ssp && ssp->cmd_pkt->pkt_time) {
			fas_set_pkt_reason(fas, ssp, CMD_TIMEOUT,
				STAT_TIMEOUT | STAT_ABORTED);
			fas_short_dump_cmd(fas, ssp);
			ncmds++;
		}
	}

	/*
	 * no timed-out cmds here?
	 */
	if (ncmds == 0) {
		return;
	}

	/*
	 * dump all we know about this timeout
	 */
	if (sp) {
		if (sp->cmd_flags & CFLAG_CMDDISC) {
			fas_log(fas, CE_WARN,
			    "Disconnected command timeout for Target %d.%d",
			    target, lun);
		} else {
			ASSERT(sp == fas->f_current_sp);
			fas_log(fas, CE_WARN,
			    "Connected command timeout for Target %d.%d",
			    target, lun);
			/*
			 * Current command timeout appears to relate often
			 * to noisy SCSI in synchronous mode.
			 */
			if (fas->f_state == ACTS_DATA_DONE) {
				fas_sync_wide_backoff(fas, sp, slot);
			}
		}
#ifdef FASDEBUG
		fas_printstate(fas, "timeout");
#endif
	} else {
		fas_log(fas, CE_WARN,
		    "Disconnected tagged cmd(s) (%d) timeout for Target %d.%d",
		    fas->f_tcmds[slot], target, lun);
	}

	/* abort/reset; ACTION_SEARCH means new work may now be started */
	if (fas_abort_cmd(fas, sp, slot) == ACTION_SEARCH) {
		(void) fas_istart(fas);
	}
}
7466 
7467 /*
7468  * fas_sync_wide_backoff() increases sync period and enables slow
7469  * cable mode.
7470  * the second time, we revert back to narrow/async
7471  * we count on a bus reset to disable wide in the target and will
7472  * never renegotiate wide again
7473  */
static void
fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
    int slot)
{
	char phase;
	ushort_t state = fas->f_state;
	uchar_t tgt = slot / NLUNS_PER_TARGET;
	uint_t tshift = 1 << tgt;

	/* snapshot the current bus phase for the debug printout below */
	phase = fas_reg_read(fas, &fas->f_reg->fas_stat);
	phase &=  FAS_PHASE_MASK;

	IPRINTF4(
	"fas_sync_wide_backoff: target %d: state=%x, phase=%x, sp=0x%p\n",
	    tgt, state, phase, (void *)sp);

#ifdef FASDEBUG
	if (fas_no_sync_wide_backoff) {
		return;
	}
#endif

	/*
	 * if this not the first time or sync is disabled
	 * thru scsi_options then disable wide
	 */
	if ((fas->f_backoff & tshift) ||
	    (fas->f_nosync & tshift)) {
		/*
		 * disable wide for just this target
		 */
		if ((fas->f_nowide & tshift) == 0) {
			fas_log(fas, CE_WARN,
			    "Target %d disabled wide SCSI mode", tgt);
		}
		/*
		 * do not reset the bit in f_nowide because that
		 * would not force a renegotiation of wide
		 * and do not change any register value yet because
		 * we may have reconnects before the renegotiations
		 */
		fas->f_target_scsi_options[tgt] &= ~SCSI_OPTIONS_WIDE;
	}

	/*
	 * reduce xfer rate. if this is the first time, reduce by
	 * 100%. second time, disable sync and wide.
	 */
	if (fas->f_offset[tgt] != 0) {
		/*
		 * do not reset the bit in f_nosync because that
		 * would not force a renegotiation of sync
		 */
		if (fas->f_backoff & tshift) {
			/* second backoff: give up on sync entirely */
			if ((fas->f_nosync & tshift) == 0) {
				fas_log(fas, CE_WARN,
				    "Target %d reverting to async. mode",
				    tgt);
			}
			fas->f_target_scsi_options[tgt] &=
				~(SCSI_OPTIONS_SYNC | SCSI_OPTIONS_FAST);
		} else {
			/* increase period by 100% */
			fas->f_neg_period[tgt] *= 2;

			fas_log(fas, CE_WARN,
			    "Target %d reducing sync. transfer rate", tgt);
		}
	}
	/* remember that this target has backed off once already */
	fas->f_backoff |= tshift;

	/*
	 * always enable slow cable mode, if not already enabled
	 */
	if ((fas->f_fasconf & FAS_CONF_SLOWMODE) == 0) {
		fas->f_fasconf |= FAS_CONF_SLOWMODE;
		fas_reg_write(fas, &fas->f_reg->fas_conf, fas->f_fasconf);
		IPRINTF("Reverting to slow SCSI cable mode\n");
	}

	/*
	 * Force sync renegotiation and update properties
	 */
	fas_force_renegotiation(fas, tgt);
	fas->f_props_update |= (1<<tgt);
}
7560 
7561 /*
7562  * handle failed negotiations (either reject or bus free condition)
7563  */
7564 static void
7565 fas_reset_sync_wide(struct fas *fas)
7566 {
7567 	struct fas_cmd *sp = fas->f_current_sp;
7568 	int tgt = Tgt(sp);
7569 
7570 	if (fas->f_wdtr_sent) {
7571 		IPRINTF("wide neg message rejected or bus free\n");
7572 		fas->f_nowide |= (1<<tgt);
7573 		fas->f_fasconf3[tgt] &= ~FAS_CONF3_WIDE;
7574 		fas_reg_write(fas, &fas->f_reg->fas_conf3,
7575 		fas->f_fasconf3[tgt]);
7576 		/*
7577 		 * clear offset just in case it goes to
7578 		 * data phase
7579 		 */
7580 		fas_reg_write(fas,
7581 		    (uchar_t *)&fas->f_reg->fas_sync_offset, 0);
7582 	} else if (fas->f_sdtr_sent) {
7583 		volatile struct fasreg *fasreg =
7584 					fas->f_reg;
7585 		IPRINTF("sync neg message rejected or bus free\n");
7586 		fas->f_nosync |= (1<<tgt);
7587 		fas->f_offset[tgt] = 0;
7588 		fas->f_sync_period[tgt] = 0;
7589 		fas_reg_write(fas,
7590 		    (uchar_t *)&fasreg->fas_sync_period, 0);
7591 		fas_reg_write(fas,
7592 		    (uchar_t *)&fasreg->fas_sync_offset, 0);
7593 		fas->f_offset[tgt] = 0;
7594 		fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
7595 		fas_reg_write(fas, &fasreg->fas_conf3,
7596 		    fas->f_fasconf3[tgt]);
7597 	}
7598 
7599 	fas_force_renegotiation(fas, tgt);
7600 }
7601 
7602 /*
7603  * force wide and sync renegotiation
7604  */
7605 static void
7606 fas_force_renegotiation(struct fas *fas, int target)
7607 {
7608 	ushort_t tshift = 1<<target;
7609 	fas->f_sync_known &= ~tshift;
7610 	fas->f_sync_enabled &= ~tshift;
7611 	fas->f_wide_known &= ~tshift;
7612 	fas->f_wide_enabled &= ~tshift;
7613 }
7614 
7615 /*
7616  * update conf3 register for wide negotiation
7617  */
7618 static void
7619 fas_set_wide_conf3(struct fas *fas, int target, int width)
7620 {
7621 	ASSERT(width <= 1);
7622 	switch (width) {
7623 	case 0:
7624 		fas->f_fasconf3[target] &= ~FAS_CONF3_WIDE;
7625 		break;
7626 	case 1:
7627 		fas->f_fasconf3[target] |= FAS_CONF3_WIDE;
7628 		fas->f_wide_enabled |= (1<<target);
7629 		break;
7630 	}
7631 
7632 	fas_reg_write(fas, &fas->f_reg->fas_conf3, fas->f_fasconf3[target]);
7633 	fas->f_fasconf3_reg_last = fas->f_fasconf3[target];
7634 }
7635 
7636 /*
7637  * Abort command handling
7638  *
7639  * abort current cmd, either by device reset or immediately with bus reset
7640  * (usually an abort msg doesn't completely solve the problem, therefore
7641  * a device or bus reset is recommended)
7642  */
7643 static int
7644 fas_abort_curcmd(struct fas *fas)
7645 {
7646 	if (fas->f_current_sp) {
7647 		return (fas_abort_cmd(fas, fas->f_current_sp,
7648 			fas->f_current_sp->cmd_slot));
7649 	} else {
7650 		return (fas_reset_bus(fas));
7651 	}
7652 }
7653 
7654 static int
7655 fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot)
7656 {
7657 	struct scsi_address ap;
7658 
7659 	ap.a_hba_tran = fas->f_tran;
7660 	ap.a_target = slot / NLUNS_PER_TARGET;
7661 	ap.a_lun    = slot % NLUNS_PER_TARGET;
7662 
7663 	IPRINTF1("abort cmd 0x%p\n", (void *)sp);
7664 
7665 	/*
7666 	 * attempting to abort a connected cmd is usually fruitless, so
7667 	 * only try disconnected cmds
7668 	 * a reset is preferable over an abort (see 1161701)
7669 	 */
7670 	if ((fas->f_current_sp && (fas->f_current_sp->cmd_slot != slot)) ||
7671 	    (fas->f_state == STATE_FREE)) {
7672 		IPRINTF2("attempting to reset target %d.%d\n",
7673 		    ap.a_target, ap.a_lun);
7674 		if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
7675 			return (ACTION_SEARCH);
7676 		}
7677 	}
7678 
7679 	/*
7680 	 * if the target won't listen, then a retry is useless
7681 	 * there is also the possibility that the cmd still completed while
7682 	 * we were trying to reset and the target driver may have done a
7683 	 * device reset which has blown away this sp.
7684 	 * well, we've tried, now pull the chain
7685 	 */
7686 	IPRINTF("aborting all cmds by bus reset\n");
7687 	return (fas_reset_bus(fas));
7688 }
7689 
7690 /*
7691  * fas_do_scsi_abort() assumes that we already have the mutex.
7692  * during the abort, we hold the mutex and prevent callbacks by setting
7693  * completion pointer to NULL. this will also avoid that a target driver
7694  * attempts to do a scsi_abort/reset while we are aborting.
7695  * because the completion pointer is NULL  we can still update the
7696  * packet after completion
7697  * the throttle for this slot is cleared either by fas_abort_connected_cmd
7698  * or fas_runpoll which prevents new cmds from starting while aborting
7699  */
static int
fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fas *fas = ADDR2FAS(ap);
	struct fas_cmd *sp;
	int rval = FALSE;
	short slot;
	struct fas_cmd *cur_sp = fas->f_current_sp;
	void	(*cur_savec)(), (*sp_savec)();
	int	sp_tagged_flag, abort_msg;

	/* a NULL pkt means "abort everything for this target/lun" */
	if (pkt) {
		sp = PKT2CMD(pkt);
		slot = sp->cmd_slot;
		ASSERT(slot == ((ap->a_target * NLUNS_PER_TARGET) | ap->a_lun));
	} else {
		sp = NULL;
		slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
	}

	fas_move_waitQ_to_readyQ(fas);

	/*
	 *   If no specific command was passed, all cmds here will be aborted
	 *   If a specific command was passed as an argument (to be aborted)
	 *   only the specified command will be aborted
	 */
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	IPRINTF4("fas_scsi_abort for slot %x, "
	    "sp=0x%p, pkt_flags=%x, cur_sp=0x%p\n",
	    slot, (void *)sp, (sp? sp->cmd_pkt_flags : 0), (void *)cur_sp);

	/*
	 * first check if the cmd is in the ready queue or
	 * in the active queue
	 */
	if (sp) {
		IPRINTF3("aborting one command 0x%p for %d.%d\n",
		    (void *)sp, ap->a_target, ap->a_lun);
		rval = fas_remove_from_readyQ(fas, sp, slot);
		if (rval) {
			/* cmd had not started yet: complete it right here */
			IPRINTF("aborted one ready cmd\n");
			fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
			fas_decrement_ncmds(fas, sp);
			fas_call_pkt_comp(fas, sp);
			goto exit;

		} else if ((sp !=
		    fas->f_active[slot]->f_slot[sp->cmd_tag[1]])) {
			/* unknown cmd: treat the abort as a success */
			IPRINTF("cmd doesn't exist here\n");
			rval = TRUE;
			goto exit;
		}
	}

	/*
	 * hold off any new commands while attempting to abort
	 * an active cmd
	 */
	fas_set_throttles(fas, slot, 1, HOLD_THROTTLE);

	if (cur_sp) {
		/*
		 * prevent completion on current cmd
		 */
		cur_savec = cur_sp->cmd_pkt->pkt_comp;
		cur_sp->cmd_pkt->pkt_comp = NULL;
	}

	if (sp) {
		/*
		 * the cmd exists here. is it connected or disconnected?
		 * if connected but still selecting then can't abort now.
		 * prevent completion on this cmd
		 */
		sp_tagged_flag = (sp->cmd_pkt_flags & FLAG_TAGMASK);
		abort_msg = (sp_tagged_flag? MSG_ABORT_TAG : MSG_ABORT);
		sp_savec = sp->cmd_pkt->pkt_comp;
		sp->cmd_pkt->pkt_comp = NULL;

		/* connected but not selecting? */
		if ((sp == cur_sp) && (fas->f_state != STATE_FREE) &&
		    (sp->cmd_pkt->pkt_state)) {
			rval = fas_abort_connected_cmd(fas, sp, abort_msg);
		}

		/* if abort connected cmd failed, try abort disconnected */
		if ((rval == 0) &&
		    (sp->cmd_flags & CFLAG_CMDDISC) &&
		    ((sp->cmd_flags &  CFLAG_COMPLETED) == 0)) {
			rval = fas_abort_disconnected_cmd(fas, ap, sp,
				abort_msg, slot);
		}

		if (rval) {
			sp->cmd_flags |= CFLAG_COMPLETED;
			fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
		}

		/* restore the completion callback we parked above */
		sp->cmd_pkt->pkt_comp = sp_savec;

	} else {
		IPRINTF2("aborting all commands for %d.%d\n",
		    ap->a_target, ap->a_lun);
		abort_msg = MSG_ABORT;

		/* active and not selecting ? */
		if (cur_sp && (fas->f_state != STATE_FREE) &&
		    (cur_sp->cmd_slot == slot) &&
		    cur_sp->cmd_pkt->pkt_state) {
			rval = fas_abort_connected_cmd(fas, cur_sp,
				abort_msg);
		}
		if (rval == 0) {
			rval = fas_abort_disconnected_cmd(fas, ap,
				    NULL, abort_msg, slot);
		}
	}

done:
	/* complete the current sp */
	if (cur_sp) {
		cur_sp->cmd_pkt->pkt_comp = cur_savec;
		if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
			fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
			cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
			fas_decrement_ncmds(fas, cur_sp);
			fas_call_pkt_comp(fas, cur_sp);
		}
	}

	/* complete the sp passed as 2nd arg */
	if (sp && (sp != cur_sp) && (sp->cmd_flags & CFLAG_COMPLETED)) {
		sp->cmd_flags &= ~CFLAG_COMPLETED;
		fas_remove_cmd(fas, sp, NEW_TIMEOUT);
		fas_decrement_ncmds(fas, sp);
		fas_call_pkt_comp(fas, sp);
	}

	/* clean up all cmds for this slot */
	if (rval && (abort_msg == MSG_ABORT)) {
		/*
		 * mark all commands here as aborted
		 * abort msg has been accepted, now cleanup queues;
		 */
		fas_mark_packets(fas, slot, CMD_ABORTED, STAT_ABORTED);
		fas_flush_tagQ(fas, slot);
		fas_flush_readyQ(fas, slot);
	}
	/* undo the HOLD_THROTTLE set before the abort attempt */
	fas_set_throttles(fas, slot, 1, MAX_THROTTLE);

exit:
	if (fas->f_state == STATE_FREE) {
		(void) fas_ustart(fas);
	}

	ASSERT(mutex_owned(FAS_MUTEX(fas)));

#ifdef FASDEBUG
	if (rval && fas_test_stop) {
		debug_enter("abort succeeded");
	}
#endif
	return (rval);
}
7865 
7866 /*
7867  * mark all packets with new reason and update statistics
7868  */
7869 static void
7870 fas_mark_packets(struct fas *fas, int slot, uchar_t reason, uint_t stat)
7871 {
7872 	struct fas_cmd *sp = fas->f_readyf[slot];
7873 
7874 	while (sp != 0) {
7875 		fas_set_pkt_reason(fas, sp, reason, STAT_ABORTED);
7876 		sp = sp->cmd_forw;
7877 	}
7878 	if (fas->f_tcmds[slot]) {
7879 		int n = 0;
7880 		ushort_t tag;
7881 
7882 		for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
7883 			if ((sp = fas->f_active[slot]->f_slot[tag]) != 0) {
7884 				fas_set_pkt_reason(fas, sp, reason, stat);
7885 				n++;
7886 			}
7887 		}
7888 		ASSERT(fas->f_tcmds[slot] == n);
7889 	}
7890 }
7891 
7892 /*
7893  * set pkt_reason and OR in pkt_statistics flag
7894  */
7895 static void
7896 fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
7897     uint_t stat)
7898 {
7899 	if (sp) {
7900 		if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
7901 			sp->cmd_pkt->pkt_reason = reason;
7902 		}
7903 		sp->cmd_pkt->pkt_statistics |= stat;
7904 		IPRINTF3("sp=0x%p, pkt_reason=%x, pkt_stat=%x\n",
7905 		    (void *)sp, reason, sp->cmd_pkt->pkt_statistics);
7906 	}
7907 }
7908 
7909 /*
7910  * delete specified cmd from the ready queue
7911  */
7912 static int
7913 fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp, int slot)
7914 {
7915 	struct fas_cmd *ssp, *psp;
7916 
7917 	/*
7918 	 * command has not been started yet and is still in the ready queue
7919 	 */
7920 	if (sp) {
7921 		ASSERT(fas->f_ncmds > 0);
7922 		/*
7923 		 * find packet on the ready queue and remove it
7924 		 */
7925 		for (psp = NULL, ssp = fas->f_readyf[slot]; ssp != NULL;
7926 		    psp = ssp, ssp = ssp->cmd_forw) {
7927 			if (ssp == sp) {
7928 				if (fas->f_readyf[slot] == sp) {
7929 					fas->f_readyf[slot] = sp->cmd_forw;
7930 				} else {
7931 					psp->cmd_forw = sp->cmd_forw;
7932 				}
7933 				if (fas->f_readyb[slot] == sp) {
7934 					fas->f_readyb[slot] = psp;
7935 				}
7936 				return (TRUE);
7937 			}
7938 		}
7939 	}
7940 	return (FALSE);
7941 }
7942 
7943 /*
7944  * add cmd to to head of the readyQ
7945  * due to tag allocation failure or preemption we have to return
7946  * this cmd to the readyQ
7947  */
7948 static void
7949 fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp)
7950 {
7951 	/*
7952 	 * never return a NOINTR pkt to the readyQ
7953 	 * (fas_runpoll will resubmit)
7954 	 */
7955 	if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7956 		struct fas_cmd *dp;
7957 		int slot = sp->cmd_slot;
7958 
7959 		dp = fas->f_readyf[slot];
7960 		fas->f_readyf[slot] = sp;
7961 		sp->cmd_forw = dp;
7962 		if (fas->f_readyb[slot] == NULL) {
7963 			fas->f_readyb[slot] = sp;
7964 		}
7965 	}
7966 }
7967 
7968 /*
7969  * flush cmds in ready queue
7970  */
7971 static void
7972 fas_flush_readyQ(struct fas *fas, int slot)
7973 {
7974 	if (fas->f_readyf[slot]) {
7975 		struct fas_cmd *sp, *nsp;
7976 
7977 		IPRINTF1("flushing ready queue, slot=%x\n", slot);
7978 		ASSERT(fas->f_ncmds > 0);
7979 
7980 		sp = fas->f_readyf[slot];
7981 		fas->f_readyf[slot] = fas->f_readyb[slot] = NULL;
7982 
7983 		while (sp != 0) {
7984 			/*
7985 			 * save the forward pointer before calling
7986 			 * the completion routine
7987 			 */
7988 			nsp = sp->cmd_forw;
7989 			ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
7990 			ASSERT(Tgt(sp) == slot/NLUNS_PER_TARGET);
7991 			fas_decrement_ncmds(fas, sp);
7992 			fas_call_pkt_comp(fas, sp);
7993 			sp = nsp;
7994 		}
7995 		fas_check_ncmds(fas);
7996 	}
7997 }
7998 
7999 /*
8000  * cleanup the tag queue
8001  * preserve some order by starting with the oldest tag
8002  */
static void
fas_flush_tagQ(struct fas *fas, int slot)
{
	ushort_t tag, starttag;
	struct fas_cmd *sp;
	struct f_slots *tagque = fas->f_active[slot];

	if (tagque == NULL) {
		return;
	}

	DPRINTF2("flushing entire tag queue, slot=%x, tcmds=%x\n",
	    slot, fas->f_tcmds[slot]);

#ifdef FASDEBUG
	{
		/* sanity check: recount active cmds and verify f_tcmds */
		int n = 0;
		for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
			if ((sp = tagque->f_slot[tag]) != 0) {
				n++;
				ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
				if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
					if ((sp->cmd_flags & CFLAG_FINISHED) ==
					    0) {
						debug_enter("fas_flush_tagQ");
					}
				}
			}
		}
		ASSERT(fas->f_tcmds[slot] == n);
	}
#endif
	/*
	 * f_tags is the next tag to allocate, so starting there and
	 * wrapping around visits the cmds oldest-first
	 */
	tag = starttag = fas->f_active[slot]->f_tags;

	do {
		if ((sp = tagque->f_slot[tag]) != 0) {
			fas_flush_cmd(fas, sp, 0, 0);
		}
		tag = ((ushort_t)(tag + 1)) %
		    (ushort_t)fas->f_active[slot]->f_n_slots;
	} while (tag != starttag);

	ASSERT(fas->f_tcmds[slot] == 0);
	EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
	fas_check_ncmds(fas);
}
8049 
8050 /*
8051  * cleanup one active command
8052  */
static void
fas_flush_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
    uint_t stat)
{
	short slot = sp->cmd_slot;

	ASSERT(fas->f_ncmds > 0);
	ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
	ASSERT(sp == fas->f_active[slot]->f_slot[sp->cmd_tag[1]]);

	/* take the cmd off the active list, then complete it */
	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_set_pkt_reason(fas, sp, reason, stat);
	fas_call_pkt_comp(fas, sp);

	EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
	fas_check_ncmds(fas);
}
8071 
8072 /*
8073  * prepare a proxy cmd (a cmd sent on behalf of the target driver,
8074  * usually for error recovery or abort/reset)
8075  */
8076 static void
8077 fas_makeproxy_cmd(struct fas_cmd *sp, struct scsi_address *ap,
8078     struct scsi_pkt *pkt, int nmsgs, ...)
8079 {
8080 	va_list vap;
8081 	int i;
8082 
8083 	ASSERT(nmsgs <= (CDB_GROUP5 - CDB_GROUP0 - 3));
8084 
8085 	bzero(sp, sizeof (*sp));
8086 	bzero(pkt, sizeof (*pkt));
8087 
8088 	pkt->pkt_address	= *ap;
8089 	pkt->pkt_cdbp		= (opaque_t)&sp->cmd_cdb[0];
8090 	pkt->pkt_scbp		= (opaque_t)&sp->cmd_scb;
8091 	pkt->pkt_ha_private	= (opaque_t)sp;
8092 	sp->cmd_pkt		= pkt;
8093 	sp->cmd_scblen		= 1;
8094 	sp->cmd_pkt_flags	= pkt->pkt_flags = FLAG_NOINTR;
8095 	sp->cmd_flags		= CFLAG_CMDPROXY;
8096 	sp->cmd_cdb[FAS_PROXY_TYPE] = FAS_PROXY_SNDMSG;
8097 	sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
8098 	sp->cmd_cdb[FAS_PROXY_DATA] = (char)nmsgs;
8099 
8100 	va_start(vap, nmsgs);
8101 	for (i = 0; i < nmsgs; i++) {
8102 		sp->cmd_cdb[FAS_PROXY_DATA + 1 + i] = (uchar_t)va_arg(vap, int);
8103 	}
8104 	va_end(vap);
8105 }
8106 
8107 /*
8108  * send a proxy cmd and check the result
8109  */
8110 static int
8111 fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
8112     struct scsi_address *ap, char *what)
8113 {
8114 	int rval;
8115 
8116 	IPRINTF3("Sending proxy %s message to %d.%d\n", what,
8117 	    ap->a_target, ap->a_lun);
8118 	if (fas_accept_pkt(fas, sp, TRAN_BUSY_OK) == TRAN_ACCEPT &&
8119 	    sp->cmd_pkt->pkt_reason == CMD_CMPLT &&
8120 	    sp->cmd_cdb[FAS_PROXY_RESULT] == TRUE) {
8121 		IPRINTF3("Proxy %s succeeded for %d.%d\n", what,
8122 		    ap->a_target, ap->a_lun);
8123 		ASSERT(fas->f_current_sp != sp);
8124 		rval = TRUE;
8125 	} else {
8126 		IPRINTF5(
8127 		"Proxy %s failed for %d.%d, result=%x, reason=%x\n", what,
8128 		    ap->a_target, ap->a_lun, sp->cmd_cdb[FAS_PROXY_RESULT],
8129 		    sp->cmd_pkt->pkt_reason);
8130 		ASSERT(fas->f_current_sp != sp);
8131 		rval = FALSE;
8132 	}
8133 	return (rval);
8134 }
8135 
8136 /*
8137  * abort a connected command by sending an abort msg; hold off on
8138  * starting new cmds by setting throttles to HOLD_THROTTLE
8139  */
static int
fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t msg)
{
	int rval = FALSE;
	int flags = sp->cmd_pkt_flags;

	/*
	 * if reset delay active we cannot  access the target.
	 */
	if (fas->f_reset_delay[Tgt(sp)]) {
		return (rval);
	}

	/*
	 * only abort while in data phase; otherwise we mess up msg phase
	 */
	if (!((fas->f_state == ACTS_DATA) ||
	    (fas->f_state == ACTS_DATA_DONE))) {
		return (rval);
	}


	IPRINTF3("Sending abort message %s to connected %d.%d\n",
	    scsi_mname(msg), Tgt(sp), Lun(sp));


	/*
	 * queue the abort msg as the next msg-out, assert ATN to
	 * request a msg-out phase, and poll until the cmd finishes
	 */
	fas->f_abort_msg_sent = 0;
	fas->f_omsglen = 1;
	fas->f_cur_msgout[0] = msg;
	sp->cmd_pkt_flags |= FLAG_NOINTR;
	fas_assert_atn(fas);

	(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);

	/*
	 * now check if the msg was taken
	 * e_abort is set in fas_handle_msg_out_done when the abort
	 * msg has actually gone out (ie. msg out phase occurred
	 */
	if (fas->f_abort_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
		IPRINTF2("target %d.%d aborted\n",
			Tgt(sp), Lun(sp));
		rval = TRUE;
	} else {
		IPRINTF2("target %d.%d did not abort\n",
			Tgt(sp), Lun(sp));
	}
	/* restore the pkt flags we modified and clear the pending msg */
	sp->cmd_pkt_flags = flags;
	fas->f_omsglen = 0;
	return (rval);
}
8191 
8192 /*
8193  * abort a disconnected command; if it is a tagged command, we need
8194  * to include the tag
8195  */
8196 static int
8197 fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
8198     struct fas_cmd *sp, uchar_t msg, int slot)
8199 {
8200 	auto struct fas_cmd local;
8201 	struct scsi_pkt pkt;
8202 	struct fas_cmd *proxy_cmdp = &local;
8203 	int target = ap->a_target;
8204 
8205 	/*
8206 	 * if reset delay is active, we cannot start a selection
8207 	 * and there shouldn't be a cmd outstanding
8208 	 */
8209 	if (fas->f_reset_delay[target] != 0) {
8210 		return (FALSE);
8211 	}
8212 
8213 	if (sp)
8214 		ASSERT(sp->cmd_slot == slot);
8215 
8216 	IPRINTF1("aborting disconnected tagged cmd(s) with %s\n",
8217 		scsi_mname(msg));
8218 	if (sp && (TAGGED(target) && (msg == MSG_ABORT_TAG))) {
8219 		int tag = sp->cmd_tag[1];
8220 		ASSERT(sp == fas->f_active[slot]->f_slot[tag]);
8221 		fas_makeproxy_cmd(proxy_cmdp, ap, &pkt, 3,
8222 		    MSG_SIMPLE_QTAG, tag, msg);
8223 	} else {
8224 		fas_makeproxy_cmd(proxy_cmdp, ap, &pkt, 1, msg);
8225 	}
8226 
8227 	return (fas_do_proxy_cmd(fas, proxy_cmdp, ap, scsi_mname(msg)));
8228 }
8229 
8230 /*
8231  * reset handling:
8232  * fas_do_scsi_reset assumes that we have already entered the mutex
8233  */
8234 static int
8235 fas_do_scsi_reset(struct scsi_address *ap, int level)
8236 {
8237 	int rval = FALSE;
8238 	struct fas *fas = ADDR2FAS(ap);
8239 	short slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
8240 
8241 	ASSERT(mutex_owned(FAS_MUTEX(fas)));
8242 	IPRINTF3("fas_scsi_reset for slot %x, level=%x, tcmds=%x\n",
8243 		slot, level, fas->f_tcmds[slot]);
8244 
8245 	fas_move_waitQ_to_readyQ(fas);
8246 
8247 	if (level == RESET_ALL) {
8248 		/*
8249 		 * We know that fas_reset_bus() returns ACTION_RETURN.
8250 		 */
8251 		(void) fas_reset_bus(fas);
8252 
8253 		/*
8254 		 * Now call fas_dopoll() to field the reset interrupt
8255 		 * which will then call fas_reset_recovery which will
8256 		 * call the completion function for all commands.
8257 		 */
8258 		if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
8259 			/*
8260 			 * reset fas
8261 			 */
8262 			fas_internal_reset(fas, FAS_RESET_FAS);
8263 			(void) fas_reset_bus(fas);
8264 			if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
8265 				fas_log(fas,
8266 				    CE_WARN, "reset scsi bus failed");
8267 				New_state(fas, STATE_FREE);
8268 			} else {
8269 				rval = TRUE;
8270 			}
8271 		} else {
8272 			rval = TRUE;
8273 		}
8274 
8275 	} else {
8276 		struct fas_cmd *cur_sp = fas->f_current_sp;
8277 		void (*savec)() = NULL;
8278 
8279 		/*
8280 		 * prevent new commands from starting
8281 		 */
8282 		fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
8283 
8284 		/*
8285 		 * zero pkt_comp so it won't complete during the reset and
8286 		 * we can still update the packet after the reset.
8287 		 */
8288 		if (cur_sp) {
8289 			savec = cur_sp->cmd_pkt->pkt_comp;
8290 			cur_sp->cmd_pkt->pkt_comp = NULL;
8291 		}
8292 
8293 		/*
8294 		 * is this a connected cmd but not selecting?
8295 		 */
8296 		if (cur_sp && (fas->f_state != STATE_FREE) &&
8297 		    (cur_sp->cmd_pkt->pkt_state != 0) &&
8298 		    (ap->a_target == (Tgt(cur_sp)))) {
8299 			rval = fas_reset_connected_cmd(fas, ap);
8300 		}
8301 
8302 		/*
8303 		 * if not connected or fas_reset_connected_cmd() failed,
8304 		 * attempt a reset_disconnected_cmd
8305 		 */
8306 		if (rval == FALSE) {
8307 			rval = fas_reset_disconnected_cmd(fas, ap);
8308 		}
8309 
8310 		/*
8311 		 * cleanup if reset was successful
8312 		 * complete the current sp first.
8313 		 */
8314 		if (cur_sp) {
8315 			cur_sp->cmd_pkt->pkt_comp = savec;
8316 			if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
8317 				if (ap->a_target == (Tgt(cur_sp))) {
8318 					fas_set_pkt_reason(fas, cur_sp,
8319 					    CMD_RESET, STAT_DEV_RESET);
8320 				}
8321 				fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
8322 				cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
8323 				fas_decrement_ncmds(fas, cur_sp);
8324 				fas_call_pkt_comp(fas, cur_sp);
8325 			}
8326 		}
8327 
8328 		if (rval == TRUE) {
8329 			fas_reset_cleanup(fas, slot);
8330 		} else {
8331 			IPRINTF1("fas_scsi_reset failed for slot %x\n", slot);
8332 
8333 			/*
8334 			 * restore throttles to max throttle, regardless
8335 			 * of what it was (fas_set_throttles() will deal
8336 			 * with reset delay active)
8337 			 * restoring to the old throttle is not
8338 			 * a such a good idea
8339 			 */
8340 			fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
8341 
8342 		}
8343 
8344 		if (fas->f_state == STATE_FREE) {
8345 			(void) fas_ustart(fas);
8346 		}
8347 	}
8348 exit:
8349 	ASSERT(mutex_owned(FAS_MUTEX(fas)));
8350 	ASSERT(fas->f_ncmds >= fas->f_ndisc);
8351 
8352 #ifdef FASDEBUG
8353 	if (rval && fas_test_stop) {
8354 		debug_enter("reset succeeded");
8355 	}
8356 #endif
8357 	return (rval);
8358 }
8359 
8360 /*
8361  * reset delay is  handled by a separate watchdog; this ensures that
8362  * regardless of fas_scsi_watchdog_tick, the reset delay will not change
8363  */
8364 static void
8365 fas_start_watch_reset_delay(struct fas *fas)
8366 {
8367 	mutex_enter(&fas_global_mutex);
8368 	if ((fas_reset_watch == 0) && FAS_CAN_SCHED) {
8369 		fas_reset_watch = timeout(fas_watch_reset_delay, NULL,
8370 		    drv_usectohz((clock_t)FAS_WATCH_RESET_DELAY_TICK * 1000));
8371 	}
8372 	ASSERT((fas_reset_watch != 0) || (fas->f_flags & FAS_FLG_NOTIMEOUTS));
8373 	mutex_exit(&fas_global_mutex);
8374 }
8375 
8376 /*
8377  * set throttles to HOLD and set reset_delay for all target/luns
8378  */
8379 static void
8380 fas_setup_reset_delay(struct fas *fas)
8381 {
8382 	if (!ddi_in_panic()) {
8383 		int i;
8384 
8385 		fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
8386 		for (i = 0; i < NTARGETS_WIDE; i++) {
8387 			fas->f_reset_delay[i] = fas->f_scsi_reset_delay;
8388 		}
8389 		fas_start_watch_reset_delay(fas);
8390 	} else {
8391 		drv_usecwait(fas->f_scsi_reset_delay * 1000);
8392 	}
8393 }
8394 
8395 /*
8396  * fas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8397  * fas instance for active reset delays
8398  */
8399 /*ARGSUSED*/
8400 static void
8401 fas_watch_reset_delay(void *arg)
8402 {
8403 	struct fas *fas;
8404 	struct fas *lfas;	/* last not_done fas */
8405 	int not_done = 0;
8406 
8407 	mutex_enter(&fas_global_mutex);
8408 	fas_reset_watch = 0;
8409 	mutex_exit(&fas_global_mutex);
8410 
8411 	rw_enter(&fas_global_rwlock, RW_READER);
8412 	for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {
8413 		if (fas->f_tran == 0) {
8414 			continue;
8415 		}
8416 		mutex_enter(FAS_MUTEX(fas));
8417 		not_done += fas_watch_reset_delay_subr(fas);
8418 		lfas = fas;
8419 		fas_check_waitQ_and_mutex_exit(fas);
8420 	}
8421 	rw_exit(&fas_global_rwlock);
8422 	if (not_done) {
8423 		ASSERT(lfas != NULL);
8424 		fas_start_watch_reset_delay(lfas);
8425 	}
8426 }
8427 
/*
 * per-instance half of the reset-delay watchdog: age each target's
 * reset delay by one tick; when a delay expires, reopen the throttles
 * for all of that target's luns.  Returns nonzero (-1) if any delay
 * is still counting down, so the caller knows to reschedule.
 */
static int
fas_watch_reset_delay_subr(struct fas *fas)
{
	short slot, s;		/* slot steps by luns; s is the target # */
	int start_slot = -1;	/* first slot whose delay expired, if any */
	int done = 0;

	for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET)  {

		/*
		 * check if a reset delay is active; if so back to full throttle
		 * which will unleash the cmds in the ready Q
		 */
		s = slot/NLUNS_PER_TARGET;
		if (fas->f_reset_delay[s] != 0) {
			EPRINTF2("target%d: reset delay=%d\n", s,
			    fas->f_reset_delay[s]);
			fas->f_reset_delay[s] -= FAS_WATCH_RESET_DELAY_TICK;
			if (fas->f_reset_delay[s] <= 0) {
				/*
				 * clear throttle for all luns on this target
				 */
				fas->f_reset_delay[s] = 0;
				fas_set_all_lun_throttles(fas,
				    slot, MAX_THROTTLE);
				IPRINTF1("reset delay completed, slot=%x\n",
				    slot);
				if (start_slot == -1) {
					start_slot = slot;
				}
			} else {
				/* at least one delay still pending */
				done = -1;
			}
		}
	}

	/*
	 * start a cmd if a reset delay expired
	 */
	if (start_slot != -1 && fas->f_state == STATE_FREE) {
		(void) fas_ustart(fas);
	}
	return (done);
}
8472 
8473 /*
8474  * cleanup after a device reset. this affects all target's luns
8475  */
8476 static void
8477 fas_reset_cleanup(struct fas *fas, int slot)
8478 {
8479 	/*
8480 	 * reset msg has been accepted, now cleanup queues;
8481 	 * for all luns of this target
8482 	 */
8483 	int i, start, end;
8484 	int target  = slot/NLUNS_PER_TARGET;
8485 
8486 	start = slot & ~(NLUNS_PER_TARGET-1);
8487 	end = start + NLUNS_PER_TARGET;
8488 	IPRINTF4("fas_reset_cleanup: slot %x, start=%x, end=%x, tcmds=%x\n",
8489 	    slot, start, end, fas->f_tcmds[slot]);
8490 
8491 	ASSERT(!(fas->f_current_sp &&
8492 		(fas->f_current_sp->cmd_slot == slot) &&
8493 		(fas->f_state & STATE_SELECTING)));
8494 
8495 	/*
8496 	 * if we are not in panic set up a reset delay for this target,
8497 	 * a zero throttle forces all new requests into the ready Q
8498 	 */
8499 	if (!ddi_in_panic()) {
8500 		fas_set_all_lun_throttles(fas, start, HOLD_THROTTLE);
8501 		fas->f_reset_delay[target] = fas->f_scsi_reset_delay;
8502 		fas_start_watch_reset_delay(fas);
8503 	} else {
8504 		drv_usecwait(fas->f_scsi_reset_delay * 1000);
8505 	}
8506 
8507 	for (i = start; i < end; i++) {
8508 		fas_mark_packets(fas, i, CMD_RESET, STAT_DEV_RESET);
8509 		fas_flush_tagQ(fas, i);
8510 		fas_flush_readyQ(fas, i);
8511 		if (fas->f_arq_pkt[i]) {
8512 			struct fas_cmd *sp = fas->f_arq_pkt[i];
8513 			struct arq_private_data *arq_data =
8514 			(struct arq_private_data *)(sp->cmd_pkt->pkt_private);
8515 			if (sp->cmd_pkt->pkt_comp) {
8516 				ASSERT(arq_data->arq_save_sp == NULL);
8517 			}
8518 		}
8519 		ASSERT(fas->f_tcmds[i] == 0);
8520 	}
8521 	ASSERT(fas->f_ncmds >= fas->f_ndisc);
8522 
8523 	fas_force_renegotiation(fas, target);
8524 }
8525 
8526 /*
8527  * reset a currently disconnected target
8528  */
8529 static int
8530 fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap)
8531 {
8532 	auto struct fas_cmd local;
8533 	struct fas_cmd *sp = &local;
8534 	struct scsi_pkt pkt;
8535 
8536 	fas_makeproxy_cmd(sp, ap, &pkt, 1, MSG_DEVICE_RESET);
8537 	return (fas_do_proxy_cmd(fas, sp, ap, scsi_mname(MSG_DEVICE_RESET)));
8538 }
8539 
8540 /*
8541  * reset a target with a currently connected command
8542  * Assert ATN and send MSG_DEVICE_RESET, zero throttles temporarily
8543  * to prevent new cmds from starting regardless of the outcome
8544  */
8545 static int
8546 fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap)
8547 {
8548 	int rval = FALSE;
8549 	struct fas_cmd *sp = fas->f_current_sp;
8550 	int flags = sp->cmd_pkt_flags;
8551 
8552 	/*
8553 	 * only attempt to reset in data phase; during other phases
8554 	 * asserting ATN may just cause confusion
8555 	 */
8556 	if (!((fas->f_state == ACTS_DATA) ||
8557 	    (fas->f_state == ACTS_DATA_DONE))) {
8558 		return (rval);
8559 	}
8560 
8561 	IPRINTF2("Sending reset message to connected %d.%d\n",
8562 	    ap->a_target, ap->a_lun);
8563 	fas->f_reset_msg_sent = 0;
8564 	fas->f_omsglen = 1;
8565 	fas->f_cur_msgout[0] = MSG_DEVICE_RESET;
8566 	sp->cmd_pkt_flags |= FLAG_NOINTR;
8567 
8568 	fas_assert_atn(fas);
8569 
8570 	/*
8571 	 * poll for interrupts until bus free
8572 	 */
8573 	(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
8574 
8575 	/*
8576 	 * now check if the msg was taken
8577 	 * f_reset is set in fas_handle_msg_out_done when
8578 	 * msg has actually gone out  (ie. msg out phase occurred)
8579 	 */
8580 	if (fas->f_reset_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
8581 		IPRINTF2("target %d.%d reset\n", ap->a_target, ap->a_lun);
8582 		rval = TRUE;
8583 	} else {
8584 		IPRINTF2("target %d.%d did not reset\n",
8585 			ap->a_target, ap->a_lun);
8586 	}
8587 	sp->cmd_pkt_flags = flags;
8588 	fas->f_omsglen = 0;
8589 
8590 	return (rval);
8591 }
8592 
8593 /*
8594  * reset the scsi bus to blow all commands away
8595  */
8596 static int
8597 fas_reset_bus(struct fas *fas)
8598 {
8599 	IPRINTF("fas_reset_bus:\n");
8600 	New_state(fas, ACTS_RESET);
8601 
8602 	fas_internal_reset(fas, FAS_RESET_SCSIBUS);
8603 
8604 	/*
8605 	 * Now that we've reset the SCSI bus, we'll take a SCSI RESET
8606 	 * interrupt and use that to clean up the state of things.
8607 	 */
8608 	return (ACTION_RETURN);
8609 }
8610 
8611 /*
8612  * fas_reset_recovery is called on the reset interrupt and cleans
8613  * up all cmds (active or waiting)
8614  */
8615 static int
8616 fas_reset_recovery(struct fas *fas)
8617 {
8618 	short slot, start_slot;
8619 	int i;
8620 	int rval = ACTION_SEARCH;
8621 	int max_loop = 0;
8622 
8623 	IPRINTF("fas_reset_recovery:\n");
8624 	fas_check_ncmds(fas);
8625 
8626 	/*
8627 	 * renegotiate wide and sync for all targets
8628 	 */
8629 	fas->f_sync_known = fas->f_wide_known = 0;
8630 
8631 	/*
8632 	 * reset dma engine
8633 	 */
8634 	FAS_FLUSH_DMA_HARD(fas);
8635 
8636 	/*
8637 	 * set throttles and reset delay
8638 	 */
8639 	fas_setup_reset_delay(fas);
8640 
8641 	/*
8642 	 * clear interrupts until they go away
8643 	 */
8644 	while (INTPENDING(fas) && (max_loop < FAS_RESET_SPIN_MAX_LOOP)) {
8645 		volatile struct fasreg *fasreg = fas->f_reg;
8646 		fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
8647 		fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
8648 		fas->f_step = fas_reg_read(fas, &fasreg->fas_step);
8649 		fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
8650 		drv_usecwait(FAS_RESET_SPIN_DELAY_USEC);
8651 		max_loop++;
8652 	}
8653 
8654 	if (max_loop >= FAS_RESET_SPIN_MAX_LOOP) {
8655 		fas_log(fas, CE_WARN, "Resetting SCSI bus failed");
8656 	}
8657 
8658 	fas_reg_cmd_write(fas, CMD_FLUSH);
8659 
8660 	/*
8661 	 * reset the chip, this shouldn't be necessary but sometimes
8662 	 * we get a hang in the next data in phase
8663 	 */
8664 	fas_internal_reset(fas, FAS_RESET_FAS);
8665 
8666 	/*
8667 	 * reset was expected? if not, it must be external bus reset
8668 	 */
8669 	if (fas->f_state != ACTS_RESET) {
8670 		if (fas->f_ncmds) {
8671 			fas_log(fas, CE_WARN, "external SCSI bus reset");
8672 		}
8673 	}
8674 
8675 	if (fas->f_ncmds == 0) {
8676 		rval = ACTION_RETURN;
8677 		goto done;
8678 	}
8679 
8680 	/*
8681 	 * completely reset the state of the softc data.
8682 	 */
8683 	fas_internal_reset(fas, FAS_RESET_SOFTC);
8684 
8685 	/*
8686 	 * Hold the state of the host adapter open
8687 	 */
8688 	New_state(fas, ACTS_FROZEN);
8689 
8690 	/*
8691 	 * for right now just claim that all
8692 	 * commands have been destroyed by a SCSI reset
8693 	 * and let already set reason fields or callers
8694 	 * decide otherwise for specific commands.
8695 	 */
8696 	start_slot = fas->f_next_slot;
8697 	slot = start_slot;
8698 	do {
8699 		fas_check_ncmds(fas);
8700 		fas_mark_packets(fas, slot, CMD_RESET, STAT_BUS_RESET);
8701 		fas_flush_tagQ(fas, slot);
8702 		fas_flush_readyQ(fas, slot);
8703 		if (fas->f_arq_pkt[slot]) {
8704 			struct fas_cmd *sp = fas->f_arq_pkt[slot];
8705 			struct arq_private_data *arq_data =
8706 			(struct arq_private_data *)(sp->cmd_pkt->pkt_private);
8707 			if (sp->cmd_pkt->pkt_comp) {
8708 				ASSERT(arq_data->arq_save_sp == NULL);
8709 			}
8710 		}
8711 		slot = NEXTSLOT(slot, fas->f_dslot);
8712 	} while (slot != start_slot);
8713 
8714 	fas_check_ncmds(fas);
8715 
8716 	/*
8717 	 * reset timeouts
8718 	 */
8719 	for (i = 0; i < N_SLOTS; i++) {
8720 		if (fas->f_active[i]) {
8721 			fas->f_active[i]->f_timebase = 0;
8722 			fas->f_active[i]->f_timeout = 0;
8723 			fas->f_active[i]->f_dups = 0;
8724 		}
8725 	}
8726 
8727 done:
8728 	/*
8729 	 * Move the state back to free...
8730 	 */
8731 	New_state(fas, STATE_FREE);
8732 	ASSERT(fas->f_ncmds >= fas->f_ndisc);
8733 
8734 	/*
8735 	 * perform the reset notification callbacks that are registered.
8736 	 */
8737 	(void) scsi_hba_reset_notify_callback(&fas->f_mutex,
8738 		&fas->f_reset_notify_listf);
8739 
8740 	/*
8741 	 * if reset delay is still active a search is meaningless
8742 	 * but do it anyway
8743 	 */
8744 	return (rval);
8745 }
8746 
8747 /*
8748  * hba_tran ops for quiesce and unquiesce
8749  */
8750 static int
8751 fas_scsi_quiesce(dev_info_t *dip)
8752 {
8753 	struct fas *fas;
8754 	scsi_hba_tran_t *tran;
8755 
8756 	tran = ddi_get_driver_private(dip);
8757 	if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8758 		return (-1);
8759 	}
8760 
8761 	return (fas_quiesce_bus(fas));
8762 }
8763 
8764 static int
8765 fas_scsi_unquiesce(dev_info_t *dip)
8766 {
8767 	struct fas *fas;
8768 	scsi_hba_tran_t *tran;
8769 
8770 	tran = ddi_get_driver_private(dip);
8771 	if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8772 		return (-1);
8773 	}
8774 
8775 	return (fas_unquiesce_bus(fas));
8776 }
8777 
8778 #ifdef FAS_TEST
8779 /*
8780  * torture test functions
8781  */
8782 static void
8783 fas_test_reset(struct fas *fas, int slot)
8784 {
8785 	struct scsi_address ap;
8786 	char target = slot/NLUNS_PER_TARGET;
8787 
8788 	if (fas_rtest & (1 << target)) {
8789 		ap.a_hba_tran = fas->f_tran;
8790 		ap.a_target = target;
8791 		ap.a_lun = 0;
8792 		if ((fas_rtest_type == 1) &&
8793 		    (fas->f_state == ACTS_DATA_DONE)) {
8794 			if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
8795 				fas_rtest = 0;
8796 			}
8797 		} else if ((fas_rtest_type == 2) &&
8798 		    (fas->f_state == ACTS_DATA_DONE)) {
8799 			if (fas_do_scsi_reset(&ap, RESET_ALL)) {
8800 				fas_rtest = 0;
8801 			}
8802 		} else {
8803 			if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
8804 				fas_rtest = 0;
8805 			}
8806 		}
8807 	}
8808 }
8809 
/*
 * fas_test_abort: when the fas_atest bitmask selects this target,
 * pick a victim packet according to fas_atest_disc and abort it:
 *   0 = the currently connected (not disconnected) cmd
 *   1 = a disconnected cmd (untagged slot 0, or the oldest tag)
 *   2 = abort-all (NULL pkt) for a disconnected/untagged target
 *   3/4 = first / second cmd on the ready queue
 *   5 = last cmd on the ready queue
 *   6 = connected cmd in ACTS_DATA_DONE
 *   7 = abort-all twice, then escalate to target reset
 * on success the test is disarmed by clearing fas_atest.
 */
static void
fas_test_abort(struct fas *fas, int slot)
{
	struct fas_cmd *sp = fas->f_current_sp;
	struct scsi_address ap;
	char target = slot/NLUNS_PER_TARGET;
	struct scsi_pkt *pkt = NULL;

	if (fas_atest & (1 << target)) {
		ap.a_hba_tran = fas->f_tran;
		ap.a_target = target;
		ap.a_lun = 0;

		if ((fas_atest_disc == 0) && sp &&
		    (sp->cmd_slot == slot) &&
		    ((sp->cmd_flags & CFLAG_CMDDISC) == 0)) {
			pkt = sp->cmd_pkt;
		} else if ((fas_atest_disc == 1) && NOTAG(target)) {
			sp = fas->f_active[slot]->f_slot[0];
			if (sp && (sp->cmd_flags & CFLAG_CMDDISC)) {
				pkt = sp->cmd_pkt;
			}
		} else if ((fas_atest_disc == 1) && (sp == 0) &&
		    TAGGED(target) &&
		    (fas->f_tcmds[slot] != 0)) {
			int tag;
			/*
			 * find the oldest tag
			 */
			for (tag = NTAGS-1; tag >= 0; tag--) {
			    if ((sp = fas->f_active[slot]->f_slot[tag]) != 0)
				break;
			}
			if (sp) {
				pkt = sp->cmd_pkt;
				ASSERT(sp->cmd_slot == slot);
			} else {
				return;
			}
		} else if (fas_atest_disc == 2 && (sp == 0) &&
		    (fas->f_tcmds[slot] != 0)) {
			pkt = NULL;
		} else if (fas_atest_disc == 2 && NOTAG(target)) {
			pkt = NULL;
		} else if (fas_atest_disc == 3 && fas->f_readyf[slot]) {
			pkt = fas->f_readyf[slot]->cmd_pkt;
		} else if (fas_atest_disc == 4 &&
		    fas->f_readyf[slot] && fas->f_readyf[slot]->cmd_forw) {
			pkt = fas->f_readyf[slot]->cmd_forw->cmd_pkt;
		} else if (fas_atest_disc == 5 && fas->f_readyb[slot]) {
			pkt = fas->f_readyb[slot]->cmd_pkt;
		} else if ((fas_atest_disc == 6) && sp &&
		    (sp->cmd_slot == slot) &&
		    (fas->f_state == ACTS_DATA_DONE)) {
			pkt = sp->cmd_pkt;
		} else if (fas_atest_disc == 7) {
			if (fas_do_scsi_abort(&ap, NULL)) {
				if (fas_do_scsi_abort(&ap, NULL)) {
					if (fas_do_scsi_reset(&ap,
					    RESET_TARGET)) {
						fas_atest = 0;
					}
				}
			}
			return;
		} else {
			return;
		}

		fas_log(fas, CE_NOTE, "aborting pkt=0x%p state=%x\n",
			(void *)pkt, (pkt != NULL? pkt->pkt_state : 0));
		if (fas_do_scsi_abort(&ap, pkt)) {
			fas_atest = 0;
		}
	}
}
8886 #endif /* FAS_TEST */
8887 
8888 /*
8889  * capability interface
8890  */
8891 static int
8892 fas_commoncap(struct scsi_address *ap, char *cap, int val,
8893     int tgtonly, int doset)
8894 {
8895 	struct fas *fas = ADDR2FAS(ap);
8896 	int cidx;
8897 	int target = ap->a_target;
8898 	ushort_t tshift = (1<<target);
8899 	ushort_t ntshift = ~tshift;
8900 	int rval = FALSE;
8901 
8902 	mutex_enter(FAS_MUTEX(fas));
8903 
8904 	if (cap == (char *)0) {
8905 		goto exit;
8906 	}
8907 
8908 	cidx = scsi_hba_lookup_capstr(cap);
8909 	if (cidx == -1) {
8910 		rval = UNDEFINED;
8911 	} else if (doset) {
8912 		/*
8913 		 * we usually don't allow setting capabilities for
8914 		 * other targets!
8915 		 */
8916 		if (!tgtonly) {
8917 			goto exit;
8918 		}
8919 		switch (cidx) {
8920 		case SCSI_CAP_DMA_MAX:
8921 		case SCSI_CAP_MSG_OUT:
8922 		case SCSI_CAP_PARITY:
8923 		case SCSI_CAP_INITIATOR_ID:
8924 		case SCSI_CAP_LINKED_CMDS:
8925 		case SCSI_CAP_UNTAGGED_QING:
8926 		case SCSI_CAP_RESET_NOTIFICATION:
8927 			/*
8928 			 * None of these are settable via
8929 			 * the capability interface.
8930 			 */
8931 			break;
8932 
8933 		case SCSI_CAP_DISCONNECT:
8934 			if (val)
8935 				fas->f_target_scsi_options[ap->a_target] |=
8936 				    SCSI_OPTIONS_DR;
8937 			else
8938 				fas->f_target_scsi_options[ap->a_target] &=
8939 				    ~SCSI_OPTIONS_DR;
8940 
8941 			break;
8942 
8943 		case SCSI_CAP_SYNCHRONOUS:
8944 			if (val) {
8945 				fas->f_force_async &= ~tshift;
8946 			} else {
8947 				fas->f_force_async |= tshift;
8948 			}
8949 			fas_force_renegotiation(fas, target);
8950 			rval = TRUE;
8951 			break;
8952 
8953 		case SCSI_CAP_TAGGED_QING:
8954 		{
8955 			int slot = target * NLUNS_PER_TARGET | ap->a_lun;
8956 			ushort_t old_notag = fas->f_notag;
8957 
8958 			/* do not allow with active tgt */
8959 			if (fas->f_tcmds[slot]) {
8960 				break;
8961 			}
8962 
8963 			slot =	target * NLUNS_PER_TARGET | ap->a_lun;
8964 
8965 			if (val) {
8966 				if (fas->f_target_scsi_options[target] &
8967 				    SCSI_OPTIONS_TAG) {
8968 					IPRINTF1("target %d: TQ enabled\n",
8969 					    target);
8970 					fas->f_notag &= ntshift;
8971 				} else {
8972 					break;
8973 				}
8974 			} else {
8975 				IPRINTF1("target %d: TQ disabled\n",
8976 				    target);
8977 				fas->f_notag |= tshift;
8978 			}
8979 
8980 			if (val && fas_alloc_active_slots(fas, slot,
8981 			    KM_NOSLEEP)) {
8982 				fas->f_notag = old_notag;
8983 				break;
8984 			}
8985 
8986 			fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
8987 
8988 			fas_update_props(fas, target);
8989 			rval = TRUE;
8990 			break;
8991 		}
8992 
8993 		case SCSI_CAP_WIDE_XFER:
8994 			if (val) {
8995 				if (fas->f_target_scsi_options[target] &
8996 				    SCSI_OPTIONS_WIDE) {
8997 					fas->f_nowide &= ntshift;
8998 					fas->f_force_narrow &= ~tshift;
8999 				} else {
9000 					break;
9001 				}
9002 			} else {
9003 				fas->f_force_narrow |= tshift;
9004 			}
9005 			fas_force_renegotiation(fas, target);
9006 			rval = TRUE;
9007 			break;
9008 
9009 		case SCSI_CAP_ARQ:
9010 			if (val) {
9011 				if (fas_create_arq_pkt(fas, ap)) {
9012 					break;
9013 				}
9014 			} else {
9015 				if (fas_delete_arq_pkt(fas, ap)) {
9016 					break;
9017 				}
9018 			}
9019 			rval = TRUE;
9020 			break;
9021 
9022 		case SCSI_CAP_QFULL_RETRIES:
9023 			fas->f_qfull_retries[target] = (uchar_t)val;
9024 			rval = TRUE;
9025 			break;
9026 
9027 		case SCSI_CAP_QFULL_RETRY_INTERVAL:
9028 			fas->f_qfull_retry_interval[target] =
9029 					drv_usectohz(val * 1000);
9030 			rval = TRUE;
9031 			break;
9032 
9033 		default:
9034 			rval = UNDEFINED;
9035 			break;
9036 		}
9037 
9038 	} else if (doset == 0) {
9039 		int slot = target * NLUNS_PER_TARGET | ap->a_lun;
9040 
9041 		switch (cidx) {
9042 		case SCSI_CAP_DMA_MAX:
9043 			/* very high limit because of multiple dma windows */
9044 			rval = 1<<30;
9045 			break;
9046 		case SCSI_CAP_MSG_OUT:
9047 			rval = TRUE;
9048 			break;
9049 		case SCSI_CAP_DISCONNECT:
9050 			if (tgtonly &&
9051 			    (fas->f_target_scsi_options[target] &
9052 				SCSI_OPTIONS_DR)) {
9053 				rval = TRUE;
9054 			}
9055 			break;
9056 		case SCSI_CAP_SYNCHRONOUS:
9057 			if (tgtonly && fas->f_offset[target]) {
9058 				rval = TRUE;
9059 			}
9060 			break;
9061 		case SCSI_CAP_PARITY:
9062 			rval = TRUE;
9063 			break;
9064 		case SCSI_CAP_INITIATOR_ID:
9065 			rval = MY_ID(fas);
9066 			break;
9067 		case SCSI_CAP_TAGGED_QING:
9068 			if (tgtonly && ((fas->f_notag & tshift) == 0)) {
9069 				rval = TRUE;
9070 			}
9071 			break;
9072 		case SCSI_CAP_WIDE_XFER:
9073 			if ((tgtonly && (fas->f_nowide & tshift) == 0)) {
9074 				rval = TRUE;
9075 			}
9076 			break;
9077 		case SCSI_CAP_UNTAGGED_QING:
9078 			rval = TRUE;
9079 			break;
9080 		case SCSI_CAP_ARQ:
9081 			if (tgtonly && fas->f_arq_pkt[slot]) {
9082 				rval = TRUE;
9083 			}
9084 			break;
9085 		case SCSI_CAP_LINKED_CMDS:
9086 			break;
9087 		case SCSI_CAP_RESET_NOTIFICATION:
9088 			rval = TRUE;
9089 			break;
9090 		case SCSI_CAP_QFULL_RETRIES:
9091 			rval = fas->f_qfull_retries[target];
9092 			break;
9093 		case SCSI_CAP_QFULL_RETRY_INTERVAL:
9094 			rval = drv_hztousec(
9095 				fas->f_qfull_retry_interval[target]) /
9096 				1000;
9097 			break;
9098 
9099 		default:
9100 			rval = UNDEFINED;
9101 			break;
9102 		}
9103 	}
9104 exit:
9105 	if (val && tgtonly) {
9106 		fas_update_props(fas, target);
9107 	}
9108 	fas_check_waitQ_and_mutex_exit(fas);
9109 
9110 	if (doset) {
9111 		IPRINTF6(
9112 	    "fas_commoncap:tgt=%x,cap=%s,tgtonly=%x,doset=%x,val=%x,rval=%x\n",
9113 		target, cap, tgtonly, doset, val, rval);
9114 	}
9115 	return (rval);
9116 }
9117 
9118 /*
9119  * property management
9120  * fas_update_props:
9121  * create/update sync/wide/TQ/scsi-options properties for this target
9122  */
9123 static void
9124 fas_update_props(struct fas *fas, int tgt)
9125 {
9126 	char	property[32];
9127 	uint_t	xfer_speed = 0;
9128 	uint_t	xfer_rate = 0;
9129 	int	wide_enabled, tq_enabled;
9130 	uint_t	regval = fas->f_sync_period[tgt];
9131 	int	offset = fas->f_offset[tgt];
9132 
9133 	wide_enabled = ((fas->f_nowide & (1<<tgt)) == 0);
9134 	if (offset && regval) {
9135 		xfer_speed =
9136 			FAS_SYNC_KBPS((regval * fas->f_clock_cycle) / 1000);
9137 		xfer_rate = ((wide_enabled)? 2 : 1) * xfer_speed;
9138 	}
9139 	(void) sprintf(property, "target%x-sync-speed", tgt);
9140 	fas_update_this_prop(fas, property, xfer_rate);
9141 
9142 	(void) sprintf(property, "target%x-wide", tgt);
9143 	fas_update_this_prop(fas, property, wide_enabled);
9144 
9145 	(void) sprintf(property, "target%x-TQ", tgt);
9146 	tq_enabled = ((fas->f_notag & (1<<tgt))? 0 : 1);
9147 	fas_update_this_prop(fas, property, tq_enabled);
9148 
9149 }
9150 
/*
 * create or update a single integer property on the fas dev_info node.
 * Caller must hold FAS_MUTEX; the mutex is dropped around the actual
 * property update because ddi_prop_update_int() may block.
 */
static void
fas_update_this_prop(struct fas *fas, char *property, int value)
{
	dev_info_t *dip = fas->f_dev;

	IPRINTF2("update prop: %s value=%x\n", property, value);
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	/*
	 * We cannot hold any mutex at this point because the call to
	 * ddi_prop_update_int() may block.
	 */
	mutex_exit(FAS_MUTEX(fas));
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    property, value) != DDI_PROP_SUCCESS)	{
		IPRINTF1("cannot modify/create %s property\n",	property);
	}
	mutex_enter(FAS_MUTEX(fas));
}
9169 
9170 /*
9171  * allocate active slots array, size is dependent on whether tagQ enabled
9172  */
9173 static int
9174 fas_alloc_active_slots(struct fas *fas, int slot, int flag)
9175 {
9176 	int target = slot / NLUNS_PER_TARGET;
9177 	struct f_slots *old_active = fas->f_active[slot];
9178 	struct f_slots *new_active;
9179 	ushort_t size;
9180 	int rval = -1;
9181 
9182 	if (fas->f_tcmds[slot]) {
9183 		IPRINTF("cannot change size of active slots array\n");
9184 		return (rval);
9185 	}
9186 
9187 	size = ((NOTAG(target)) ? FAS_F_SLOT_SIZE : FAS_F_SLOTS_SIZE_TQ);
9188 	EPRINTF4(
9189 	"fas_alloc_active_slots: target=%x size=%x, old=0x%p, oldsize=%x\n",
9190 		target, size, (void *)old_active,
9191 		((old_active == NULL) ? -1 : old_active->f_size));
9192 
9193 	new_active = kmem_zalloc(size, flag);
9194 	if (new_active == NULL) {
9195 		IPRINTF("new active alloc failed\n");
9196 	} else {
9197 		fas->f_active[slot] = new_active;
9198 		fas->f_active[slot]->f_n_slots = (NOTAG(target) ? 1 : NTAGS);
9199 		fas->f_active[slot]->f_size = size;
9200 		/*
9201 		 * reserve tag 0 for non-tagged cmds to tagged targets
9202 		 */
9203 		if (TAGGED(target)) {
9204 			fas->f_active[slot]->f_tags = 1;
9205 		}
9206 		if (old_active) {
9207 			kmem_free((caddr_t)old_active, old_active->f_size);
9208 		}
9209 		rval = 0;
9210 	}
9211 	return (rval);
9212 }
9213 
9214 /*
9215  * Error logging, printing, and debug print routines
9216  */
9217 static char *fas_label = "fas";
9218 
9219 /*PRINTFLIKE3*/
9220 static void
9221 fas_log(struct fas *fas, int level, const char *fmt, ...)
9222 {
9223 	dev_info_t *dev;
9224 	va_list ap;
9225 
9226 	if (fas) {
9227 		dev = fas->f_dev;
9228 	} else {
9229 		dev = 0;
9230 	}
9231 
9232 	mutex_enter(&fas_log_mutex);
9233 
9234 	va_start(ap, fmt);
9235 	(void) vsprintf(fas_log_buf, fmt, ap);
9236 	va_end(ap);
9237 
9238 	if (level == CE_CONT) {
9239 		scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
9240 	} else {
9241 		scsi_log(dev, fas_label, level, "%s", fas_log_buf);
9242 	}
9243 
9244 	mutex_exit(&fas_log_mutex);
9245 }
9246 
9247 /*PRINTFLIKE2*/
9248 static void
9249 fas_printf(struct fas *fas, const char *fmt, ...)
9250 {
9251 	dev_info_t *dev = 0;
9252 	va_list ap;
9253 	int level = CE_CONT;
9254 
9255 	mutex_enter(&fas_log_mutex);
9256 
9257 	va_start(ap, fmt);
9258 	(void) vsprintf(fas_log_buf, fmt, ap);
9259 	va_end(ap);
9260 
9261 	if (fas) {
9262 		dev = fas->f_dev;
9263 		level = CE_NOTE;
9264 		scsi_log(dev, fas_label, level, "%s", fas_log_buf);
9265 	} else {
9266 		scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
9267 	}
9268 
9269 	mutex_exit(&fas_log_mutex);
9270 }
9271 
9272 #ifdef FASDEBUG
9273 /*PRINTFLIKE2*/
9274 void
9275 fas_dprintf(struct fas *fas, const char *fmt, ...)
9276 {
9277 	dev_info_t *dev = 0;
9278 	va_list ap;
9279 
9280 	if (fas) {
9281 		dev = fas->f_dev;
9282 	}
9283 
9284 	mutex_enter(&fas_log_mutex);
9285 
9286 	va_start(ap, fmt);
9287 	(void) vsprintf(fas_log_buf, fmt, ap);
9288 	va_end(ap);
9289 
9290 	scsi_log(dev, fas_label, SCSI_DEBUG, "%s", fas_log_buf);
9291 
9292 	mutex_exit(&fas_log_mutex);
9293 }
9294 #endif

/*
 * dump the latched softc state, the dma engine registers and the
 * fas chip registers for diagnosis; msg identifies the caller.
 * NOTE(review): reads live chip registers — presumably only safe
 * when the instance mutex is held; confirm against callers.
 */
static void
fas_printstate(struct fas *fas, char *msg)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	volatile struct dma *dmar = fas->f_dma;
	uint_t csr = fas_dma_reg_read(fas, &dmar->dma_csr);
	uint_t count = fas_dma_reg_read(fas, &dmar->dma_count);
	uint_t addr = fas_dma_reg_read(fas, &dmar->dma_addr);
	uint_t test = fas_dma_reg_read(fas, &dmar->dma_test);
	uint_t fas_cnt;

	fas_log(fas, CE_WARN, "%s: current fas state:", msg);
	fas_printf(NULL, "Latched stat=0x%b intr=0x%b",
	    fas->f_stat, FAS_STAT_BITS, fas->f_intr, FAS_INT_BITS);
	fas_printf(NULL, "last msgout: %s, last msgin: %s",
	    scsi_mname(fas->f_last_msgout), scsi_mname(fas->f_last_msgin));
	fas_printf(NULL, "DMA csr=0x%b", csr, dma_bits);
	fas_printf(NULL,
	    "addr=%x dmacnt=%x test=%x last=%x last_cnt=%x",
	    addr, count, test, fas->f_lastdma, fas->f_lastcount);

	GET_FAS_COUNT(fasreg, fas_cnt);
	fas_printf(NULL, "fas state:");
	fas_printf(NULL, "\tcount(32)=%x cmd=%x stat=%x stat2=%x intr=%x",
	    fas_cnt, fasreg->fas_cmd, fasreg->fas_stat, fasreg->fas_stat2,
	    fasreg->fas_intr);
	fas_printf(NULL,
	"\tstep=%x fifoflag=%x conf=%x test=%x conf2=%x conf3=%x",
	    fasreg->fas_step, fasreg->fas_fifo_flag, fasreg->fas_conf,
	    fasreg->fas_test, fasreg->fas_conf2, fasreg->fas_conf3);

	/* include the connected command, if any */
	if (fas->f_current_sp) {
		fas_dump_cmd(fas, fas->f_current_sp);
	}
}
9332 
9333 /*
9334  * dump all we know about a cmd
9335  */
9336 static void
9337 fas_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9338 {
9339 	int i;
9340 	uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9341 	auto char buf[128];
9342 
9343 	buf[0] = '\0';
9344 	fas_printf(NULL, "Cmd dump for Target %d Lun %d:",
9345 	    Tgt(sp), Lun(sp));
9346 	(void) sprintf(&buf[0], " cdb=[");
9347 	for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9348 		(void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9349 	}
9350 	(void) sprintf(&buf[strlen(buf)], " ]");
9351 	fas_printf(NULL, buf);
9352 	fas_printf(NULL, "State=%s Last State=%s",
9353 	    fas_state_name(fas->f_state), fas_state_name(fas->f_laststate));
9354 	fas_printf(NULL,
9355 	    "pkt_state=0x%b pkt_flags=0x%x pkt_statistics=0x%x",
9356 	    sp->cmd_pkt->pkt_state, scsi_state_bits, sp->cmd_pkt_flags,
9357 	    sp->cmd_pkt->pkt_statistics);
9358 	if (sp->cmd_pkt->pkt_state & STATE_GOT_STATUS) {
9359 		fas_printf(NULL, "Status=0x%x\n", sp->cmd_pkt->pkt_scbp[0]);
9360 	}
9361 }
9362 
9363 /*ARGSUSED*/
9364 static void
9365 fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9366 {
9367 	int i;
9368 	uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9369 	auto char buf[128];
9370 
9371 	buf[0] = '\0';
9372 	(void) sprintf(&buf[0], "?%d.%d: cdb=[", Tgt(sp), Lun(sp));
9373 	for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9374 		(void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9375 	}
9376 	(void) sprintf(&buf[strlen(buf)], " ]");
9377 	fas_printf(NULL, buf);
9378 }
9379 
9380 /*
9381  * state decoding for error messages
9382  */
9383 static char *
9384 fas_state_name(ushort_t state)
9385 {
9386 	if (state == STATE_FREE) {
9387 		return ("FREE");
9388 	} else if (state & STATE_SELECTING) {
9389 		if (state == STATE_SELECT_NORMAL)
9390 			return ("SELECT");
9391 		else if (state == STATE_SELECT_N_STOP)
9392 			return ("SEL&STOP");
9393 		else if (state == STATE_SELECT_N_SENDMSG)
9394 			return ("SELECT_SNDMSG");
9395 		else
9396 			return ("SEL_NO_ATN");
9397 	} else {
9398 		static struct {
9399 			char *sname;
9400 			char state;
9401 		} names[] = {
9402 			"CMD_START",		ACTS_CMD_START,
9403 			"CMD_DONE",		ACTS_CMD_DONE,
9404 			"MSG_OUT",		ACTS_MSG_OUT,
9405 			"MSG_OUT_DONE", 	ACTS_MSG_OUT_DONE,
9406 			"MSG_IN",		ACTS_MSG_IN,
9407 			"MSG_IN_MORE",		ACTS_MSG_IN_MORE,
9408 			"MSG_IN_DONE",		ACTS_MSG_IN_DONE,
9409 			"CLEARING",		ACTS_CLEARING,
9410 			"DATA", 		ACTS_DATA,
9411 			"DATA_DONE",		ACTS_DATA_DONE,
9412 			"CMD_CMPLT",		ACTS_C_CMPLT,
9413 			"UNKNOWN",		ACTS_UNKNOWN,
9414 			"RESEL",		ACTS_RESEL,
9415 			"ENDVEC",		ACTS_ENDVEC,
9416 			"RESET",		ACTS_RESET,
9417 			"ABORTING",		ACTS_ABORTING,
9418 			"FROZEN",		ACTS_FROZEN,
9419 			0
9420 		};
9421 		int i;
9422 		for (i = 0; names[i].sname; i++) {
9423 			if (names[i].state == state)
9424 				return (names[i].sname);
9425 		}
9426 	}
9427 	return ("<BAD>");
9428 }
9429