1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25 */
26
27
28 /*
29 * ISSUES
30 *
31 * - more consistent error messages
32 * - report name of device on errors?
33 * - if wide target renegotiates sync, back to narrow?
34 * - last_msgout is not accurate ????
35 * - resolve XXXX
36 * - improve msg reject code (use special msg reject handler)
37 * - better use of IDE message
38 *	- keep track of whether ATN remains asserted while the target
39 *	  is not going into a msg-out phase
40 * - improve comments
41 * - no slave accesses when start address is odd and dma hasn't started
42 *	  this affects asserting ATN
43 */
44
45 /*
46 * fas - QLogic fas366 wide/fast SCSI Processor HBA driver with
47 * tagged and non-tagged queueing support
48 */
49 #if defined(lint) && !defined(DEBUG)
50 #define DEBUG 1
51 #define FASDEBUG
52 #endif
53
54 #define DMA_REG_TRACING /* enable dma register access tracing */
55
56
57 /*
58 * standard header files
59 */
60 #include <sys/note.h>
61 #include <sys/scsi/scsi.h>
62 #include <sys/file.h>
63 #include <sys/vtrace.h>
64
65 /*
66 * private header files
67 */
68 #include <sys/scsi/adapters/fasdma.h>
69 #include <sys/scsi/adapters/fasreg.h>
70 #include <sys/scsi/adapters/fasvar.h>
71 #include <sys/scsi/adapters/fascmd.h>
72 #include <sys/scsi/impl/scsi_reset_notify.h>
73
74 /*
75 * tunables
76 */
77 static int fas_selection_timeout = 250; /* 250 milliseconds */
78 static uchar_t fas_default_offset = DEFAULT_OFFSET;
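
/*
 * like most driver globals, these can be patched at boot via
 * /etc/system, e.g. (the value shown is purely illustrative):
 *	set fas:fas_selection_timeout = 500
 */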
79
80 /*
81 * needed for presto support, do not remove
82 */
83 static int fas_enable_sbus64 = 1;
84
85 #ifdef FASDEBUG
86 int fasdebug = 0;
87 int fasdebug_instance = -1; /* debug all instances */
88 static int fas_burstsizes_limit = -1;
89 static int fas_no_sync_wide_backoff = 0;
90 #endif /* FASDEBUG */
91
92 /*
93 * Local static data protected by global mutex
94 */
95 static kmutex_t fas_global_mutex; /* to allow concurrent attach */
96
97 static int fas_scsi_watchdog_tick; /* in seconds, for all */
98 /* instances */
99 static clock_t fas_tick; /* fas_watch() interval in Hz */
100 static timeout_id_t fas_reset_watch; /* timeout id for reset watch */
101 static timeout_id_t fas_timeout_id = 0;
102 static int fas_timeout_initted = 0;
103
104 static krwlock_t fas_global_rwlock;
105
106 static void *fas_state; /* soft state ptr */
107 static struct fas *fas_head; /* link all softstate structures */
108 static struct fas *fas_tail; /* for fas_watch() */
109
110 static kmutex_t fas_log_mutex;
111 static char fas_log_buf[256];
112 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
113 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
114 fas_scsi_watchdog_tick fas_tick))
115 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", fas::f_quiesce_timeid))
116
117 /*
118 * dma attribute structure for scsi engine
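 *
 * the positional initializers below correspond, in order, to the
 * ddi_dma_attr(9S) fields: version, addr_lo (0), addr_hi (4GB - 1),
 * count_max (16MB - 1), align, burstsizes, minxfer, maxxfer, seg,
 * sgllen (1), granularity (512) and flags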
119 */
120 static ddi_dma_attr_t dma_fasattr = {
121 DMA_ATTR_V0, (unsigned long long)0,
122 (unsigned long long)0xffffffff, (unsigned long long)((1<<24)-1),
123 1, DEFAULT_BURSTSIZE, 1,
124 (unsigned long long)0xffffffff, (unsigned long long)0xffffffff,
125 1, 512, 0
126 };
127
128 /*
129 * optional torture test stuff
130 */
131 #ifdef FASDEBUG
132 #define FAS_TEST
133 static int fas_ptest_emsgin;
134 static int fas_ptest_msgin;
135 static int fas_ptest_msg = -1;
136 static int fas_ptest_status;
137 static int fas_ptest_data_in;
138 static int fas_atest;
139 static int fas_atest_disc;
140 static int fas_atest_reconn;
141 static void fas_test_abort(struct fas *fas, int slot);
142 static int fas_rtest;
143 static int fas_rtest_type;
144 static void fas_test_reset(struct fas *fas, int slot);
145 static int fas_force_timeout;
146 static int fas_btest;
147 static int fas_test_stop;
148 static int fas_transport_busy;
149 static int fas_transport_busy_rqs;
150 static int fas_transport_reject;
151 static int fas_arqs_failure;
152 static int fas_tran_err;
153 static int fas_test_untagged;
154 static int fas_enable_untagged;
155 #endif
156
157 /*
158 * warlock directives
159 */
160 _NOTE(DATA_READABLE_WITHOUT_LOCK(dma fasdebug))
161 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy))
162 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy_rqs))
163 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_reject))
164 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_arqs_failure))
165 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_tran_err))
166 _NOTE(MUTEX_PROTECTS_DATA(fas_log_mutex, fas_log_buf))
167 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
168 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
169 fas_scsi_watchdog_tick fas_tick))
170
171 /*
172 * function prototypes
173 *
174 * scsa functions are exported by means of the transport table:
175 */
176 static int fas_scsi_tgt_probe(struct scsi_device *sd,
177 int (*waitfunc)(void));
178 static int fas_scsi_tgt_init(dev_info_t *, dev_info_t *,
179 scsi_hba_tran_t *, struct scsi_device *);
180 static int fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
181 static int fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
182 static int fas_scsi_reset(struct scsi_address *ap, int level);
183 static int fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
184 static int fas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
185 int whom);
186 static struct scsi_pkt *fas_scsi_init_pkt(struct scsi_address *ap,
187 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
188 int tgtlen, int flags, int (*callback)(), caddr_t arg);
189 static void fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
190 static void fas_scsi_dmafree(struct scsi_address *ap,
191 struct scsi_pkt *pkt);
192 static void fas_scsi_sync_pkt(struct scsi_address *ap,
193 struct scsi_pkt *pkt);
194
195 /*
196 * internal functions:
197 */
198 static int fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp);
199 static int fas_alloc_tag(struct fas *fas, struct fas_cmd *sp);
200 static int fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag);
201 static void fas_empty_waitQ(struct fas *fas);
202 static void fas_move_waitQ_to_readyQ(struct fas *fas);
203 static void fas_check_waitQ_and_mutex_exit(struct fas *fas);
204 static int fas_istart(struct fas *fas);
205 static int fas_ustart(struct fas *fas);
206 static int fas_startcmd(struct fas *fas, struct fas_cmd *sp);
207
208 static int fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
209 int cmdlen, int tgtlen, int statuslen, int kf);
210 static void fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp);
211 static int fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
212 static void fas_kmem_cache_destructor(void *buf, void *cdrarg);
213
214 static int fas_finish(struct fas *fas);
215 static void fas_handle_qfull(struct fas *fas, struct fas_cmd *sp);
216 static void fas_restart_cmd(void *);
217 static int fas_dopoll(struct fas *fas, int timeout);
218 static void fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp);
219 static uint_t fas_intr(caddr_t arg);
220 static int fas_intr_svc(struct fas *fas);
221 static int fas_phasemanage(struct fas *fas);
222 static int fas_handle_unknown(struct fas *fas);
223 static int fas_handle_cmd_start(struct fas *fas);
224 static int fas_handle_cmd_done(struct fas *fas);
225 static int fas_handle_msg_out_start(struct fas *fas);
226 static int fas_handle_msg_out_done(struct fas *fas);
227 static int fas_handle_clearing(struct fas *fas);
228 static int fas_handle_data_start(struct fas *fas);
229 static int fas_handle_data_done(struct fas *fas);
230 static int fas_handle_c_cmplt(struct fas *fas);
231 static int fas_handle_msg_in_start(struct fas *fas);
232 static int fas_handle_more_msgin(struct fas *fas);
233 static int fas_handle_msg_in_done(struct fas *fas);
234 static int fas_onebyte_msg(struct fas *fas);
235 static int fas_twobyte_msg(struct fas *fas);
236 static int fas_multibyte_msg(struct fas *fas);
237 static void fas_revert_to_async(struct fas *fas, int tgt);
238 static int fas_finish_select(struct fas *fas);
239 static int fas_reselect_preempt(struct fas *fas);
240 static int fas_reconnect(struct fas *fas);
241 static int fas_handle_selection(struct fas *fas);
242 static void fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp);
243 static int fas_handle_gross_err(struct fas *fas);
244 static int fas_illegal_cmd_or_bus_reset(struct fas *fas);
245 static int fas_check_dma_error(struct fas *fas);
246
247 static void fas_make_sdtr(struct fas *fas, int msgout_offset, int target);
248 static void fas_make_wdtr(struct fas *fas, int msgout_offset, int target,
249 int width);
250 static void fas_update_props(struct fas *fas, int tgt);
251 static void fas_update_this_prop(struct fas *fas, char *property, int value);
252
253 static int fas_commoncap(struct scsi_address *ap, char *cap, int val,
254 int tgtonly, int doset);
255
256 static void fas_watch(void *arg);
257 static void fas_watchsubr(struct fas *fas);
258 static void fas_cmd_timeout(struct fas *fas, int slot);
259 static void fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
260 int slot);
261 static void fas_reset_sync_wide(struct fas *fas);
262 static void fas_set_wide_conf3(struct fas *fas, int target, int width);
263 static void fas_force_renegotiation(struct fas *fas, int target);
264
265 static int fas_set_new_window(struct fas *fas, struct fas_cmd *sp);
266 static int fas_restore_pointers(struct fas *fas, struct fas_cmd *sp);
267 static int fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end);
268
269 /*PRINTFLIKE3*/
270 static void fas_log(struct fas *fas, int level, const char *fmt, ...);
271 /*PRINTFLIKE2*/
272 static void fas_printf(struct fas *fas, const char *fmt, ...);
273 static void fas_printstate(struct fas *fas, char *msg);
274 static void fas_dump_cmd(struct fas *fas, struct fas_cmd *sp);
275 static void fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp);
276 static char *fas_state_name(ushort_t state);
277
278 static void fas_makeproxy_cmd(struct fas_cmd *sp,
279 struct scsi_address *ap, struct scsi_pkt *pkt, int nmsg, ...);
280 static int fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
281 struct scsi_address *ap, char *what);
282
283 static void fas_internal_reset(struct fas *fas, int reset_action);
284 static int fas_alloc_active_slots(struct fas *fas, int slot, int flag);
285
286 static int fas_abort_curcmd(struct fas *fas);
287 static int fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot);
288 static int fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
289 static int fas_do_scsi_reset(struct scsi_address *ap, int level);
290 static int fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp,
291 int slot);
292 static void fas_flush_readyQ(struct fas *fas, int slot);
293 static void fas_flush_tagQ(struct fas *fas, int slot);
294 static void fas_flush_cmd(struct fas *fas, struct fas_cmd *sp,
295 uchar_t reason, uint_t stat);
296 static int fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp,
297 uchar_t msg);
298 static int fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
299 struct fas_cmd *sp, uchar_t msg, int slot);
300 static void fas_mark_packets(struct fas *fas, int slot, uchar_t reason,
301 uint_t stat);
302 static void fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp,
303 uchar_t reason, uint_t stat);
304
305 static int fas_reset_bus(struct fas *fas);
306 static int fas_reset_recovery(struct fas *fas);
307 static int fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap);
308 static int fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap);
309 static void fas_start_watch_reset_delay(struct fas *);
310 static void fas_setup_reset_delay(struct fas *fas);
311 static void fas_watch_reset_delay(void *arg);
312 static int fas_watch_reset_delay_subr(struct fas *fas);
313 static void fas_reset_cleanup(struct fas *fas, int slot);
314 static int fas_scsi_reset_notify(struct scsi_address *ap, int flag,
315 void (*callback)(caddr_t), caddr_t arg);
316 static int fas_scsi_quiesce(dev_info_t *hba_dip);
317 static int fas_scsi_unquiesce(dev_info_t *hba_dip);
318
319 static void fas_set_throttles(struct fas *fas, int slot,
320 int n, int what);
321 static void fas_set_all_lun_throttles(struct fas *fas, int slot, int what);
322 static void fas_full_throttle(struct fas *fas, int slot);
323 static void fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int timeout);
324 static void fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp);
325
326 static int fas_quiesce_bus(struct fas *fas);
327 static int fas_unquiesce_bus(struct fas *fas);
328 static void fas_ncmds_checkdrain(void *arg);
329 static int fas_check_outstanding(struct fas *fas);
330
331 static int fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap);
332 static int fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap);
333 static int fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp);
334 void fas_complete_arq_pkt(struct scsi_pkt *pkt);
335
336 void fas_call_pkt_comp(struct fas *fas, struct fas_cmd *sp);
337 void fas_empty_callbackQ(struct fas *fas);
338 int fas_init_callbacks(struct fas *fas);
339 void fas_destroy_callbacks(struct fas *fas);
340
341 static int fas_check_dma_error(struct fas *fas);
342 static int fas_init_chip(struct fas *fas, uchar_t id);
343
344 static void fas_read_fifo(struct fas *fas);
345 static void fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad);
346
347 #ifdef FASDEBUG
348 static void fas_reg_cmd_write(struct fas *fas, uint8_t cmd);
349 static void fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what);
350 static uint8_t fas_reg_read(struct fas *fas, volatile uint8_t *p);
351
352 static void fas_dma_reg_write(struct fas *fas, volatile uint32_t *p,
353 uint32_t what);
354 static uint32_t fas_dma_reg_read(struct fas *fas, volatile uint32_t *p);
355 #else
356 #define fas_reg_cmd_write(fas, cmd) \
357 fas->f_reg->fas_cmd = (cmd), fas->f_last_cmd = (cmd)
358 #define fas_reg_write(fas, p, what) *(p) = (what)
359 #define fas_reg_read(fas, p) *(p)
360 #define fas_dma_reg_write(fas, p, what) *(p) = (what)
361 #define fas_dma_reg_read(fas, p) *(p)
362 #endif
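
/*
 * in FASDEBUG builds the register accessors above are real functions
 * that record every access in a trace buffer (see fas_reg_tracing());
 * non-debug builds reduce them to bare loads and stores
 */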
363
364 /*
365 * autoconfiguration data and routines.
366 */
367 static int fas_attach(dev_info_t *dev, ddi_attach_cmd_t cmd);
368 static int fas_detach(dev_info_t *dev, ddi_detach_cmd_t cmd);
369 static int fas_dr_detach(dev_info_t *dev);
370
371 static struct dev_ops fas_ops = {
372 DEVO_REV, /* devo_rev, */
373 0, /* refcnt */
374 ddi_no_info, /* info */
375 nulldev, /* identify */
376 nulldev, /* probe */
377 fas_attach, /* attach */
378 fas_detach, /* detach */
379 nodev, /* reset */
380 NULL, /* driver operations */
381 NULL, /* bus operations */
382 NULL, /* power */
383 ddi_quiesce_not_supported, /* devo_quiesce */
384 };
385
386 static struct modldrv modldrv = {
387 &mod_driverops, /* Type of module. This one is a driver */
388 "FAS SCSI HBA Driver", /* Name of the module. */
389 &fas_ops, /* driver ops */
390 };
391
392 static struct modlinkage modlinkage = {
393 MODREV_1, (void *)&modldrv, NULL
394 };
395
396 int
397 _init(void)
398 {
399 int rval;
400 /* CONSTCOND */
401 ASSERT(NO_COMPETING_THREADS);
402
403 rval = ddi_soft_state_init(&fas_state, sizeof (struct fas),
404 FAS_INITIAL_SOFT_SPACE);
405 if (rval != 0) {
406 return (rval);
407 }
408
409 if ((rval = scsi_hba_init(&modlinkage)) != 0) {
410 ddi_soft_state_fini(&fas_state);
411 return (rval);
412 }
413
414 mutex_init(&fas_global_mutex, NULL, MUTEX_DRIVER, NULL);
415 rw_init(&fas_global_rwlock, NULL, RW_DRIVER, NULL);
416
417 mutex_init(&fas_log_mutex, NULL, MUTEX_DRIVER, NULL);
418
419 if ((rval = mod_install(&modlinkage)) != 0) {
420 mutex_destroy(&fas_log_mutex);
421 rw_destroy(&fas_global_rwlock);
422 mutex_destroy(&fas_global_mutex);
423 ddi_soft_state_fini(&fas_state);
424 scsi_hba_fini(&modlinkage);
425 return (rval);
426 }
427
428 return (rval);
429 }
430
431 int
432 _fini(void)
433 {
434 int rval;
435 /* CONSTCOND */
436 ASSERT(NO_COMPETING_THREADS);
437
438 if ((rval = mod_remove(&modlinkage)) == 0) {
439 ddi_soft_state_fini(&fas_state);
440 scsi_hba_fini(&modlinkage);
441 mutex_destroy(&fas_log_mutex);
442 rw_destroy(&fas_global_rwlock);
443 mutex_destroy(&fas_global_mutex);
444 }
445 return (rval);
446 }
447
448 int
449 _info(struct modinfo *modinfop)
450 {
451 /* CONSTCOND */
452 ASSERT(NO_COMPETING_THREADS);
453
454 return (mod_info(&modlinkage, modinfop));
455 }
456
457 static int
458 fas_scsi_tgt_probe(struct scsi_device *sd,
459 int (*waitfunc)(void))
460 {
461 dev_info_t *dip = ddi_get_parent(sd->sd_dev);
462 int rval = SCSIPROBE_FAILURE;
463 scsi_hba_tran_t *tran;
464 struct fas *fas;
465 int tgt = sd->sd_address.a_target;
466
467 tran = ddi_get_driver_private(dip);
468 ASSERT(tran != NULL);
469 fas = TRAN2FAS(tran);
470
471 /*
472 * force renegotiation since inquiry cmds do not cause
473 * check conditions
474 */
475 mutex_enter(FAS_MUTEX(fas));
476 fas_force_renegotiation(fas, tgt);
477 mutex_exit(FAS_MUTEX(fas));
478 rval = scsi_hba_probe(sd, waitfunc);
479
480 /*
481 * the scsi-options precedence is:
482 * target-scsi-options highest
483 * device-type-scsi-options
484 * per bus scsi-options
485 * global scsi-options lowest
486 */
487 mutex_enter(FAS_MUTEX(fas));
488 if ((rval == SCSIPROBE_EXISTS) &&
489 ((fas->f_target_scsi_options_defined & (1 << tgt)) == 0)) {
490 int options;
491
492 options = scsi_get_device_type_scsi_options(dip, sd, -1);
493 if (options != -1) {
494 fas->f_target_scsi_options[tgt] = options;
495 fas_log(fas, CE_NOTE,
496 "?target%x-scsi-options = 0x%x\n", tgt,
497 fas->f_target_scsi_options[tgt]);
498 fas_force_renegotiation(fas, tgt);
499 }
500 }
501 mutex_exit(FAS_MUTEX(fas));
502
503 IPRINTF2("target%x-scsi-options= 0x%x\n",
504 tgt, fas->f_target_scsi_options[tgt]);
505
506 return (rval);
507 }
508
509
510 /*ARGSUSED*/
511 static int
512 fas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
513 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
514 {
515 return (((sd->sd_address.a_target < NTARGETS_WIDE) &&
516 (sd->sd_address.a_lun < NLUNS_PER_TARGET)) ?
517 DDI_SUCCESS : DDI_FAILURE);
518 }
519
520 /*ARGSUSED*/
521 static int
522 fas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
523 {
524 struct fas *fas = NULL;
525 volatile struct dma *dmar = NULL;
526 volatile struct fasreg *fasreg;
527 ddi_dma_attr_t *fas_dma_attr;
528 ddi_device_acc_attr_t dev_attr;
529
530 int instance, id, slot, i, hm_rev;
531 size_t rlen;
532 uint_t count;
533 char buf[64];
534 scsi_hba_tran_t *tran = NULL;
535 char intr_added = 0;
536 char mutex_init_done = 0;
537 char hba_attached = 0;
538 char bound_handle = 0;
539 char *prop_template = "target%d-scsi-options";
540 char prop_str[32];
541
542 /* CONSTCOND */
543 ASSERT(NO_COMPETING_THREADS);
544
545 switch (cmd) {
546 case DDI_ATTACH:
547 break;
548
549 case DDI_RESUME:
550 if ((tran = ddi_get_driver_private(dip)) == NULL)
551 return (DDI_FAILURE);
552
553 fas = TRAN2FAS(tran);
554 if (!fas) {
555 return (DDI_FAILURE);
556 }
557 /*
558 * Reset hardware and softc to "no outstanding commands"
559 * Note that a check condition can result on first command
560 * to a target.
561 */
562 mutex_enter(FAS_MUTEX(fas));
563 fas_internal_reset(fas,
564 FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);
565
566 (void) fas_reset_bus(fas);
567
568 fas->f_suspended = 0;
569
570 /* make sure that things get started */
571 (void) fas_istart(fas);
572 fas_check_waitQ_and_mutex_exit(fas);
573
574 mutex_enter(&fas_global_mutex);
575 if (fas_timeout_id == 0) {
576 fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
577 fas_timeout_initted = 1;
578 }
579 mutex_exit(&fas_global_mutex);
580
581 return (DDI_SUCCESS);
582
583 default:
584 return (DDI_FAILURE);
585 }
586
587 instance = ddi_get_instance(dip);
588
589 /*
590 * Since we know that some instantiations of this device can
591 * be plugged into slave-only SBus slots, check to see whether
592 * this is one such.
593 */
594 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
595 cmn_err(CE_WARN,
596 "fas%d: device in slave-only slot", instance);
597 return (DDI_FAILURE);
598 }
599
600 if (ddi_intr_hilevel(dip, 0)) {
601 /*
602 * Interrupt number '0' is a high-level interrupt.
603 * At this point you either add a special interrupt
604 * handler that triggers a soft interrupt at a lower level,
605 * or - more simply and appropriately here - you just
606 * fail the attach.
607 */
608 cmn_err(CE_WARN,
609 "fas%d: Device is using a hilevel intr", instance);
610 return (DDI_FAILURE);
611 }
612
613 /*
614 * Allocate softc information.
615 */
616 if (ddi_soft_state_zalloc(fas_state, instance) != DDI_SUCCESS) {
617 cmn_err(CE_WARN,
618 "fas%d: cannot allocate soft state", instance);
619 goto fail;
620 }
621
622 fas = (struct fas *)ddi_get_soft_state(fas_state, instance);
623
624 if (fas == NULL) {
625 goto fail;
626 }
627
628 /*
629 * map in device registers
630 */
631 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
632 dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
633 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
634
635 if (ddi_regs_map_setup(dip, (uint_t)0, (caddr_t *)&dmar,
636 (off_t)0, (off_t)sizeof (struct dma),
637 &dev_attr, &fas->f_dmar_acc_handle) != DDI_SUCCESS) {
638 cmn_err(CE_WARN, "fas%d: cannot map dma", instance);
639 goto fail;
640 }
641
642 if (ddi_regs_map_setup(dip, (uint_t)1, (caddr_t *)&fasreg,
643 (off_t)0, (off_t)sizeof (struct fasreg),
644 &dev_attr, &fas->f_regs_acc_handle) != DDI_SUCCESS) {
645 cmn_err(CE_WARN,
646 "fas%d: unable to map fas366 registers", instance);
647 goto fail;
648 }
649
650 fas_dma_attr = &dma_fasattr;
651 if (ddi_dma_alloc_handle(dip, fas_dma_attr,
652 DDI_DMA_SLEEP, NULL, &fas->f_dmahandle) != DDI_SUCCESS) {
653 cmn_err(CE_WARN,
654 "fas%d: cannot alloc dma handle", instance);
655 goto fail;
656 }
657
658 /*
659 * allocate cmdarea and its dma handle
660 */
661 if (ddi_dma_mem_alloc(fas->f_dmahandle,
662 (uint_t)2*FIFOSIZE,
663 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
664 NULL, (caddr_t *)&fas->f_cmdarea, &rlen,
665 &fas->f_cmdarea_acc_handle) != DDI_SUCCESS) {
666 cmn_err(CE_WARN,
667 "fas%d: cannot alloc cmd area", instance);
668 goto fail;
669 }
670
671 fas->f_reg = fasreg;
672 fas->f_dma = dmar;
673 fas->f_instance = instance;
674
675 if (ddi_dma_addr_bind_handle(fas->f_dmahandle,
676 NULL, (caddr_t)fas->f_cmdarea,
677 rlen, DDI_DMA_RDWR|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
678 &fas->f_dmacookie, &count) != DDI_DMA_MAPPED) {
679 cmn_err(CE_WARN,
680 "fas%d: cannot bind cmdarea", instance);
681 goto fail;
682 }
683 bound_handle++;
684
685 ASSERT(count == 1);
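
	/*
	 * the cmdarea is only 2*FIFOSIZE bytes and is bound as a single
	 * consistent mapping, so it should always fit in one DMA
	 * cookie; the ASSERT above guards that assumption
	 */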
686
687 /*
688 * Allocate a transport structure
689 */
690 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
691
692 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
693 scsi_size_clean(dip); /* SCSI_SIZE_CLEAN_VERIFY ok */
694
695 /*
696 * initialize transport structure
697 */
698 fas->f_tran = tran;
699 fas->f_dev = dip;
700 tran->tran_hba_private = fas;
701 tran->tran_tgt_private = NULL;
702 tran->tran_tgt_init = fas_scsi_tgt_init;
703 tran->tran_tgt_probe = fas_scsi_tgt_probe;
704 tran->tran_tgt_free = NULL;
705 tran->tran_start = fas_scsi_start;
706 tran->tran_abort = fas_scsi_abort;
707 tran->tran_reset = fas_scsi_reset;
708 tran->tran_getcap = fas_scsi_getcap;
709 tran->tran_setcap = fas_scsi_setcap;
710 tran->tran_init_pkt = fas_scsi_init_pkt;
711 tran->tran_destroy_pkt = fas_scsi_destroy_pkt;
712 tran->tran_dmafree = fas_scsi_dmafree;
713 tran->tran_sync_pkt = fas_scsi_sync_pkt;
714 tran->tran_reset_notify = fas_scsi_reset_notify;
715 tran->tran_get_bus_addr = NULL;
716 tran->tran_get_name = NULL;
717 tran->tran_quiesce = fas_scsi_quiesce;
718 tran->tran_unquiesce = fas_scsi_unquiesce;
719 tran->tran_bus_reset = NULL;
720 tran->tran_add_eventcall = NULL;
721 tran->tran_get_eventcookie = NULL;
722 tran->tran_post_event = NULL;
723 tran->tran_remove_eventcall = NULL;
724
725 fas->f_force_async = 0;
726
727 /*
728 * disable tagged queuing and wide for all targets
729 * (will be enabled by target driver if required)
730 * sync is enabled by default
731 */
732 fas->f_nowide = fas->f_notag = ALL_TARGETS;
733 fas->f_force_narrow = ALL_TARGETS;
734
735 /*
736 * By default we assume embedded devices and save time
737 * checking for timeouts in fas_watch() by skipping
738 * the rest of the luns.
739 * If we're talking to any non-embedded devices,
740 * we can't cheat and skip over non-zero luns anymore
741 * in fas_watch() and fas_ustart().
742 */
743 fas->f_dslot = NLUNS_PER_TARGET;
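
	/*
	 * a slot number encodes target and lun as
	 * slot = target * NLUNS_PER_TARGET + lun (cf. fas_dr_detach()),
	 * so stepping through the slots by f_dslot visits only lun 0
	 * of each target
	 */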
744
745 /*
746 * f_active is used for saving disconnected cmds;
747 * for tagged targets, we need to increase the size later.
748 * Only allocate for lun == 0 here; if we probe a lun > 0,
749 * we allocate an active structure on demand.
750 * If TQ gets enabled, we need to increase the size
751 * to hold 256 cmds.
752 */
753 for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {
754 (void) fas_alloc_active_slots(fas, slot, KM_SLEEP);
755 }
756
757 /*
758 * initialize the qfull retry counts
759 */
760 for (i = 0; i < NTARGETS_WIDE; i++) {
761 fas->f_qfull_retries[i] = QFULL_RETRIES;
762 fas->f_qfull_retry_interval[i] =
763 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
764
765 }
766
767 /*
768 * Initialize throttles.
769 */
770 fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
771
772 /*
773 * Initialize mask of deferred property updates
774 */
775 fas->f_props_update = 0;
776
777 /*
778 * set host ID
779 */
780 fas->f_fasconf = DEFAULT_HOSTID;
781 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "initiator-id", -1);
782 if (id == -1) {
783 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
784 "scsi-initiator-id", -1);
785 }
786 if (id != DEFAULT_HOSTID && id >= 0 && id < NTARGETS_WIDE) {
787 fas_log(fas, CE_NOTE, "?initiator SCSI ID now %d\n", id);
788 fas->f_fasconf = (uchar_t)id;
789 }
790
791 /*
792 * find the burstsize and reduce ours if necessary
793 */
794 fas->f_dma_attr = fas_dma_attr;
795 fas->f_dma_attr->dma_attr_burstsizes &=
796 ddi_dma_burstsizes(fas->f_dmahandle);
797
798 #ifdef FASDEBUG
799 fas->f_dma_attr->dma_attr_burstsizes &= fas_burstsizes_limit;
800 IPRINTF1("dma burstsize=%x\n", fas->f_dma_attr->dma_attr_burstsizes);
801 #endif
802 /*
803 * Attach this instance of the hba
804 */
805 if (scsi_hba_attach_setup(dip, fas->f_dma_attr, tran, 0) !=
806 DDI_SUCCESS) {
807 fas_log(fas, CE_WARN, "scsi_hba_attach_setup failed");
808 goto fail;
809 }
810 hba_attached++;
811
812 /*
813 * if scsi-options property exists, use it
814 */
815 fas->f_scsi_options = ddi_prop_get_int(DDI_DEV_T_ANY,
816 dip, 0, "scsi-options", DEFAULT_SCSI_OPTIONS);
817
818 /*
819 * if scsi-selection-timeout property exists, use it
820 */
821 fas_selection_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
822 dip, 0, "scsi-selection-timeout", SCSI_DEFAULT_SELECTION_TIMEOUT);
823
824 /*
825 * if hm-rev property doesn't exist, use old scheme for rev
826 */
827 hm_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
828 "hm-rev", -1);
829
830 if (hm_rev == 0xa0 || hm_rev == -1) {
831 if (DMAREV(dmar) != 0) {
832 fas->f_hm_rev = 0x20;
833 fas_log(fas, CE_WARN,
834 "obsolete rev 2.0 FEPS chip, "
835 "possible data corruption");
836 } else {
837 fas->f_hm_rev = 0x10;
838 fas_log(fas, CE_WARN,
839 "obsolete and unsupported rev 1.0 FEPS chip");
840 goto fail;
841 }
842 } else if (hm_rev == 0x20) {
843 fas->f_hm_rev = 0x21;
844 fas_log(fas, CE_WARN, "obsolete rev 2.1 FEPS chip");
845 } else {
846 fas->f_hm_rev = (uchar_t)hm_rev;
847 fas_log(fas, CE_NOTE, "?rev %x.%x FEPS chip\n",
848 (hm_rev >> 4) & 0xf, hm_rev & 0xf);
849 }
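
	/*
	 * in short: an absent (or 0xa0) "hm-rev" property denotes old
	 * silicon whose rev must be inferred from the DMA gate array
	 * (rev 2.0 if it reports a DMA revision, else the unsupported
	 * rev 1.0), while a property value of 0x20 really identifies
	 * rev 2.1 silicon
	 */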
850
851 if ((fas->f_scsi_options & SCSI_OPTIONS_SYNC) == 0) {
852 fas->f_nosync = ALL_TARGETS;
853 }
854
855 if ((fas->f_scsi_options & SCSI_OPTIONS_WIDE) == 0) {
856 fas->f_nowide = ALL_TARGETS;
857 }
858
859 /*
860 * if target<n>-scsi-options property exists, use it;
861 * otherwise use the f_scsi_options
862 */
863 for (i = 0; i < NTARGETS_WIDE; i++) {
864 (void) sprintf(prop_str, prop_template, i);
865 fas->f_target_scsi_options[i] = ddi_prop_get_int(
866 DDI_DEV_T_ANY, dip, 0, prop_str, -1);
867
868 if (fas->f_target_scsi_options[i] != -1) {
869 fas_log(fas, CE_NOTE, "?target%x-scsi-options=0x%x\n",
870 i, fas->f_target_scsi_options[i]);
871 fas->f_target_scsi_options_defined |= 1 << i;
872 } else {
873 fas->f_target_scsi_options[i] = fas->f_scsi_options;
874 }
875 if (((fas->f_target_scsi_options[i] &
876 SCSI_OPTIONS_DR) == 0) &&
877 (fas->f_target_scsi_options[i] & SCSI_OPTIONS_TAG)) {
878 fas->f_target_scsi_options[i] &= ~SCSI_OPTIONS_TAG;
879 fas_log(fas, CE_WARN,
880 "Disabled TQ since disconnects are disabled");
881 }
882 }
883
884 fas->f_scsi_tag_age_limit =
885 ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-tag-age-limit",
886 DEFAULT_TAG_AGE_LIMIT);
887
888 fas->f_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
889 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
890 if (fas->f_scsi_reset_delay == 0) {
891 fas_log(fas, CE_NOTE,
892 "scsi_reset_delay of 0 is not recommended,"
893 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
894 fas->f_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
895 }
896
897 /*
898 * get iblock cookie and initialize mutexes
899 */
900 if (ddi_get_iblock_cookie(dip, (uint_t)0, &fas->f_iblock)
901 != DDI_SUCCESS) {
902 cmn_err(CE_WARN, "fas_attach: cannot get iblock cookie");
903 goto fail;
904 }
905
906 mutex_init(&fas->f_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
907 cv_init(&fas->f_cv, NULL, CV_DRIVER, NULL);
908
909 /*
910 * initialize mutex for waitQ
911 */
912 mutex_init(&fas->f_waitQ_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
913 mutex_init_done++;
914
915 /*
916 * initialize callback mechanism (immediate callback)
917 */
918 mutex_enter(&fas_global_mutex);
919 if (fas_init_callbacks(fas)) {
920 mutex_exit(&fas_global_mutex);
921 goto fail;
922 }
923 mutex_exit(&fas_global_mutex);
924
925 /*
926 * kstat_intr support
927 */
928 (void) sprintf(buf, "fas%d", instance);
929 fas->f_intr_kstat = kstat_create("fas", instance, buf, "controller",
930 KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
931 if (fas->f_intr_kstat)
932 kstat_install(fas->f_intr_kstat);
933
934 /*
935 * install interrupt handler
936 */
937 mutex_enter(FAS_MUTEX(fas));
938 if (ddi_add_intr(dip, (uint_t)0, &fas->f_iblock, NULL,
939 fas_intr, (caddr_t)fas)) {
940 cmn_err(CE_WARN, "fas: cannot add intr");
941 mutex_exit(FAS_MUTEX(fas));
942 goto fail;
943 }
944 intr_added++;
945
946 /*
947 * initialize fas chip
948 */
949 if (fas_init_chip(fas, id)) {
950 cmn_err(CE_WARN, "fas: cannot initialize");
951 mutex_exit(FAS_MUTEX(fas));
952 goto fail;
953 }
954 mutex_exit(FAS_MUTEX(fas));
955
956 /*
957 * create kmem cache for packets
958 */
959 (void) sprintf(buf, "fas%d_cache", instance);
960 fas->f_kmem_cache = kmem_cache_create(buf,
961 EXTCMD_SIZE, 8,
962 fas_kmem_cache_constructor, fas_kmem_cache_destructor,
963 NULL, (void *)fas, NULL, 0);
964 if (fas->f_kmem_cache == NULL) {
965 cmn_err(CE_WARN, "fas: cannot create kmem_cache");
966 goto fail;
967 }
968
969 /*
970 * at this point, we are not going to fail the attach
971 * so there is no need to undo the rest:
972 *
973 * add this fas to the list; this makes debugging easier
974 * and fas_watch() needs it to walk thru all fas's
975 */
976 rw_enter(&fas_global_rwlock, RW_WRITER);
977 if (fas_head == NULL) {
978 fas_head = fas;
979 } else {
980 fas_tail->f_next = fas;
981 }
982 fas_tail = fas; /* point to last fas in list */
983 rw_exit(&fas_global_rwlock);
984
985 /*
986 * there is one watchdog handler for all driver instances.
987 * start the watchdog if it hasn't been done yet
988 */
989 mutex_enter(&fas_global_mutex);
990 if (fas_scsi_watchdog_tick == 0) {
991 fas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
992 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
993 if (fas_scsi_watchdog_tick != DEFAULT_WD_TICK) {
994 fas_log(fas, CE_NOTE, "?scsi-watchdog-tick=%d\n",
995 fas_scsi_watchdog_tick);
996 }
997 fas_tick = drv_usectohz((clock_t)
998 fas_scsi_watchdog_tick * 1000000);
999 IPRINTF2("fas scsi watchdog tick=%x, fas_tick=%lx\n",
1000 fas_scsi_watchdog_tick, fas_tick);
1001 if (fas_timeout_id == 0) {
1002 fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
1003 fas_timeout_initted = 1;
1004 }
1005 }
1006 mutex_exit(&fas_global_mutex);
1007
1008 ddi_report_dev(dip);
1009
1010 return (DDI_SUCCESS);
1011
1012 fail:
1013 cmn_err(CE_WARN, "fas%d: cannot attach", instance);
1014 if (fas) {
1015 for (slot = 0; slot < N_SLOTS; slot++) {
1016 struct f_slots *active = fas->f_active[slot];
1017 if (active) {
1018 kmem_free(active, active->f_size);
1019 fas->f_active[slot] = NULL;
1020 }
1021 }
1022 if (mutex_init_done) {
1023 mutex_destroy(&fas->f_mutex);
1024 mutex_destroy(&fas->f_waitQ_mutex);
1025 cv_destroy(&fas->f_cv);
1026 }
1027 if (intr_added) {
1028 ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);
1029 }
1030 /*
1031 * kstat_intr support
1032 */
1033 if (fas->f_intr_kstat) {
1034 kstat_delete(fas->f_intr_kstat);
1035 }
1036 if (hba_attached) {
1037 (void) scsi_hba_detach(dip);
1038 }
1039 if (tran) {
1040 scsi_hba_tran_free(tran);
1041 }
1042 if (fas->f_kmem_cache) {
1043 kmem_cache_destroy(fas->f_kmem_cache);
1044 }
1045 if (fas->f_cmdarea) {
1046 if (bound_handle) {
1047 (void) ddi_dma_unbind_handle(fas->f_dmahandle);
1048 }
1049 ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
1050 }
1051 if (fas->f_dmahandle) {
1052 ddi_dma_free_handle(&fas->f_dmahandle);
1053 }
1054 fas_destroy_callbacks(fas);
1055 if (fas->f_regs_acc_handle) {
1056 ddi_regs_map_free(&fas->f_regs_acc_handle);
1057 }
1058 if (fas->f_dmar_acc_handle) {
1059 ddi_regs_map_free(&fas->f_dmar_acc_handle);
1060 }
1061 ddi_soft_state_free(fas_state, instance);
1062
1063 ddi_remove_minor_node(dip, NULL);
1064 }
1065 return (DDI_FAILURE);
1066 }
1067
1068 /*ARGSUSED*/
1069 static int
1070 fas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1071 {
1072 struct fas *fas, *nfas;
1073 scsi_hba_tran_t *tran;
1074
1075 /* CONSTCOND */
1076 ASSERT(NO_COMPETING_THREADS);
1077
1078 switch (cmd) {
1079 case DDI_DETACH:
1080 return (fas_dr_detach(dip));
1081
1082 case DDI_SUSPEND:
1083 if ((tran = ddi_get_driver_private(dip)) == NULL)
1084 return (DDI_FAILURE);
1085
1086 fas = TRAN2FAS(tran);
1087 if (!fas) {
1088 return (DDI_FAILURE);
1089 }
1090
1091 mutex_enter(FAS_MUTEX(fas));
1092
1093 fas->f_suspended = 1;
1094
1095 if (fas->f_ncmds) {
1096 (void) fas_reset_bus(fas);
1097 (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
1098 }
1099 /*
1100 * disable dma and fas interrupt
1101 */
1102 fas->f_dma_csr &= ~DMA_INTEN;
1103 fas->f_dma_csr &= ~DMA_ENDVMA;
1104 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
1105
1106 mutex_exit(FAS_MUTEX(fas));
1107
1108 if (fas->f_quiesce_timeid) {
1109 (void) untimeout(fas->f_quiesce_timeid);
1110 fas->f_quiesce_timeid = 0;
1111 }
1112
1113 if (fas->f_restart_cmd_timeid) {
1114 (void) untimeout(fas->f_restart_cmd_timeid);
1115 fas->f_restart_cmd_timeid = 0;
1116 }
1117
1118 /* Last fas? */
1119 rw_enter(&fas_global_rwlock, RW_WRITER);
1120 for (nfas = fas_head; nfas; nfas = nfas->f_next) {
1121 if (!nfas->f_suspended) {
1122 rw_exit(&fas_global_rwlock);
1123 return (DDI_SUCCESS);
1124 }
1125 }
1126 rw_exit(&fas_global_rwlock);
1127
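	/*
	 * clear each timeout id while holding fas_global_mutex but
	 * call untimeout() only after dropping it; untimeout() can
	 * wait for a running handler that may itself need
	 * fas_global_mutex, so holding the mutex across the call
	 * could deadlock
	 */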
1128 mutex_enter(&fas_global_mutex);
1129 if (fas_timeout_id != 0) {
1130 timeout_id_t tid = fas_timeout_id;
1131 fas_timeout_id = 0;
1132 fas_timeout_initted = 0;
1133 mutex_exit(&fas_global_mutex);
1134 (void) untimeout(tid);
1135 } else {
1136 mutex_exit(&fas_global_mutex);
1137 }
1138
1139 mutex_enter(&fas_global_mutex);
1140 if (fas_reset_watch) {
1141 timeout_id_t tid = fas_reset_watch;
1142 fas_reset_watch = 0;
1143 mutex_exit(&fas_global_mutex);
1144 (void) untimeout(tid);
1145 } else {
1146 mutex_exit(&fas_global_mutex);
1147 }
1148
1149 return (DDI_SUCCESS);
1150
1151 default:
1152 return (DDI_FAILURE);
1153 }
1154 _NOTE(NOT_REACHED)
1155 /* NOTREACHED */
1156 }
1157
1158 static int
1159 fas_dr_detach(dev_info_t *dip)
1160 {
1161 struct fas *fas, *f;
1162 scsi_hba_tran_t *tran;
1163 short slot;
1164 int i, j;
1165
1166 if ((tran = ddi_get_driver_private(dip)) == NULL)
1167 return (DDI_FAILURE);
1168
1169 fas = TRAN2FAS(tran);
1170 if (!fas) {
1171 return (DDI_FAILURE);
1172 }
1173
1174 /*
1175 * disable interrupts
1176 */
1177 fas->f_dma_csr &= ~DMA_INTEN;
1178 fas->f_dma->dma_csr = fas->f_dma_csr;
1179 ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);
1180
1181 /*
1182 * Remove device instance from the global linked list
1183 */
1184 rw_enter(&fas_global_rwlock, RW_WRITER);
1185
1186 if (fas_head == fas) {
1187 f = fas_head = fas->f_next;
1188 } else {
1189 for (f = fas_head; f != (struct fas *)NULL; f = f->f_next) {
1190 if (f->f_next == fas) {
1191 f->f_next = fas->f_next;
1192 break;
1193 }
1194 }
1195
1196 /*
1197 * Instance not found in the softc list; don't
1198 * re-enable interrupts, as the instance is
1199 * effectively unusable.
1201 */
1202 if (f == (struct fas *)NULL) {
1203 cmn_err(CE_WARN, "fas_dr_detach: fas instance not"
1204 " in softc list!");
1205 rw_exit(&fas_global_rwlock);
1206 return (DDI_FAILURE);
1207 }
1208
1209
1210 }
1211
1212 if (fas_tail == fas)
1213 fas_tail = f;
1214
1215 rw_exit(&fas_global_rwlock);
1216
1217 if (fas->f_intr_kstat)
1218 kstat_delete(fas->f_intr_kstat);
1219
1220 fas_destroy_callbacks(fas);
1221
1222 scsi_hba_reset_notify_tear_down(fas->f_reset_notify_listf);
1223
1224 mutex_enter(&fas_global_mutex);
1225 /*
1226 * destroy any outstanding tagged command info
1227 */
1228 for (slot = 0; slot < N_SLOTS; slot++) {
1229 struct f_slots *active = fas->f_active[slot];
1230 if (active) {
1231 ushort_t tag;
1232 for (tag = 0; tag < active->f_n_slots; tag++) {
1233 struct fas_cmd *sp = active->f_slot[tag];
1234 if (sp) {
1235 struct scsi_pkt *pkt = sp->cmd_pkt;
1236 if (pkt) {
1237 (void) fas_scsi_destroy_pkt(
1238 &pkt->pkt_address, pkt);
1239 }
1240 /* sp freed in fas_scsi_destroy_pkt */
1241 active->f_slot[tag] = NULL;
1242 }
1243 }
1244 kmem_free(active, active->f_size);
1245 fas->f_active[slot] = NULL;
1246 }
1247 ASSERT(fas->f_tcmds[slot] == 0);
1248 }
1249
1250 /*
1251 * disallow timeout thread rescheduling
1252 */
1253 fas->f_flags |= FAS_FLG_NOTIMEOUTS;
1254 mutex_exit(&fas_global_mutex);
1255
1256 if (fas->f_quiesce_timeid) {
1257 (void) untimeout(fas->f_quiesce_timeid);
1258 }
1259
1260 /*
1261 * last fas? ... if active, CANCEL watch threads.
1262 */
1263 mutex_enter(&fas_global_mutex);
1264 if (fas_head == (struct fas *)NULL) {
1265 if (fas_timeout_initted) {
1266 timeout_id_t tid = fas_timeout_id;
1267 fas_timeout_initted = 0;
1268 fas_timeout_id = 0; /* don't resched */
1269 mutex_exit(&fas_global_mutex);
1270 (void) untimeout(tid);
1271 mutex_enter(&fas_global_mutex);
1272 }
1273
1274 if (fas_reset_watch) {
1275 mutex_exit(&fas_global_mutex);
1276 (void) untimeout(fas_reset_watch);
1277 mutex_enter(&fas_global_mutex);
1278 fas_reset_watch = 0;
1279 }
1280 }
1281 mutex_exit(&fas_global_mutex);
1282
1283 if (fas->f_restart_cmd_timeid) {
1284 (void) untimeout(fas->f_restart_cmd_timeid);
1285 fas->f_restart_cmd_timeid = 0;
1286 }
1287
1288 /*
1289 * destroy outstanding ARQ pkts
1290 */
1291 for (i = 0; i < NTARGETS_WIDE; i++) {
1292 for (j = 0; j < NLUNS_PER_TARGET; j++) {
1293 int slot = i * NLUNS_PER_TARGET | j;
1294 if (fas->f_arq_pkt[slot]) {
1295 struct scsi_address sa;
1296 sa.a_hba_tran = NULL; /* not used */
1297 sa.a_target = (ushort_t)i;
1298 sa.a_lun = (uchar_t)j;
1299 (void) fas_delete_arq_pkt(fas, &sa);
1300 }
1301 }
1302 }
1303
1304 /*
1305 * Remove device MT locks and CV
1306 */
1307 mutex_destroy(&fas->f_waitQ_mutex);
1308 mutex_destroy(&fas->f_mutex);
1309 cv_destroy(&fas->f_cv);
1310
1311 /*
1312 * Release miscellaneous device resources
1313 */
1314
1315 if (fas->f_kmem_cache) {
1316 kmem_cache_destroy(fas->f_kmem_cache);
1317 }
1318
1319 if (fas->f_cmdarea != (uchar_t *)NULL) {
1320 (void) ddi_dma_unbind_handle(fas->f_dmahandle);
1321 ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
1322 }
1323
1324 if (fas->f_dmahandle != (ddi_dma_handle_t)NULL) {
1325 ddi_dma_free_handle(&fas->f_dmahandle);
1326 }
1327
1328 if (fas->f_regs_acc_handle) {
1329 ddi_regs_map_free(&fas->f_regs_acc_handle);
1330 }
1331 if (fas->f_dmar_acc_handle) {
1332 ddi_regs_map_free(&fas->f_dmar_acc_handle);
1333 }
1334
1335 /*
1336 * Remove properties created during attach()
1337 */
1338 ddi_prop_remove_all(dip);
1339
1340 /*
1341 * Delete the DMA limits, transport vectors and remove the device
1342 * links to the scsi_transport layer.
1343 * -- ddi_set_driver_private(dip, NULL)
1344 */
1345 (void) scsi_hba_detach(dip);
1346
1347 /*
1348 * Free the scsi_transport structure for this device.
1349 */
1350 scsi_hba_tran_free(tran);
1351
1352 ddi_soft_state_free(fas_state, ddi_get_instance(dip));
1353
1354 return (DDI_SUCCESS);
1355 }
1356
1357 static int
1358 fas_quiesce_bus(struct fas *fas)
1359 {
1360 mutex_enter(FAS_MUTEX(fas));
1361 IPRINTF("fas_quiesce: QUIESCEing\n");
1362 IPRINTF3("fas_quiesce: ncmds (%d) ndisc (%d) state (%d)\n",
1363 fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
1364 fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
1365 if (fas_check_outstanding(fas)) {
1366 fas->f_softstate |= FAS_SS_DRAINING;
1367 fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
1368 fas, (FAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
1369 if (cv_wait_sig(FAS_CV(fas), FAS_MUTEX(fas)) == 0) {
1370 /*
1371 * quiesce has been interrupted.
1372 */
1373 IPRINTF("fas_quiesce: abort QUIESCE\n");
1374 fas->f_softstate &= ~FAS_SS_DRAINING;
1375 fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
1376 (void) fas_istart(fas);
1377 if (fas->f_quiesce_timeid != 0) {
1378 mutex_exit(FAS_MUTEX(fas));
1379 #ifndef __lock_lint /* warlock complains but there is a NOTE on this */
1380 (void) untimeout(fas->f_quiesce_timeid);
1381 fas->f_quiesce_timeid = 0;
1382 #endif
1383 return (-1);
1384 }
1385 mutex_exit(FAS_MUTEX(fas));
1386 return (-1);
1387 } else {
1388 IPRINTF("fas_quiesce: bus is QUIESCED\n");
1389 ASSERT(fas->f_quiesce_timeid == 0);
1390 fas->f_softstate &= ~FAS_SS_DRAINING;
1391 fas->f_softstate |= FAS_SS_QUIESCED;
1392 mutex_exit(FAS_MUTEX(fas));
1393 return (0);
1394 }
1395 }
1396 IPRINTF("fas_quiesce: bus was not busy QUIESCED\n");
1397 mutex_exit(FAS_MUTEX(fas));
1398 return (0);
1399 }
1400
1401 static int
1402 fas_unquiesce_bus(struct fas *fas)
1403 {
1404 mutex_enter(FAS_MUTEX(fas));
1405 fas->f_softstate &= ~FAS_SS_QUIESCED;
1406 fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
1407 (void) fas_istart(fas);
1408 IPRINTF("fas_quiesce: bus has been UNQUIESCED\n");
1409 mutex_exit(FAS_MUTEX(fas));
1410
1411 return (0);
1412 }
1413
1414 /*
1415 * invoked from timeout() to check the number of outstanding commands
1416 */
1417 static void
1418 fas_ncmds_checkdrain(void *arg)
1419 {
1420 struct fas *fas = arg;
1421
1422 mutex_enter(FAS_MUTEX(fas));
1423 IPRINTF3("fas_checkdrain: ncmds (%d) ndisc (%d) state (%d)\n",
1424 fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
1425 if (fas->f_softstate & FAS_SS_DRAINING) {
1426 fas->f_quiesce_timeid = 0;
1427 if (fas_check_outstanding(fas) == 0) {
1428 IPRINTF("fas_drain: bus has drained\n");
1429 cv_signal(FAS_CV(fas));
1430 } else {
1431 /*
1432 * throttle may have been reset by a bus reset
1433 * or fas_runpoll()
1434 * XXX shouldn't be necessary
1435 */
1436 fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
1437 IPRINTF("fas_drain: rescheduling timeout\n");
1438 fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
1439 fas, (FAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
1440 }
1441 }
1442 mutex_exit(FAS_MUTEX(fas));
1443 }
1444
1445 static int
1446 fas_check_outstanding(struct fas *fas)
1447 {
1448 uint_t slot;
1449 uint_t d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
1450 int ncmds = 0;
1451
1452 ASSERT(mutex_owned(FAS_MUTEX(fas)));
1453
1454 for (slot = 0; slot < N_SLOTS; slot += d)
1455 ncmds += fas->f_tcmds[slot];
1456
1457 return (ncmds);
1458 }
1459
1460
1461 #ifdef FASDEBUG
1462 /*
1463 * fas register read/write functions with tracing
1464 */
1465 static void
1466 fas_reg_tracing(struct fas *fas, int type, int regno, uint32_t what)
1467 {
1468 fas->f_reg_trace[fas->f_reg_trace_index++] = type;
1469 fas->f_reg_trace[fas->f_reg_trace_index++] = regno;
1470 fas->f_reg_trace[fas->f_reg_trace_index++] = what;
1471 fas->f_reg_trace[fas->f_reg_trace_index++] = gethrtime();
1472 fas->f_reg_trace[fas->f_reg_trace_index] = 0xff;
1473 if (fas->f_reg_trace_index >= REG_TRACE_BUF_SIZE) {
1474 fas->f_reg_trace_index = 0;
1475 }
1476 }
1477
1478 static void
1479 fas_reg_cmd_write(struct fas *fas, uint8_t cmd)
1480 {
1481 volatile struct fasreg *fasreg = fas->f_reg;
1482 int regno = (uintptr_t)&fasreg->fas_cmd - (uintptr_t)fasreg;
1483
1484 fasreg->fas_cmd = cmd;
1485 fas->f_last_cmd = cmd;
1486
1487 EPRINTF1("issuing cmd %x\n", (uchar_t)cmd);
1488 fas_reg_tracing(fas, 0, regno, cmd);
1489
1490 fas->f_reg_cmds++;
1491 }
1492
1493 static void
1494 fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what)
1495 {
1496 int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;
1497
1498 *p = what;
1499
1500 EPRINTF2("writing reg%x = %x\n", regno, what);
1501 fas_reg_tracing(fas, 1, regno, what);
1502
1503 fas->f_reg_writes++;
1504 }
1505
1506 static uint8_t
1507 fas_reg_read(struct fas *fas, volatile uint8_t *p)
1508 {
1509 uint8_t what;
1510 int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;
1511
1512 what = *p;
1513
1514 EPRINTF2("reading reg%x => %x\n", regno, what);
1515 fas_reg_tracing(fas, 2, regno, what);
1516
1517 fas->f_reg_reads++;
1518
1519 return (what);
1520 }
1521
1522 /*
1523 * dma register access routines
1524 */
1525 static void
1526 fas_dma_reg_write(struct fas *fas, volatile uint32_t *p, uint32_t what)
1527 {
1528 *p = what;
1529 fas->f_reg_dma_writes++;
1530
1531 #ifdef DMA_REG_TRACING
1532 {
1533 int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
1534 EPRINTF2("writing dma reg%x = %x\n", regno, what);
1535 fas_reg_tracing(fas, 3, regno, what);
1536 }
1537 #endif
1538 }
1539
1540 static uint32_t
1541 fas_dma_reg_read(struct fas *fas, volatile uint32_t *p)
1542 {
1543 uint32_t what = *p;
1544 fas->f_reg_dma_reads++;
1545
1546 #ifdef DMA_REG_TRACING
1547 {
1548 int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
1549 EPRINTF2("reading dma reg%x => %x\n", regno, what);
1550 fas_reg_tracing(fas, 4, regno, what);
1551 }
1552 #endif
1553 return (what);
1554 }
1555 #endif
1556
1557 #define FIFO_EMPTY(fas) (fas_reg_read(fas, &fas->f_reg->fas_stat2) & \
1558 FAS_STAT2_EMPTY)
1559 #define FIFO_CNT(fas) \
1560 (fas_reg_read(fas, &fas->f_reg->fas_fifo_flag) & FIFO_CNT_MASK)
1561
1562 #ifdef FASDEBUG
1563 static void
1564 fas_assert_atn(struct fas *fas)
1565 {
1566 fas_reg_cmd_write(fas, CMD_SET_ATN);
1567 #ifdef FAS_TEST
1568 if (fas_test_stop > 1)
1569 debug_enter("asserted atn");
1570 #endif
1571 }
1572 #else
1573 #define fas_assert_atn(fas) fas_reg_cmd_write(fas, CMD_SET_ATN)
1574 #endif
1575
1576 /*
1577 * DMA macros; we use a shadow copy of the dma_csr to save unnecessary
1578 * reads
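 *
 * the write macros program the FAS transfer counter and issue the
 * chip command before the DMA engine itself is loaded and enabled;
 * FAS_DMA_WRITE_SETUP defers issuing the chip command to the caller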
1579 */
1580 #define FAS_DMA_WRITE(fas, count, base, cmd) { \
1581 volatile struct fasreg *fasreg = fas->f_reg; \
1582 volatile struct dma *dmar = fas->f_dma; \
1583 ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
1584 SET_FAS_COUNT(fasreg, count); \
1585 fas_reg_cmd_write(fas, cmd); \
1586 fas_dma_reg_write(fas, &dmar->dma_count, count); \
1587 fas->f_dma_csr |= \
1588 DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
1589 fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
1590 fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
1591 }
1592
1593 #define FAS_DMA_WRITE_SETUP(fas, count, base) { \
1594 volatile struct fasreg *fasreg = fas->f_reg; \
1595 volatile struct dma *dmar = fas->f_dma; \
1596 ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
1597 SET_FAS_COUNT(fasreg, count); \
1598 fas_dma_reg_write(fas, &dmar->dma_count, count); \
1599 fas->f_dma_csr |= \
1600 DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
1601 fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
1602 }
1603
1604
1605 #define FAS_DMA_READ(fas, count, base, dmacount, cmd) { \
1606 volatile struct fasreg *fasreg = fas->f_reg; \
1607 volatile struct dma *dmar = fas->f_dma; \
1608 ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
1609 SET_FAS_COUNT(fasreg, count); \
1610 fas_reg_cmd_write(fas, cmd); \
1611 fas->f_dma_csr |= \
1612 (fas->f_dma_csr & ~DMA_WRITE) | DMA_ENDVMA | DMA_DSBL_DRAIN; \
1613 fas_dma_reg_write(fas, &dmar->dma_count, dmacount); \
1614 fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
1615 fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
1616 }
1617
1618 static void
1619 FAS_FLUSH_DMA(struct fas *fas)
1620 {
1621 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
1622 fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
1623 DMA_DSBL_DRAIN);
1624 fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
1625 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
1626 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
1627 fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
1628 }
1629
1630 /*
1631 * FAS_FLUSH_DMA_HARD checks on REQPEND before taking away the reset
1632 */
1633 static void
1634 FAS_FLUSH_DMA_HARD(struct fas *fas)
1635 {
1636 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
1637 fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
1638 DMA_DSBL_DRAIN);
1639 fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
1640 while (fas_dma_reg_read(fas, &fas->f_dma->dma_csr) & DMA_REQPEND)
1641 ;
1642 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
1643 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
1644 fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
1645 }
1646
1647 /*
1648 * update period, conf3, offset reg, if necessary
1649 */
1650 #define FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target) \
1651 { \
1652 uchar_t period, offset, conf3; \
1653 period = fas->f_sync_period[target] & SYNC_PERIOD_MASK; \
1654 offset = fas->f_offset[target]; \
1655 conf3 = fas->f_fasconf3[target]; \
1656 if ((period != fas->f_period_reg_last) || \
1657 (offset != fas->f_offset_reg_last) || \
1658 (conf3 != fas->f_fasconf3_reg_last)) { \
1659 fas->f_period_reg_last = period; \
1660 fas->f_offset_reg_last = offset; \
1661 fas->f_fasconf3_reg_last = conf3; \
1662 fas_reg_write(fas, &fasreg->fas_sync_period, period); \
1663 fas_reg_write(fas, &fasreg->fas_sync_offset, offset); \
1664 fas_reg_write(fas, &fasreg->fas_conf3, conf3); \
1665 } \
1666 }
1667
1668 /*
1669 * fifo read/write routines
1670 * always read the fifo bytes before reading the interrupt register
1671 */
1672
1673 static void
1674 fas_read_fifo(struct fas *fas)
1675 {
1676 int stat = fas->f_stat;
1677 volatile struct fasreg *fasreg = fas->f_reg;
1678 int i;
1679
1680 i = fas_reg_read(fas, &fasreg->fas_fifo_flag) & FIFO_CNT_MASK;
1681 EPRINTF2("fas_read_fifo: fifo cnt=%x, stat=%x\n", i, stat);
1682 ASSERT(i <= FIFOSIZE);
1683
1684 fas->f_fifolen = 0;
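	/*
	 * the fas366 fifo is 16 bits wide: each unit of the fifo flag
	 * count read above covers two data bytes, hence the paired
	 * reads below
	 */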
1685 while (i-- > 0) {
1686 fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1687 &fasreg->fas_fifo_data);
1688 fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1689 &fasreg->fas_fifo_data);
1690 }
1691 if (fas->f_stat2 & FAS_STAT2_ISHUTTLE) {
1692
1693 /* write pad byte */
1694 fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
1695 fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
1696 &fasreg->fas_fifo_data);
1697 /* flush pad byte */
1698 fas_reg_cmd_write(fas, CMD_FLUSH);
1699 }
1700 EPRINTF2("fas_read_fifo: fifo len=%x, stat2=%x\n",
1701 fas->f_fifolen, stat);
1702 } /* fas_read_fifo */
1703
1704 static void
1705 fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad)
1706 {
1707 int i;
1708 volatile struct fasreg *fasreg = fas->f_reg;
1709
1710 EPRINTF1("writing fifo %x bytes\n", length);
1711 ASSERT(length <= 15);
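	/*
	 * when pad is set, each data byte is followed by a zero byte,
	 * presumably so the bytes line up in the 16-bit wide fifo
	 */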
1712 fas_reg_cmd_write(fas, CMD_FLUSH);
1713 for (i = 0; i < length; i++) {
1714 fas_reg_write(fas, &fasreg->fas_fifo_data, buf[i]);
1715 if (pad) {
1716 fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
1717 }
1718 }
1719 }
1720
1721 /*
1722 * Hardware and Software internal reset routines
1723 */
1724 static int
1725 fas_init_chip(struct fas *fas, uchar_t initiator_id)
1726 {
1727 int i;
1728 uchar_t clock_conv;
1729 uchar_t initial_conf3;
1730 uint_t ticks;
1731 static char *prop_cfreq = "clock-frequency";
1732
1733 /*
1734 * Determine clock frequency of attached FAS chip.
1735 */
1736 i = ddi_prop_get_int(DDI_DEV_T_ANY,
1737 fas->f_dev, DDI_PROP_DONTPASS, prop_cfreq, -1);
1738 clock_conv = (i + FIVE_MEG - 1) / FIVE_MEG;
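	/*
	 * e.g. a 40MHz "clock-frequency" property gives
	 * clock_conv = (40000000 + 4999999) / 5000000 = 8; anything
	 * other than CLOCK_40MHZ is rejected below
	 */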
1739 if (clock_conv != CLOCK_40MHZ) {
1740 fas_log(fas, CE_WARN, "Bad clock frequency");
1741 return (-1);
1742 }
1743
1744 fas->f_clock_conv = clock_conv;
1745 fas->f_clock_cycle = CLOCK_PERIOD(i);
1746 ticks = FAS_CLOCK_TICK(fas);
1747 fas->f_stval = FAS_CLOCK_TIMEOUT(ticks, fas_selection_timeout);
1748
1749 DPRINTF5("%d mhz, clock_conv %d, clock_cycle %d, ticks %d, stval %d\n",
1750 i, fas->f_clock_conv, fas->f_clock_cycle,
1751 ticks, fas->f_stval);
1752 /*
1753 * set up conf registers
1754 */
1755 fas->f_fasconf |= FAS_CONF_PAREN;
1756 fas->f_fasconf2 = (uchar_t)(FAS_CONF2_FENABLE | FAS_CONF2_XL32);
1757
1758 if (initiator_id < NTARGETS) {
1759 initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO;
1760 } else {
1761 initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO |
1762 FAS_CONF3_IDBIT3;
1763 }
1764
1765 for (i = 0; i < NTARGETS_WIDE; i++) {
1766 fas->f_fasconf3[i] = initial_conf3;
1767 }
1768
1769 /*
1770 * Avoid resetting the scsi bus since this causes a few seconds of
1771 * delay per fas at boot and also causes busy conditions in some
1772 * tape devices.
1773 */
1774 fas_internal_reset(fas, FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);
1775
1776 /*
1777 * initialize period and offset for each target
1778 */
1779 for (i = 0; i < NTARGETS_WIDE; i++) {
1780 if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_SYNC) {
1781 fas->f_offset[i] = fas_default_offset |
1782 fas->f_req_ack_delay;
1783 } else {
1784 fas->f_offset[i] = 0;
1785 }
1786 if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_FAST) {
1787 fas->f_neg_period[i] =
1788 (uchar_t)MIN_SYNC_PERIOD(fas);
1789 } else {
1790 fas->f_neg_period[i] =
1791 (uchar_t)CONVERT_PERIOD(DEFAULT_SYNC_PERIOD);
1792 }
1793 }
1794 return (0);
1795 }
1796
1797 /*
1798 * reset bus, chip, dma, or soft state
1799 */
1800 static void
1801 fas_internal_reset(struct fas *fas, int reset_action)
1802 {
1803 volatile struct fasreg *fasreg = fas->f_reg;
1804 volatile struct dma *dmar = fas->f_dma;
1805
1806 if (reset_action & FAS_RESET_SCSIBUS) {
1807 fas_reg_cmd_write(fas, CMD_RESET_SCSI);
1808 fas_setup_reset_delay(fas);
1809 }
1810
1811 FAS_FLUSH_DMA_HARD(fas); /* resets and reinits the dma */
1812
1813 /*
1814 * NOTE: if dma is aborted while active, indefinite hangs
1815 * may occur; it is preferable to stop the target first before
1816 * flushing the dma
1817 */
1818 if (reset_action & FAS_RESET_DMA) {
1819 int burstsizes = fas->f_dma_attr->dma_attr_burstsizes;
1820 if (burstsizes & BURST64) {
1821 IPRINTF("64 byte burstsize\n");
1822 fas->f_dma_csr |= DMA_BURST64;
1823 } else if (burstsizes & BURST32) {
1824 IPRINTF("32 byte burstsize\n");
1825 fas->f_dma_csr |= DMA_BURST32;
1826 } else {
1827 IPRINTF("16 byte burstsize\n");
1828 }
1829 if ((fas->f_hm_rev > 0x20) && (fas_enable_sbus64) &&
1830 (ddi_dma_set_sbus64(fas->f_dmahandle, burstsizes) ==
1831 DDI_SUCCESS)) {
1832 IPRINTF("enabled 64 bit sbus\n");
1833 fas->f_dma_csr |= DMA_WIDE_EN;
1834 }
1835 }
1836
1837 if (reset_action & FAS_RESET_FAS) {
1838 /*
1839 * 2 NOPs with DMA are required here
1840 * (id_code is unreliable if we don't do this)
1841 */
1842 uchar_t idcode, fcode;
1843 int dmarev;
1844
1845 fas_reg_cmd_write(fas, CMD_RESET_FAS);
1846 fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);
1847 fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);
1848
1849 /*
1850 * Re-load chip configurations
1851 * Only load registers which are not loaded in fas_startcmd()
1852 */
1853 fas_reg_write(fas, &fasreg->fas_clock_conv,
1854 (fas->f_clock_conv & CLOCK_MASK));
1855
1856 fas_reg_write(fas, &fasreg->fas_timeout, fas->f_stval);
1857
1858 /*
1859 * enable default configurations
1860 */
1861 fas->f_idcode = idcode =
1862 fas_reg_read(fas, &fasreg->fas_id_code);
1863 fcode = (uchar_t)(idcode & FAS_FCODE_MASK) >> (uchar_t)3;
1864 fas->f_type = FAS366;
1865 IPRINTF2("Family code %d, revision %d\n",
1866 fcode, (idcode & FAS_REV_MASK));
1867 dmarev = fas_dma_reg_read(fas, &dmar->dma_csr);
1868 dmarev = (dmarev >> 11) & 0xf;
1869 IPRINTF1("DMA channel revision %d\n", dmarev);
1870
1871 fas_reg_write(fas, &fasreg->fas_conf, fas->f_fasconf);
1872 fas_reg_write(fas, &fasreg->fas_conf2, fas->f_fasconf2);
1873
1874 fas->f_req_ack_delay = DEFAULT_REQ_ACK_DELAY;
1875
1876 /*
1877 * Just in case... clear interrupt
1878 */
1879 (void) fas_reg_read(fas, &fasreg->fas_intr);
1880 }
1881
1882 if (reset_action & FAS_RESET_SOFTC) {
1883 fas->f_wdtr_sent = fas->f_sdtr_sent = 0;
1884 fas->f_wide_known = fas->f_sync_known = 0;
1885 fas->f_wide_enabled = fas->f_sync_enabled = 0;
1886 fas->f_omsglen = 0;
1887 fas->f_cur_msgout[0] = fas->f_last_msgout =
1888 fas->f_last_msgin = INVALID_MSG;
1889 fas->f_abort_msg_sent = fas->f_reset_msg_sent = 0;
1890 fas->f_next_slot = 0;
1891 fas->f_current_sp = NULL;
1892 fas->f_fifolen = 0;
1893 fas->f_fasconf3_reg_last = fas->f_offset_reg_last =
1894 fas->f_period_reg_last = 0xff;
1895
1896 New_state(fas, STATE_FREE);
1897 }
1898 }
1899
1900
1901 #ifdef FASDEBUG
1902 /*
1903 * check if ncmds still reflects the truth
1904 * count all cmds for this driver instance and compare with ncmds
1905 */
1906 static void
1907 fas_check_ncmds(struct fas *fas)
1908 {
1909 int slot = 0;
1910 ushort_t tag, t;
1911 int n, total = 0;
1912
1913 do {
1914 if (fas->f_active[slot]) {
1915 struct fas_cmd *sp = fas->f_readyf[slot];
1916 t = fas->f_active[slot]->f_n_slots;
1917 while (sp != 0) {
1918 sp = sp->cmd_forw;
1919 total++;
1920 }
1921 for (n = tag = 0; tag < t; tag++) {
1922 if (fas->f_active[slot]->f_slot[tag] != 0) {
1923 n++;
1924 total++;
1925 }
1926 }
1927 ASSERT(n == fas->f_tcmds[slot]);
1928 }
1929 slot = NEXTSLOT(slot, fas->f_dslot);
1930 } while (slot != 0);
1931
1932 if (total != fas->f_ncmds) {
1933 IPRINTF2("fas_check_ncmds: total=%x, ncmds=%x\n",
1934 total, fas->f_ncmds);
1935 }
1936 ASSERT(fas->f_ncmds >= fas->f_ndisc);
1937 }
1938 #else
1939 #define fas_check_ncmds(fas)
1940 #endif
1941
1942 /*
1943 * SCSA Interface functions
1944 *
1945 * Visible to the external world via the transport structure.
1946 *
1947 * fas_scsi_abort: abort a current cmd or all cmds for a target
1948 */
1949 /*ARGSUSED*/
1950 static int
1951 fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1952 {
1953 struct fas *fas = ADDR2FAS(ap);
1954 int rval;
1955
1956 IPRINTF2("fas_scsi_abort: target %d.%d\n", ap->a_target, ap->a_lun);
1957
1958 mutex_enter(FAS_MUTEX(fas));
1959 rval = fas_do_scsi_abort(ap, pkt);
1960 fas_check_waitQ_and_mutex_exit(fas);
1961 return (rval);
1962 }
1963
1964 /*
1965 * reset handling: reset bus or target
1966 */
1967 /*ARGSUSED*/
1968 static int
1969 fas_scsi_reset(struct scsi_address *ap, int level)
1970 {
1971 struct fas *fas = ADDR2FAS(ap);
1972 int rval;
1973
1974 IPRINTF3("fas_scsi_reset: target %d.%d, level %d\n",
1975 ap->a_target, ap->a_lun, level);
1976
1977 mutex_enter(FAS_MUTEX(fas));
1978 rval = fas_do_scsi_reset(ap, level);
1979 fas_check_waitQ_and_mutex_exit(fas);
1980 return (rval);
1981 }
1982
1983 /*
1984 * entry point for reset notification setup, to register or to cancel.
1985 */
1986 static int
1987 fas_scsi_reset_notify(struct scsi_address *ap, int flag,
1988 void (*callback)(caddr_t), caddr_t arg)
1989 {
1990 struct fas *fas = ADDR2FAS(ap);
1991
1992 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1993 &fas->f_mutex, &fas->f_reset_notify_listf));
1994 }
1995
1996 /*
1997 * capability interface
1998 */
1999 /*ARGSUSED*/
2000 static int
2001 fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
2002 {
2003 struct fas *fas = ADDR2FAS(ap);
2004 DPRINTF3("fas_scsi_getcap: tgt=%x, cap=%s, whom=%x\n",
2005 ap->a_target, cap, whom);
2006 return (fas_commoncap(ap, cap, 0, whom, 0));
2007 }
2008
2009 /*ARGSUSED*/
2010 static int
2011 fas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2012 {
2013 struct fas *fas = ADDR2FAS(ap);
2014 IPRINTF4("fas_scsi_setcap: tgt=%x, cap=%s, value=%x, whom=%x\n",
2015 ap->a_target, cap, value, whom);
2016 return (fas_commoncap(ap, cap, value, whom, 1));
2017 }
2018
2019 /*
2020 * pkt and dma allocation and deallocation
2021 */
2022 /*ARGSUSED*/
2023 static void
2024 fas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2025 {
2026 struct fas_cmd *cmd = PKT2CMD(pkt);
2027
2028 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2029 "fas_scsi_dmafree_start");
2030
2031 if (cmd->cmd_flags & CFLAG_DMAVALID) {
2032 /*
2033 * Free the mapping.
2034 */
2035 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
2036 cmd->cmd_flags ^= CFLAG_DMAVALID;
2037 }
2038 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2039 "fas_scsi_dmafree_end");
2040 }
2041
2042 /*ARGSUSED*/
2043 static void
2044 fas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2045 {
2046 struct fas_cmd *sp = PKT2CMD(pkt);
2047
2048 if (sp->cmd_flags & CFLAG_DMAVALID) {
2049 if (ddi_dma_sync(sp->cmd_dmahandle, 0, 0,
2050 (sp->cmd_flags & CFLAG_DMASEND) ?
2051 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
2052 DDI_SUCCESS) {
2053 fas_log(ADDR2FAS(ap), CE_WARN,
2054 "sync of pkt (%p) failed", (void *)pkt);
2055 }
2056 }
2057 }
2058
2059 /*
2060 * initialize pkt and allocate DVMA resources
2061 */
2062 static struct scsi_pkt *
2063 fas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
2064 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
2065 int flags, int (*callback)(), caddr_t arg)
2066 {
2067 int kf;
2068 int failure = 1;
2069 struct fas_cmd *cmd;
2070 struct fas *fas = ADDR2FAS(ap);
2071 struct fas_cmd *new_cmd;
2072 int rval;
2073
2074 /* #define FAS_TEST_EXTRN_ALLOC */
2075 #ifdef FAS_TEST_EXTRN_ALLOC
2076 cmdlen *= 4; statuslen *= 4; tgtlen *= 4;
2077 #endif
2078 /*
2079 * if no pkt was passed then allocate a pkt first
2080 */
2081 if (pkt == NULL) {
2082 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_START,
2083 "fas_scsi_impl_pktalloc_start");
2084
2085 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
2086
2087 /*
2088 * only one size of pkt (with arq).
2089 */
2090 cmd = kmem_cache_alloc(fas->f_kmem_cache, kf);
2091
2092 if (cmd) {
2093
2094 ddi_dma_handle_t save_dma_handle;
2095
2096 save_dma_handle = cmd->cmd_dmahandle;
2097 bzero(cmd, EXTCMD_SIZE);
2098 cmd->cmd_dmahandle = save_dma_handle;
2099
2100 pkt = (struct scsi_pkt *)((uchar_t *)cmd +
2101 sizeof (struct fas_cmd));
2102 cmd->cmd_pkt = pkt;
2103 pkt->pkt_ha_private = (opaque_t)cmd;
2104 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
2105 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
2106 pkt->pkt_address = *ap;
2107
2109 pkt->pkt_private = cmd->cmd_pkt_private;
2110
2111 cmd->cmd_cdblen = cmdlen;
2112 cmd->cmd_scblen = statuslen;
2113 cmd->cmd_privlen = tgtlen;
2114 cmd->cmd_slot =
2115 (Tgt(cmd) * NLUNS_PER_TARGET) | Lun(cmd);
2116 failure = 0;
2117 }
2118 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
2119 (tgtlen > PKT_PRIV_LEN) ||
2120 (statuslen > EXTCMDS_STATUS_SIZE)) {
2121 if (failure == 0) {
2122 /*
2123 * if extern alloc fails, all will be
2124 * deallocated, including cmd
2125 */
2126 failure = fas_pkt_alloc_extern(fas, cmd,
2127 cmdlen, tgtlen, statuslen, kf);
2128 }
2129 if (failure) {
2130 /*
2131 * nothing to deallocate so just return
2132 */
2133 TRACE_0(TR_FAC_SCSI_FAS,
2134 TR_FAS_SCSI_IMPL_PKTALLOC_END,
2135 "fas_scsi_impl_pktalloc_end");
2136 return (NULL);
2137 }
2138 }
2139
2140 new_cmd = cmd;
2141
2142 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_END,
2143 "fas_scsi_impl_pktalloc_end");
2144 } else {
2145 cmd = PKT2CMD(pkt);
2146 new_cmd = NULL;
2147 }
2148
2149 /*
2150 * Second step of fas_scsi_init_pkt:
2151 * bind the buf to the handle
2152 */
2153 if (bp && bp->b_bcount != 0 &&
2154 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
2155
2156 int cmd_flags, dma_flags;
2157 uint_t dmacookie_count;
2158
2159 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_START,
2160 "fas_scsi_impl_dmaget_start");
2161
2162 cmd_flags = cmd->cmd_flags;
2163
2164 if (bp->b_flags & B_READ) {
2165 cmd_flags &= ~CFLAG_DMASEND;
2166 dma_flags = DDI_DMA_READ | DDI_DMA_PARTIAL;
2167 } else {
2168 cmd_flags |= CFLAG_DMASEND;
2169 dma_flags = DDI_DMA_WRITE | DDI_DMA_PARTIAL;
2170 }
2171 if (flags & PKT_CONSISTENT) {
2172 cmd_flags |= CFLAG_CMDIOPB;
2173 dma_flags |= DDI_DMA_CONSISTENT;
2174 }
2175
2176 /*
2177 * bind the handle to the buf
2178 */
2179 ASSERT(cmd->cmd_dmahandle != NULL);
2180 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
2181 dma_flags, callback, arg, &cmd->cmd_dmacookie,
2182 &dmacookie_count);
2183
2184 if (rval && rval != DDI_DMA_PARTIAL_MAP) {
2185 switch (rval) {
2186 case DDI_DMA_NORESOURCES:
2187 bioerror(bp, 0);
2188 break;
2189 case DDI_DMA_BADATTR:
2190 case DDI_DMA_NOMAPPING:
2191 bioerror(bp, EFAULT);
2192 break;
2193 case DDI_DMA_TOOBIG:
2194 default:
2195 bioerror(bp, EINVAL);
2196 break;
2197 }
2198 cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
2199 if (new_cmd) {
2200 fas_scsi_destroy_pkt(ap, pkt);
2201 }
2202 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2203 "fas_scsi_impl_dmaget_end");
2204 return ((struct scsi_pkt *)NULL);
2205 }
2206 ASSERT(dmacookie_count == 1);
2207 cmd->cmd_dmacount = bp->b_bcount;
2208 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
2209
2210 ASSERT(cmd->cmd_dmahandle != NULL);
2211 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2212 "fas_scsi_impl_dmaget_end");
2213 }
2214
2215 return (pkt);
2216 }
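/*
 * For reference, a target driver reaches the function above through
 * the SCSA entry point scsi_init_pkt(9F). A minimal sketch of such a
 * call (illustrative only, not compiled; devp and bp stand in for the
 * target driver's own state):
 */
#if 0
static struct scsi_pkt *
example_init_pkt(struct scsi_device *devp, struct buf *bp)
{
	/*
	 * one call covers both steps above: allocate the pkt
	 * (first step) and bind bp to the dma handle (second step)
	 */
	return (scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP0,
	    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
	    SLEEP_FUNC, NULL));
}
#endif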
2217
2218 /*
2219 * unbind dma resources and deallocate the pkt
2220 */
2221 static void
2222 fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2223 {
2224 struct fas_cmd *sp = PKT2CMD(pkt);
2225 struct fas *fas = ADDR2FAS(ap);
2226
2227 /*
2228 * fas_scsi_impl_dmafree inline to speed things up
2229 */
2230 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2231 "fas_scsi_impl_dmafree_start");
2232
2233 if (sp->cmd_flags & CFLAG_DMAVALID) {
2234 /*
2235 * Free the mapping.
2236 */
2237 (void) ddi_dma_unbind_handle(sp->cmd_dmahandle);
2238 sp->cmd_flags ^= CFLAG_DMAVALID;
2239 }
2240
2241 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2242 "fas_scsi_impl_dmafree_end");
2243
2244 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_START,
2245 "fas_scsi_impl_pktfree_start");
2246
2247 if ((sp->cmd_flags &
2248 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
2249 CFLAG_SCBEXTERN)) == 0) {
2250 sp->cmd_flags = CFLAG_FREE;
2251 kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2252 } else {
2253 fas_pkt_destroy_extern(fas, sp);
2254 }
2255
2256 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_END,
2257 "fas_scsi_impl_pktfree_end");
2258 }
2259
2260 /*
2261 * allocate and deallocate external pkt space (ie. not part of fas_cmd) for
2262 * non-standard length cdb, pkt_private, status areas
2263 * if allocation fails, then deallocate all external space and the pkt
2264 */
2265 /* ARGSUSED */
2266 static int
2267 fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
2268 int cmdlen, int tgtlen, int statuslen, int kf)
2269 {
2270 caddr_t cdbp, scbp, tgt;
2271 int failure = 0;
2272
2273 tgt = cdbp = scbp = NULL;
2274 if (cmdlen > sizeof (sp->cmd_cdb)) {
2275 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
2276 failure++;
2277 } else {
2278 sp->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
2279 sp->cmd_flags |= CFLAG_CDBEXTERN;
2280 }
2281 }
2282 if (tgtlen > PKT_PRIV_LEN) {
2283 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
2284 failure++;
2285 } else {
2286 sp->cmd_flags |= CFLAG_PRIVEXTERN;
2287 sp->cmd_pkt->pkt_private = tgt;
2288 }
2289 }
2290 if (statuslen > EXTCMDS_STATUS_SIZE) {
2291 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
2292 failure++;
2293 } else {
2294 sp->cmd_flags |= CFLAG_SCBEXTERN;
2295 sp->cmd_pkt->pkt_scbp = (opaque_t)scbp;
2296 }
2297 }
2298 if (failure) {
2299 fas_pkt_destroy_extern(fas, sp);
2300 }
2301 return (failure);
2302 }
2303
2304 /*
2305 * deallocate external pkt space and deallocate the pkt
2306 */
2307 static void
2308 fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp)
2309 {
2310 if (sp->cmd_flags & CFLAG_FREE) {
2311 panic("fas_pkt_destroy_extern: freeing free packet");
2312 _NOTE(NOT_REACHED)
2313 /* NOTREACHED */
2314 }
2315 if (sp->cmd_flags & CFLAG_CDBEXTERN) {
2316 kmem_free((caddr_t)sp->cmd_pkt->pkt_cdbp,
2317 (size_t)sp->cmd_cdblen);
2318 }
2319 if (sp->cmd_flags & CFLAG_SCBEXTERN) {
2320 kmem_free((caddr_t)sp->cmd_pkt->pkt_scbp,
2321 (size_t)sp->cmd_scblen);
2322 }
2323 if (sp->cmd_flags & CFLAG_PRIVEXTERN) {
2324 kmem_free((caddr_t)sp->cmd_pkt->pkt_private,
2325 (size_t)sp->cmd_privlen);
2326 }
2327 sp->cmd_flags = CFLAG_FREE;
2328 kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2329 }
2330
2331 /*
2332 * kmem cache constructor and destructor:
2333 * When constructing, we bzero the cmd and allocate the dma handle
2334 * When destructing, just free the dma handle
2335 */
2336 static int
2337 fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
2338 {
2339 struct fas_cmd *cmd = buf;
2340 struct fas *fas = cdrarg;
2341 int (*callback)(caddr_t) = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP:
2342 DDI_DMA_DONTWAIT;
2343
2344 bzero(buf, EXTCMD_SIZE);
2345
2346 /*
2347 * allocate a dma handle
2348 */
2349 if ((ddi_dma_alloc_handle(fas->f_dev, fas->f_dma_attr, callback,
2350 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
2351 return (-1);
2352 }
2353 return (0);
2354 }
2355
2356 /*ARGSUSED*/
2357 static void
2358 fas_kmem_cache_destructor(void *buf, void *cdrarg)
2359 {
2360 struct fas_cmd *cmd = buf;
2361 if (cmd->cmd_dmahandle) {
2362 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2363 }
2364 }
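/*
 * The constructor/destructor pair above is wired up with
 * kmem_cache_create(9F) at attach time; a minimal sketch (illustrative
 * only, not compiled; the cache name and alignment are assumptions):
 */
#if 0
	fas->f_kmem_cache = kmem_cache_create("fas_cache", EXTCMD_SIZE, 8,
	    fas_kmem_cache_constructor, fas_kmem_cache_destructor,
	    NULL, (void *)fas, NULL, 0);
#endif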
2365
2366 /*
2367 * fas_scsi_start - Accept commands for transport
2368 */
2369 static int
2370 fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
2371 {
2372 struct fas_cmd *sp = PKT2CMD(pkt);
2373 struct fas *fas = ADDR2FAS(ap);
2374 int rval;
2375 int intr = 0;
2376
2377 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_START, "fas_scsi_start_start");
2378
2379 #ifdef FAS_TEST
2380 if (fas_transport_busy > 0) {
2381 fas_transport_busy--;
2382 return (TRAN_BUSY);
2383 }
2384 if ((fas_transport_busy_rqs > 0) &&
2385 (*(sp->cmd_pkt->pkt_cdbp) == SCMD_REQUEST_SENSE)) {
2386 fas_transport_busy_rqs--;
2387 return (TRAN_BUSY);
2388 }
2389 if (fas_transport_reject > 0) {
2390 fas_transport_reject--;
2391 return (TRAN_BADPKT);
2392 }
2393 #endif
2394 /*
2395 * prepare packet before taking the mutex
2396 */
2397 rval = fas_prepare_pkt(fas, sp);
2398 if (rval != TRAN_ACCEPT) {
2399 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_PREPARE_PKT_END,
2400 "fas_scsi_start_end (prepare_pkt)");
2401 return (rval);
2402 }
2403
2404 /*
2405 * fas mutex can be held for a long time; therefore, if the mutex is
2406 * held, we queue the packet in a waitQ; we then have to check
2407 * the waitQ on every mutex_exit(FAS_MUTEX(fas)), but we really only
2408 * need to do this when the bus is free
2409 * don't put NOINTR cmds including proxy cmds in waitQ! These
2410 * cmds are handled by fas_runpoll()
2411 * if the waitQ is non-empty, queue the pkt anyway to preserve
2412 * order
2413 * the goal is to queue in waitQ as much as possible so at
2414 * interrupt time, we can move the packets to readyQ or start
2415 * a packet immediately. It helps to do this at interrupt
2416 * time because we can then field more interrupts
2417 */
2418 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
2419
2420 /*
2421 * if the bus is not free, we will get an interrupt shortly
2422 * so we don't want to take the fas mutex but queue up
2423 * the packet in the waitQ
2424 * also, if the waitQ is non-empty or there is an interrupt
2425 * pending then queue up the packet in the waitQ and let the
2426 * interrupt handler empty the waitQ
2427 */
2428 mutex_enter(&fas->f_waitQ_mutex);
2429
2430 if ((fas->f_state != STATE_FREE) ||
2431 fas->f_waitf || (intr = INTPENDING(fas))) {
2432 goto queue_in_waitQ;
2433 }
2434
2435 /*
2436 * we didn't queue up in the waitQ, so now try to accept
2437 * the packet. if we fail to get the fas mutex, go back to
2438 * the waitQ again
2439 * do not release the waitQ mutex yet because that
2440 * leaves a window where the interrupt handler has
2441 * emptied the waitQ but not released the fas mutex yet
2442 *
2443 * the interrupt handler gets the locks in opposite order
2444 * but because we do a tryenter, there is no deadlock
2445 *
2446 * if another thread has the fas mutex then either this
2447 * thread or the other may find the bus free and
2448 * empty the waitQ
2449 */
2450 if (mutex_tryenter(FAS_MUTEX(fas))) {
2451 mutex_exit(&fas->f_waitQ_mutex);
2452 rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
2453 } else {
2454 /*
2455 * we didn't get the fas mutex so
2456 * the packet has to go in the waitQ now
2457 */
2458 goto queue_in_waitQ;
2459 }
2460 } else {
2461 /*
2462 * for polled cmds, we have to take the mutex and
2463 * start the packet using fas_runpoll()
2464 */
2465 mutex_enter(FAS_MUTEX(fas));
2466 rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
2467 }
2468
2469 /*
2470 * if the bus is free then empty waitQ and release the mutex
2471 * (it is unlikely that the bus is still free after
2472 * accepting the packet; the exception is the relatively unusual case
2473 * that we are throttling)
2474 */
2475 if (fas->f_state == STATE_FREE) {
2476 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
2477 } else {
2478 mutex_exit(FAS_MUTEX(fas));
2479 }
2480
2481 done:
2482 TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
2483 "fas_scsi_start_end: fas 0x%p", fas);
2484 return (rval);
2485
2486 queue_in_waitQ:
2487 if (fas->f_waitf == NULL) {
2488 fas->f_waitb = fas->f_waitf = sp;
2489 sp->cmd_forw = NULL;
2490 } else {
2491 struct fas_cmd *dp = fas->f_waitb;
2492 dp->cmd_forw = fas->f_waitb = sp;
2493 sp->cmd_forw = NULL;
2494 }
2495
2496 /*
2497 * check the fas mutex again;
2498 * if there was an interrupt then the interrupt
2499 * handler will eventually empty the waitQ
2500 */
2501 if ((intr == 0) && (fas->f_state == STATE_FREE) &&
2502 mutex_tryenter(FAS_MUTEX(fas))) {
2503 /*
2504 * double check if the bus is still free
2505 * (this actually reduced mutex contention a bit)
2506 */
2507 if (fas->f_state == STATE_FREE) {
2508 fas_empty_waitQ(fas);
2509 }
2510 mutex_exit(FAS_MUTEX(fas));
2511 }
2512 mutex_exit(&fas->f_waitQ_mutex);
2513
2514 TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
2515 "fas_scsi_start_end: fas 0x%p", fas);
2516 return (rval);
2517 }
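/*
 * The deadlock-avoidance pattern used above, reduced to its essentials
 * (illustrative sketch, not compiled): this path holds f_waitQ_mutex
 * and only ever tryenters FAS_MUTEX, while the interrupt handler takes
 * FAS_MUTEX first and f_waitQ_mutex second. Because the submitter
 * never blocks on FAS_MUTEX while holding f_waitQ_mutex, the opposite
 * acquisition orders cannot deadlock.
 */
#if 0
static void
fas_submit_sketch(struct fas *fas)
{
	mutex_enter(&fas->f_waitQ_mutex);
	/* ... queue the pkt or inspect the waitQ ... */
	if (mutex_tryenter(FAS_MUTEX(fas))) {
		mutex_exit(&fas->f_waitQ_mutex);
		/* fast path: accept/start the cmd under FAS_MUTEX */
		mutex_exit(FAS_MUTEX(fas));
	} else {
		/*
		 * slow path: leave the pkt on the waitQ; whoever
		 * holds FAS_MUTEX will drain the waitQ on exit
		 */
		mutex_exit(&fas->f_waitQ_mutex);
	}
}
#endif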
2518
2519 /*
2520 * prepare the pkt:
2521 * the pkt may have been resubmitted or just reused so
2522 * initialize some fields, reset the dma window, and do some checks
2523 */
2524 static int
2525 fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp)
2526 {
2527 struct scsi_pkt *pkt = CMD2PKT(sp);
2528
2529 /*
2530 * Reinitialize some fields that need it; the packet may
2531 * have been resubmitted
2532 */
2533 pkt->pkt_reason = CMD_CMPLT;
2534 pkt->pkt_state = 0;
2535 pkt->pkt_statistics = 0;
2536 pkt->pkt_resid = 0;
2537 sp->cmd_age = 0;
2538 sp->cmd_pkt_flags = pkt->pkt_flags;
2539
2540 /*
2541 * Copy the cdb pointer to the pkt wrapper area as we
2542 * might modify this pointer. Zero status byte
2543 */
2544 sp->cmd_cdbp = pkt->pkt_cdbp;
2545 *(pkt->pkt_scbp) = 0;
2546
2547 if (sp->cmd_flags & CFLAG_DMAVALID) {
2548 pkt->pkt_resid = sp->cmd_dmacount;
2549
2550 /*
2551 * if the pkt was resubmitted then the
2552 * windows may be at the wrong number
2553 */
2554 if (sp->cmd_cur_win) {
2555 sp->cmd_cur_win = 0;
2556 if (fas_set_new_window(fas, sp)) {
2557 IPRINTF("cannot reset window\n");
2558 return (TRAN_BADPKT);
2559 }
2560 }
2561 sp->cmd_saved_cur_addr =
2562 sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;
2563
2564 /*
2565 * the common case is just one window, we worry
2566 * about multiple windows when we run out of the
2567 * current window
2568 */
2569 sp->cmd_nwin = sp->cmd_saved_win = 0;
2570 sp->cmd_data_count = sp->cmd_saved_data_count = 0;
2571
2572 /*
2573 * consistent packets need to be sync'ed first
2574 * (only for data going out)
2575 */
2576 if ((sp->cmd_flags & (CFLAG_CMDIOPB | CFLAG_DMASEND)) ==
2577 (CFLAG_CMDIOPB | CFLAG_DMASEND)) {
2578 (void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
2579 DDI_DMA_SYNC_FORDEV);
2580 }
2581 }
2582
2583 sp->cmd_actual_cdblen = sp->cmd_cdblen;
2584
2585 #ifdef FAS_TEST
2586 #ifndef __lock_lint
2587 if (fas_test_untagged > 0) {
2588 if (TAGGED(Tgt(sp))) {
2589 int slot = sp->cmd_slot;
2590 sp->cmd_pkt_flags &= ~FLAG_TAGMASK;
2591 sp->cmd_pkt_flags &= ~FLAG_NODISCON;
2592 sp->cmd_pkt_flags |= 0x80000000;
2593 fas_log(fas, CE_NOTE,
2594 "starting untagged cmd, target=%d,"
2595 " tcmds=%d, sp=0x%p, throttle=%d\n",
2596 Tgt(sp), fas->f_tcmds[slot], (void *)sp,
2597 fas->f_throttle[slot]);
2598 fas_test_untagged = -10;
2599 }
2600 }
2601 #endif
2602 #endif
2603
2604 #ifdef FASDEBUG
2605 if (NOTAG(Tgt(sp)) && (pkt->pkt_flags & FLAG_TAGMASK)) {
2606 IPRINTF2("tagged packet for non-tagged target %d.%d\n",
2607 Tgt(sp), Lun(sp));
2608 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
2609 "fas_prepare_pkt_end (tran_badpkt)");
2610 return (TRAN_BADPKT);
2611 }
2612
2613 /*
2614 * the SCSA spec states that it is an error to have no
2615 * completion function when FLAG_NOINTR is not set
2616 */
2617 if ((pkt->pkt_comp == NULL) &&
2618 ((pkt->pkt_flags & FLAG_NOINTR) == 0)) {
2619 IPRINTF("intr packet with pkt_comp == 0\n");
2620 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
2621 "fas_prepare_pkt_end (tran_badpkt)");
2622 return (TRAN_BADPKT);
2623 }
2624 #endif /* FASDEBUG */
2625
2626 if ((fas->f_target_scsi_options[Tgt(sp)] & SCSI_OPTIONS_DR) == 0) {
2627 /*
2628 * no need to reset tag bits since tag queueing will
2629 * not be enabled if disconnects are disabled
2630 */
2631 sp->cmd_pkt_flags |= FLAG_NODISCON;
2632 }
2633
2634 sp->cmd_flags = (sp->cmd_flags & ~CFLAG_TRANFLAG) |
2635 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
2636
2637 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_ACCEPT_END,
2638 "fas_prepare_pkt_end (tran_accept)");
2639 return (TRAN_ACCEPT);
2640 }
2641
2642 /*
2643 * emptying the waitQ just before releasing FAS_MUTEX is a bit
2644 * tricky; if we release the waitQ mutex and then the FAS_MUTEX,
2645 * another thread could queue a cmd in the waitQ, just before
2646 * the FAS_MUTEX is released. This cmd is then stuck in the waitQ unless
2647 * another cmd comes in or fas_intr() or fas_watch() checks the waitQ.
2648 * Therefore, by releasing the FAS_MUTEX before releasing the waitQ mutex,
2649 * we prevent fas_scsi_start() from filling the waitQ
2650 *
2651 * By setting NO_TRAN_BUSY, we force fas_accept_pkt() to queue up
2652 * the waitQ pkts in the readyQ.
2653 * If a QFull condition occurs, the target driver may set its throttle
2654 * too high because of the requests queued up in the readyQ but this
2655 * is not a big problem. The throttle should be periodically reset anyway.
2656 */
2657 static void
2658 fas_empty_waitQ(struct fas *fas)
2659 {
2660 struct fas_cmd *sp;
2661 int rval;
2662 struct fas_cmd *waitf, *waitb;
2663
2664 ASSERT(mutex_owned(&fas->f_waitQ_mutex));
2665 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_START,
2666 "fas_empty_waitQ_start");
2667
2668 while (fas->f_waitf) {
2669
2670 /* copy waitQ, zero the waitQ and release the mutex */
2671 waitf = fas->f_waitf;
2672 waitb = fas->f_waitb;
2673 fas->f_waitf = fas->f_waitb = NULL;
2674 mutex_exit(&fas->f_waitQ_mutex);
2675
2676 do {
2677 sp = waitf;
2678 waitf = sp->cmd_forw;
2679 if (waitb == sp) {
2680 waitb = NULL;
2681 }
2682
2683 rval = fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
2684
2685 /*
2686 * If the packet was rejected for other reasons then
2687 * complete it here
2688 */
2689 if (rval != TRAN_ACCEPT) {
2690 ASSERT(rval != TRAN_BUSY);
2691 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
2692 if (sp->cmd_pkt->pkt_comp) {
2693 sp->cmd_flags |= CFLAG_FINISHED;
2694 fas_call_pkt_comp(fas, sp);
2695 }
2696 }
2697
2698 if (INTPENDING(fas)) {
2699 /*
2700 * stop processing the waitQ and put back
2701 * the remaining packets on the waitQ
2702 */
2703 mutex_enter(&fas->f_waitQ_mutex);
2704 if (waitf) {
2705 ASSERT(waitb != NULL);
2706 waitb->cmd_forw = fas->f_waitf;
2707 fas->f_waitf = waitf;
2708 if (fas->f_waitb == NULL) {
2709 fas->f_waitb = waitb;
2710 }
2711 }
2712 return;
2713 }
2714 } while (waitf);
2715
2716 mutex_enter(&fas->f_waitQ_mutex);
2717 }
2718 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_END,
2719 "fas_empty_waitQ_end");
2720 }
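/*
 * Note the drain pattern above: the whole waitQ is detached under
 * f_waitQ_mutex and then processed with that mutex dropped, so
 * fas_scsi_start() can keep queueing new pkts while we work; if an
 * interrupt becomes pending mid-drain, the unprocessed tail is pushed
 * back onto the front of the (possibly refilled) waitQ so ordering is
 * preserved.
 */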
2721
2722 static void
2723 fas_move_waitQ_to_readyQ(struct fas *fas)
2724 {
2725 /*
2726 * this may actually start cmds but it is most likely
2727 * that if waitQ is not empty that the bus is not free
2728 */
2729 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2730 mutex_enter(&fas->f_waitQ_mutex);
2731 fas_empty_waitQ(fas);
2732 mutex_exit(&fas->f_waitQ_mutex);
2733 }
2734
2735
2736 /*
2737 * function wrapper for two frequently used macros. for the non-critical
2738 * path we use the function
2739 */
2740 static void
2741 fas_check_waitQ_and_mutex_exit(struct fas *fas)
2742 {
2743 _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(fas->f_mutex))
2744 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
2745 FAS_EMPTY_CALLBACKQ(fas);
2746 }
2747
2748 /*
2749 * fas_accept_pkt():
2750 * the flag argument is to force fas_accept_pkt to accept the pkt;
2751 * the caller cannot take the pkt back and it has to be queued up in
2752 * the readyQ
2753 */
2754 static int
2755 fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag)
2756 {
2757 short slot = sp->cmd_slot;
2758 int rval = TRAN_ACCEPT;
2759
2760 TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_START, "fas_accept_pkt_start");
2761 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2762 ASSERT(fas->f_ncmds >= 0 && fas->f_ndisc >= 0);
2763 ASSERT(fas->f_ncmds >= fas->f_ndisc);
2764 ASSERT(fas->f_tcmds[slot] >= 0);
2765
2766 /*
2767 * prepare packet for transport if this hasn't been done yet and
2768 * do some checks
2769 */
2770 if ((sp->cmd_flags & CFLAG_PREPARED) == 0) {
2771 rval = fas_prepare_pkt(fas, sp);
2772 if (rval != TRAN_ACCEPT) {
2773 IPRINTF1("prepare pkt failed, slot=%x\n", slot);
2774 sp->cmd_flags &= ~CFLAG_TRANFLAG;
2775 goto done;
2776 }
2777 }
2778
2779 if (Lun(sp)) {
2780 EPRINTF("fas_accept_pkt: switching target and lun slot scan\n");
2781 fas->f_dslot = 1;
2782
2783 if ((fas->f_active[slot] == NULL) ||
2784 ((fas->f_active[slot]->f_n_slots != NTAGS) &&
2785 TAGGED(Tgt(sp)))) {
2786 (void) fas_alloc_active_slots(fas, slot, KM_NOSLEEP);
2787 }
2788 if ((fas->f_active[slot] == NULL) ||
2789 (NOTAG(Tgt(sp)) && (sp->cmd_pkt_flags & FLAG_TAGMASK))) {
2790 IPRINTF("fatal error on non-zero lun pkt\n");
2791 return (TRAN_FATAL_ERROR);
2792 }
2793 }
2794
2795 /*
2796 * we accepted the command; increment the count
2797 * (we may still reject later if TRAN_BUSY_OK)
2798 */
2799 fas_check_ncmds(fas);
2800 fas->f_ncmds++;
2801
2802 /*
2803 * if it is a nointr packet, start it now
2804 * (NO_INTR pkts are not queued in the waitQ)
2805 */
2806 if (sp->cmd_pkt_flags & FLAG_NOINTR) {
2807 EPRINTF("starting a nointr cmd\n");
2808 fas_runpoll(fas, slot, sp);
2809 sp->cmd_flags &= ~CFLAG_TRANFLAG;
2810 goto done;
2811 }
2812
2813 /*
2814 * reset the throttle if we were draining
2815 */
2816 if ((fas->f_tcmds[slot] == 0) &&
2817 (fas->f_throttle[slot] == DRAIN_THROTTLE)) {
2818 DPRINTF("reset throttle\n");
2819 ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
2820 fas_full_throttle(fas, slot);
2821 }
2822
2823 /*
2824 * accept the command:
2825 * If no readyQ and no bus free, and throttle is OK,
2826 * run cmd immediately.
2827 */
2828 #ifdef FASDEBUG
2829 fas->f_total_cmds++;
2830 #endif
2831
2832 if ((fas->f_readyf[slot] == NULL) && (fas->f_state == STATE_FREE) &&
2833 (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
2834 ASSERT(fas->f_current_sp == 0);
2835 (void) fas_startcmd(fas, sp);
2836 goto exit;
2837 } else {
2838 /*
2839 * If FLAG_HEAD is set, run the cmd if the target and bus are
2840 * available; if the first cmd in the readyQ is a request sense,
2841 * then insert after this command (there shouldn't be more
2842 * than one request sense)
2843 */
2844 if (sp->cmd_pkt_flags & FLAG_HEAD) {
2845 struct fas_cmd *ssp = fas->f_readyf[slot];
2846 EPRINTF("que head\n");
2847 if (ssp &&
2848 *(ssp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
2849 fas_head_of_readyQ(fas, sp);
2850 } else if (ssp) {
2851 struct fas_cmd *dp = ssp->cmd_forw;
2852 ssp->cmd_forw = sp;
2853 sp->cmd_forw = dp;
2854 if (fas->f_readyb[slot] == ssp) {
2855 fas->f_readyb[slot] = sp;
2856 }
2857 } else {
2858 fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
2859 sp->cmd_forw = NULL;
2860 }
2861
2862 /*
2863 * for tagged targets, check for qfull condition and
2864 * return TRAN_BUSY (if permitted), if throttle has been
2865 * exceeded
2866 */
2867 } else if (TAGGED(Tgt(sp)) &&
2868 (fas->f_tcmds[slot] >= fas->f_throttle[slot]) &&
2869 (fas->f_throttle[slot] > HOLD_THROTTLE) &&
2870 (flag == TRAN_BUSY_OK)) {
2871 IPRINTF2(
2872 "transport busy, slot=%x, ncmds=%x\n",
2873 slot, fas->f_ncmds);
2874 rval = TRAN_BUSY;
2875 fas->f_ncmds--;
2876 sp->cmd_flags &=
2877 ~(CFLAG_PREPARED | CFLAG_IN_TRANSPORT);
2878 goto done;
2879 /*
2880 * append to readyQ or start a new readyQ
2881 */
2882 } else if (fas->f_readyf[slot]) {
2883 struct fas_cmd *dp = fas->f_readyb[slot];
2884 ASSERT(dp != 0);
2885 fas->f_readyb[slot] = sp;
2886 sp->cmd_forw = NULL;
2887 dp->cmd_forw = sp;
2888 } else {
2889 fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
2890 sp->cmd_forw = NULL;
2891 }
2892
2893 }
2894
2895 done:
2896 /*
2897 * just in case the bus is free and we haven't
2898 * been able to restart for some reason
2899 */
2900 if (fas->f_state == STATE_FREE) {
2901 (void) fas_istart(fas);
2902 }
2903
2904 exit:
2905 fas_check_ncmds(fas);
2906 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2907 TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_END, "fas_accept_pkt_end");
2908 return (rval);
2909 }
2910
2911 /*
2912 * allocate a tag byte and check for tag aging
2913 */
2914 static char fas_tag_lookup[] =
2915 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
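/*
 * fas_tag_lookup[] is indexed by (cmd_pkt_flags & FLAG_TAGMASK) >> 12;
 * assuming the usual scsi_pkt.h values (FLAG_HTAG 0x1000, FLAG_OTAG
 * 0x2000, FLAG_STAG 0x4000), the shift yields 1, 2 or 4, selecting
 * MSG_HEAD_QTAG, MSG_ORDERED_QTAG or MSG_SIMPLE_QTAG respectively;
 * entries 0 and 3 are never used.
 */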
2916
2917 static int
2918 fas_alloc_tag(struct fas *fas, struct fas_cmd *sp)
2919 {
2920 struct f_slots *tag_slots;
2921 int tag;
2922 short slot = sp->cmd_slot;
2923
2924 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_START, "fas_alloc_tag_start");
2925 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2926
2927 tag_slots = fas->f_active[slot];
2928 ASSERT(tag_slots->f_n_slots == NTAGS);
2929
2930 alloc_tag:
2931 tag = (fas->f_active[slot]->f_tags)++;
2932 if (fas->f_active[slot]->f_tags >= NTAGS) {
2933 /*
2934 * we reserve tag 0 for non-tagged cmds
2935 */
2936 fas->f_active[slot]->f_tags = 1;
2937 }
2938 EPRINTF1("tagged cmd, tag = %d\n", tag);
2939
2940 /* Validate tag, should never fail. */
2941 if (tag_slots->f_slot[tag] == 0) {
2942 /*
2943 * Store assigned tag and tag queue type.
2944 * Note, in case of multiple choice, default to simple queue.
2945 */
2946 ASSERT(tag < NTAGS);
2947 sp->cmd_tag[1] = (uchar_t)tag;
2948 sp->cmd_tag[0] = fas_tag_lookup[((sp->cmd_pkt_flags &
2949 FLAG_TAGMASK) >> 12)];
2950 EPRINTF1("tag= %d\n", tag);
2951 tag_slots->f_slot[tag] = sp;
2952 (fas->f_tcmds[slot])++;
2953 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2954 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
2955 "fas_alloc_tag_end");
2956 return (0);
2957
2958 } else {
2959 int age, i;
2960
2961 /*
2962 * Check tag age. If timeouts enabled and
2963 * tag age greater than 1, print warning msg.
2964 * If timeouts enabled and tag age greater than
2965 * age limit, begin draining the tag queue to check for a
2966 * lost tag cmd.
2967 */
2968 age = tag_slots->f_slot[tag]->cmd_age++;
2969 if (age >= fas->f_scsi_tag_age_limit &&
2970 tag_slots->f_slot[tag]->cmd_pkt->pkt_time) {
2971 IPRINTF2("tag %d in use, age= %d\n", tag, age);
2972 DPRINTF("draining tag queue\n");
2973 if (fas->f_reset_delay[Tgt(sp)] == 0) {
2974 fas->f_throttle[slot] = DRAIN_THROTTLE;
2975 }
2976 }
2977
2978 /* If tag in use, scan until a free one is found. */
2979 for (i = 1; i < NTAGS; i++) {
2980 tag = fas->f_active[slot]->f_tags;
2981 if (!tag_slots->f_slot[tag]) {
2982 EPRINTF1("found free tag %d\n", tag);
2983 break;
2984 }
2985 if (++(fas->f_active[slot]->f_tags) >= NTAGS) {
2986 /*
2987 * we reserve tag 0 for non-tagged cmds
2988 */
2989 fas->f_active[slot]->f_tags = 1;
2990 }
2991 EPRINTF1("found in use tag %d\n", tag);
2992 }
2993
2994 /*
2995 * If no free tags, we're in serious trouble.
2996 * the target driver submitted more than 255
2997 * requests
2998 */
2999 if (tag_slots->f_slot[tag]) {
3000 IPRINTF1("slot %x: All tags in use!!!\n", slot);
3001 goto fail;
3002 }
3003 goto alloc_tag;
3004 }
3005
3006 fail:
3007 fas_head_of_readyQ(fas, sp);
3008
3009 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
3010 "fas_alloc_tag_end");
3011 return (-1);
3012 }
3013
3014 /*
3015 * Internal Search Routine.
3016 *
3017 * Search for a command to start.
3018 */
3019 static int
3020 fas_istart(struct fas *fas)
3021 {
3022 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_START,
3023 "fas_istart_start");
3024 EPRINTF("fas_istart:\n");
3025
3026 if (fas->f_state == STATE_FREE && fas->f_ncmds > fas->f_ndisc) {
3027 (void) fas_ustart(fas);
3028 }
3029 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_END,
3030 "fas_istart_end");
3031 return (ACTION_RETURN);
3032 }
3033
3034 static int
3035 fas_ustart(struct fas *fas)
3036 {
3037 struct fas_cmd *sp;
3038 short slot = fas->f_next_slot;
3039 short start_slot = slot;
3040 short dslot = fas->f_dslot;
3041
3042 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_START, "fas_ustart_start");
3043 EPRINTF1("fas_ustart: start_slot=%x\n", fas->f_next_slot);
3044 ASSERT(fas->f_current_sp == NULL);
3045 ASSERT(dslot != 0);
3046 if (dslot == NLUNS_PER_TARGET) {
3047 ASSERT((slot % NLUNS_PER_TARGET) == 0);
3048 }
3049
3050 /*
3051 * if readyQ not empty and we are not draining, then we
3052 * can start another cmd
3053 */
3054 do {
3055 /*
3056 * If all cmds drained from tag Q, back to full throttle and
3057 * start queueing up new cmds again.
3058 */
3059 if (fas->f_throttle[slot] == DRAIN_THROTTLE &&
3060 fas->f_tcmds[slot] == 0) {
3061 fas_full_throttle(fas, slot);
3062 }
3063
3064 if (fas->f_readyf[slot] &&
3065 (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
3066 sp = fas->f_readyf[slot];
3067 fas->f_readyf[slot] = sp->cmd_forw;
3068 if (sp->cmd_forw == NULL) {
3069 fas->f_readyb[slot] = NULL;
3070 }
3071 fas->f_next_slot = NEXTSLOT(slot, dslot);
3072 ASSERT((sp->cmd_pkt_flags & FLAG_NOINTR) == 0);
3073 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_END,
3074 "fas_ustart_end");
3075 return (fas_startcmd(fas, sp));
3076 } else {
3077 slot = NEXTSLOT(slot, dslot);
3078 }
3079 } while (slot != start_slot);
3080
3081 EPRINTF("fas_ustart: no cmds to start\n");
3082 fas->f_next_slot = NEXTSLOT(slot, dslot);
3083 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_NOT_FOUND_END,
3084 "fas_ustart_end (not_found)");
3085 return (FALSE);
3086 }
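/*
 * fas_ustart() gives round-robin fairness across slots: the scan
 * starts at f_next_slot and, once a cmd is started, f_next_slot is
 * advanced past that slot, so under sustained load every (target, lun)
 * slot gets a turn instead of the lowest-numbered slot monopolizing
 * the bus.
 */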
3087
3088 /*
3089 * Start a command off
3090 */
3091 static int
3092 fas_startcmd(struct fas *fas, struct fas_cmd *sp)
3093 {
3094 volatile struct fasreg *fasreg = fas->f_reg;
3095 ushort_t nstate;
3096 uchar_t cmd, target, lun;
3097 ushort_t tshift;
3098 volatile uchar_t *tp = fas->f_cmdarea;
3099 struct scsi_pkt *pkt = CMD2PKT(sp);
3100 int slot = sp->cmd_slot;
3101 struct f_slots *slots = fas->f_active[slot];
3102 int i, cdb_len;
3103
3104 #define LOAD_CMDP *(tp++)
3105
3106 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_START, "fas_startcmd_start");
3107
3108 EPRINTF2("fas_startcmd: sp=0x%p flags=%x\n",
3109 (void *)sp, sp->cmd_pkt_flags);
3110 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3111 ASSERT((sp->cmd_flags & CFLAG_COMPLETED) == 0);
3112 ASSERT(fas->f_current_sp == NULL && fas->f_state == STATE_FREE);
3113 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
3114 ASSERT(fas->f_throttle[slot] > 0);
3115 ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
3116 }
3117
3118 target = Tgt(sp);
3119 lun = Lun(sp);
3120
3121 /*
3122 * if a non-tagged cmd is submitted to an active tagged target
3123 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
3124 * to be untagged
3125 */
3126 if (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
3127 TAGGED(target) && fas->f_tcmds[slot] &&
3128 ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) &&
3129 (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
3130 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
3131 struct fas_cmd *dp;
3132
3133 IPRINTF("untagged cmd, start draining\n");
3134
3135 if (fas->f_reset_delay[Tgt(sp)] == 0) {
3136 fas->f_throttle[slot] = DRAIN_THROTTLE;
3137 }
3138 dp = fas->f_readyf[slot];
3139 fas->f_readyf[slot] = sp;
3140 sp->cmd_forw = dp;
3141 if (fas->f_readyb[slot] == NULL) {
3142 fas->f_readyb[slot] = sp;
3143 }
3144 }
3145 return (FALSE);
3146 }
3147
3148 /*
3149 * allocate a tag; if no tag available then put request back
3150 * on the ready queue and return; eventually a cmd returns and we
3151 * get going again or we timeout
3152 */
3153 if (TAGGED(target) && (sp->cmd_pkt_flags & FLAG_TAGMASK)) {
3154 if (fas_alloc_tag(fas, sp)) {
3155 return (FALSE);
3156 }
3157 } else {
3158 /*
3159 * tag slot 0 is reserved for non-tagged cmds
3160 * and should be empty because we have drained
3161 */
3162 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
3163 ASSERT(fas->f_active[slot]->f_slot[0] == NULL);
3164 fas->f_active[slot]->f_slot[0] = sp;
3165 sp->cmd_tag[1] = 0;
3166 if (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
3167 ASSERT(fas->f_tcmds[slot] == 0);
3168 /*
3169 * don't start any other cmd until this
3170 * one is finished. The throttle is reset
3171 * later in fas_watch()
3172 */
3173 fas->f_throttle[slot] = 1;
3174 }
3175 (fas->f_tcmds[slot])++;
3176
3177 }
3178 }
3179
3180 fas->f_current_sp = sp;
3181 fas->f_omsglen = 0;
3182 tshift = 1<<target;
3183 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
3184 cdb_len = sp->cmd_actual_cdblen;
3185
3186 if (sp->cmd_pkt_flags & FLAG_RENEGOTIATE_WIDE_SYNC) {
3187 fas_force_renegotiation(fas, Tgt(sp));
3188 }
3189
3190 /*
3191 * first send identify message, with or without disconnect priv.
3192 */
3193 if (sp->cmd_pkt_flags & FLAG_NODISCON) {
3194 LOAD_CMDP = fas->f_last_msgout = MSG_IDENTIFY | lun;
3195 ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
3196 } else {
3197 LOAD_CMDP = fas->f_last_msgout = MSG_DR_IDENTIFY | lun;
3198 }
3199
3200 /*
3201 * normal case, tagQ and we have negotiated wide and sync
3202 * or we don't need to renegotiate because wide and sync
3203 * have been disabled
3204 * (proxy msg's don't have tag flag set)
3205 */
3206 if ((sp->cmd_pkt_flags & FLAG_TAGMASK) &&
3207 ((fas->f_wide_known | fas->f_nowide) &
3208 (fas->f_sync_known | fas->f_nosync) & tshift)) {
3209
3210 EPRINTF("tag cmd\n");
3211 ASSERT((sp->cmd_pkt_flags & FLAG_NODISCON) == 0);
3212
3213 fas->f_last_msgout = LOAD_CMDP = sp->cmd_tag[0];
3214 LOAD_CMDP = sp->cmd_tag[1];
3215
3216 nstate = STATE_SELECT_NORMAL;
3217 cmd = CMD_SEL_ATN3 | CMD_DMA;
3218
3219 /*
3220 * is this a proxy message
3221 */
3222 } else if (sp->cmd_flags & CFLAG_CMDPROXY) {
3223
3224 IPRINTF2("proxy cmd, len=%x, msg=%x\n",
3225 sp->cmd_cdb[FAS_PROXY_DATA],
3226 sp->cmd_cdb[FAS_PROXY_DATA+1]);
3227 /*
3228 * This is a proxy command. It will have
3229 * a message to send as part of post-selection
3230 * (e.g, MSG_ABORT or MSG_DEVICE_RESET)
3231 */
3232 fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
3233 for (i = 0; i < (uint_t)fas->f_omsglen; i++) {
3234 fas->f_cur_msgout[i] =
3235 sp->cmd_cdb[FAS_PROXY_DATA+1+i];
3236 }
3237 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
3238 cdb_len = 0;
3239 cmd = CMD_SEL_STOP | CMD_DMA;
3240 nstate = STATE_SELECT_N_SENDMSG;
3241
3242 /*
3243 * always negotiate wide first and sync after wide
3244 */
3245 } else if (((fas->f_wide_known | fas->f_nowide) & tshift) == 0) {
3246 int i = 0;
3247
3248 /* First the tag message bytes */
3249 if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
3250 fas->f_cur_msgout[i++] = sp->cmd_tag[0];
3251 fas->f_cur_msgout[i++] = sp->cmd_tag[1];
3252 }
3253
3254 /*
3255 * Set up to send wide negotiating message. This is getting
3256 * a bit tricky as we dma out the identify message and
3257 * send the other messages via the fifo buffer.
3258 */
3259 EPRINTF1("cmd with wdtr msg, tag=%x\n", sp->cmd_tag[1]);
3260
3261 fas_make_wdtr(fas, i, target, FAS_XFER_WIDTH);
3262
3263 cdb_len = 0;
3264 nstate = STATE_SELECT_N_SENDMSG;
3265 cmd = CMD_SEL_STOP | CMD_DMA;
3266
3267 /*
3268 * negotiate sync xfer rate
3269 */
3270 } else if (((fas->f_sync_known | fas->f_nosync) & tshift) == 0) {
3271 int i = 0;
3272 /*
3273 * Set up to send sync negotiating message. This is getting
3274 * a bit tricky as we dma out the identify message and
3275 * send the other messages via the fifo buffer.
3276 */
3277 if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
3278 fas->f_cur_msgout[i++] = sp->cmd_tag[0];
3279 fas->f_cur_msgout[i++] = sp->cmd_tag[1];
3280 }
3281
3282 fas_make_sdtr(fas, i, target);
3283
3284 cdb_len = 0;
3285 cmd = CMD_SEL_STOP | CMD_DMA;
3286 nstate = STATE_SELECT_N_SENDMSG;
3287
3288 /*
3289 * normal cmds, no negotiations and not a proxy and no TQ
3290 */
3291 } else {
3292
3293 ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
3294 EPRINTF("std. cmd\n");
3295
3296 nstate = STATE_SELECT_NORMAL;
3297 cmd = CMD_SEL_ATN | CMD_DMA;
3298 }
3299
3300 /*
3301 * Now load cdb (if any)
3302 */
3303 for (i = 0; i < cdb_len; i++) {
3304 LOAD_CMDP = sp->cmd_cdbp[i];
3305 }
3306
3307 /*
3308 * calculate total dma amount:
3309 */
3310 fas->f_lastcount = (uintptr_t)tp - (uintptr_t)fas->f_cmdarea;
3311
3312 /*
3313 * load target id and enable bus id encoding and 32 bit counter
3314 */
3315 fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
3316 (target & 0xf) | FAS_BUSID_ENCODID | FAS_BUSID_32BIT_COUNTER);
3317
3318 FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);
3319
3320 fas_reg_cmd_write(fas, CMD_FLUSH);
3321
3322 FAS_DMA_READ(fas, fas->f_lastcount,
3323 fas->f_dmacookie.dmac_address, 16, cmd);
3324
3325 New_state(fas, (int)nstate);
3326
3327 #ifdef FASDEBUG
3328 if (DDEBUGGING) {
3329 fas_dump_cmd(fas, sp);
3330 }
3331 #endif /* FASDEBUG */
3332
3333 /*
3334 * if timeout == 0, then it has no effect on the timeout
3335 * handling; we deal with this when an actual timeout occurs.
3336 */
3337 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
3338 ASSERT(fas->f_tcmds[slot] >= 1);
3339 }
3340 i = pkt->pkt_time - slots->f_timebase;
3341
3342 if (i == 0) {
3343 EPRINTF("dup timeout\n");
3344 (slots->f_dups)++;
3345 slots->f_timeout = slots->f_timebase;
3346 } else if (i > 0) {
3347 EPRINTF("new timeout\n");
3348 slots->f_timeout = slots->f_timebase = pkt->pkt_time;
3349 slots->f_dups = 1;
3350 }
3351
3352 fas_check_ncmds(fas);
3353
3354 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_END, "fas_startcmd_end");
3355
3356 return (TRUE);
3357 }
3358
3359 /*
3360 * Interrupt Entry Point.
3361 * Poll interrupts until they go away
3362 */
3363 static uint_t
3364 fas_intr(caddr_t arg)
3365 {
3366 struct fas *fas = (struct fas *)arg;
3367 int rval = DDI_INTR_UNCLAIMED;
3368 int kstat_updated = 0;
3369
3370 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_START, "fas_intr_start");
3371
3372 do {
3373 mutex_enter(FAS_MUTEX(fas));
3374
3375 do {
3376 if (fas_intr_svc(fas)) {
3377 /*
3378 * do not return immediately here because
3379 * we have to guarantee to always empty
3380 * the waitQ and callbackQ in the interrupt
3381 * handler
3382 */
3383 if (fas->f_polled_intr) {
3384 rval = DDI_INTR_CLAIMED;
3385 fas->f_polled_intr = 0;
3386 }
3387 } else {
3388 rval = DDI_INTR_CLAIMED;
3389 }
3390 } while (INTPENDING(fas));
3391
3392 if (!kstat_updated && fas->f_intr_kstat &&
3393 rval == DDI_INTR_CLAIMED) {
3394 FAS_KSTAT_INTR(fas);
3395 kstat_updated++;
3396 }
3397
3398 /*
3399 * check and empty the waitQ and the callbackQ
3400 */
3401 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
3402 FAS_EMPTY_CALLBACKQ(fas);
3403
3404 } while (INTPENDING(fas));
3405
3406 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_END, "fas_intr_end");
3407
3408 return (rval);
3409 }
3410
3411 /*
3412 * General interrupt service routine.
3413 */
3414 static char *dma_bits = DMA_BITS;
3415
3416 static int
3417 fas_intr_svc(struct fas *fas)
3418 {
3419 static int (*evec[])(struct fas *fas) = {
3420 fas_finish_select,
3421 fas_reconnect,
3422 fas_phasemanage,
3423 fas_finish,
3424 fas_reset_recovery,
3425 fas_istart,
3426 fas_abort_curcmd,
3427 fas_reset_bus,
3428 fas_reset_bus,
3429 fas_handle_selection
3430 };
3431 int action;
3432 uchar_t intr, stat;
3433 volatile struct fasreg *fasreg = fas->f_reg;
3434 int i = 0;
3435
3436 TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_START, "fas_intr_svc_start");
3437
3438 /*
3439 * A read of FAS interrupt register clears interrupt,
3440 * so any other volatile information needs to be latched
3441 * up prior to reading the interrupt register.
3442 */
3443 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
3444
3445 EPRINTF2("fas_intr_svc: state=%x stat=%x\n", fas->f_state,
3446 fas->f_stat);
3447
3448 /*
3449 * this wasn't our interrupt?
3450 */
3451 if ((fas->f_stat & FAS_STAT_IPEND) == 0) {
3452 if (fas_check_dma_error(fas)) {
3453 action = ACTION_RESET;
3454 goto start_action;
3455 }
3456 return (-1);
3457 }
3458
3459 /*
3460 * if we are reset state, handle this first
3461 */
3462 if (fas->f_state == ACTS_RESET) {
3463 action = ACTION_FINRST;
3464 goto start_action;
3465 }
3466
3467 /*
3468 * check for gross error. fas366 hardware seems to register
3469 * the gross error bit when a parity error is found. Make sure
3470 * to ignore the gross error bit when a parity error is detected.
3471 */
3472 if ((fas->f_stat & FAS_STAT_GERR) &&
3473 (fas->f_stat & FAS_STAT_PERR) == 0) {
3474 action = fas_handle_gross_err(fas);
3475 goto start_action;
3476 }
3477
3478 /*
3479 * now it is finally safe to read the interrupt register
3480 * if we haven't done so yet
3481 * Note: we don't read step register here but only in
3482 * fas_finish_select(). It is not entirely safe but saves
3483 * redundant PIOs or extra code in this critical path
3484 */
3485 fas->f_intr =
3486 intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
3487
3488 /*
3489 * read the fifo if there is something there or still in the
3490 * input shuttle
3491 */
3492 stat = fas->f_stat & FAS_PHASE_MASK;
3493
3494 if ((intr & FAS_INT_RESEL) ||
3495 ((stat != FAS_PHASE_DATA_IN) && (stat != FAS_PHASE_DATA_OUT) &&
3496 ((fas->f_state & STATE_SELECTING) == 0) &&
3497 (fas->f_state != ACTS_DATA_DONE) &&
3498 (fas->f_state != ACTS_C_CMPLT))) {
3499
3500 fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
3501
3502 if (((fas->f_stat2 & FAS_STAT2_EMPTY) == 0) ||
3503 (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
3504 fas_read_fifo(fas);
3505 }
3506 }
3507
3508 EPRINTF2("fas_intr_svc: intr=%x, stat=%x\n", fas->f_intr, fas->f_stat);
3509 EPRINTF2("dmacsr=%b\n", fas->f_dma->dma_csr, dma_bits);
3510
3511 /*
3512 * Based upon the current state of the host adapter driver
3513 * we should be able to figure out what to do with an interrupt.
3514 *
3515 * The FAS asserts an interrupt with one or more of 8 possible
3516 * bits set in its interrupt register. These conditions are
3517 * SCSI bus reset detected, an illegal command fed to the FAS,
3518 * one of DISCONNECT, BUS SERVICE, FUNCTION COMPLETE conditions
3519 * for the FAS, a Reselection interrupt, or one of Selection
3520 * or Selection with Attention.
3521 *
3522 * Of these possible interrupts, we can deal with some right
3523 * here and now, irrespective of the current state of the driver.
3524 *
3525 * take care of the most likely interrupts first and call the action
3526 * immediately
3527 */
3528 if ((intr & (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN|
3529 FAS_INT_RESEL)) == 0) {
3530 /*
3531 * The rest of the reasons for an interrupt can
3532 * be handled based purely on the state that the driver
3533 * is currently in now.
3534 */
3535 if (fas->f_state & STATE_SELECTING) {
3536 action = fas_finish_select(fas);
3537
3538 } else if (fas->f_state & STATE_ITPHASES) {
3539 action = fas_phasemanage(fas);
3540
3541 } else {
3542 fas_log(fas, CE_WARN, "spurious interrupt");
3543 action = ACTION_RETURN;
3544 }
3545
3546 } else if ((intr & FAS_INT_RESEL) && ((intr &
3547 (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN)) == 0)) {
3548
3549 if ((fas->f_state & STATE_SELECTING) == 0) {
3550 ASSERT(fas->f_state == STATE_FREE);
3551 action = fas_reconnect(fas);
3552 } else {
3553 action = fas_reselect_preempt(fas);
3554 }
3555
3556 } else if (intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
3557 action = fas_illegal_cmd_or_bus_reset(fas);
3558
3559 } else if (intr & (FAS_INT_SEL|FAS_INT_SELATN)) {
3560 action = ACTION_SELECT;
3561 }
3562
3563 start_action:
3564 while (action != ACTION_RETURN) {
3565 ASSERT((action >= 0) && (action <= ACTION_SELECT));
3566 TRACE_3(TR_FAC_SCSI_FAS, TR_FASSVC_ACTION_CALL,
3567 "fas_intr_svc call: fas 0x%p, action %d (%d)",
3568 fas, action, i);
3569 i++;
3570 action = (*evec[action])(fas);
3571 }
3572 exit:
3573 TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_END, "fas_intr_svc_end");
3574
3575 return (0);
3576 }
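/*
 * The action codes returned by the handlers index straight into evec[]
 * above, so a single hardware interrupt can chain several steps
 * without leaving the loop; e.g. fas_finish() returns ACTION_SEARCH,
 * which presumably selects fas_istart so the next queued cmd is
 * started in the same interrupt before ACTION_RETURN drops us out.
 */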
3577
3578 /*
3579 * Manage phase transitions.
3580 */
3581 static int
3582 fas_phasemanage(struct fas *fas)
3583 {
3584 ushort_t state;
3585 int action;
3586 static int (*pvecs[])(struct fas *fas) = {
3587 fas_handle_cmd_start,
3588 fas_handle_cmd_done,
3589 fas_handle_msg_out_start,
3590 fas_handle_msg_out_done,
3591 fas_handle_msg_in_start,
3592 fas_handle_more_msgin,
3593 fas_handle_msg_in_done,
3594 fas_handle_clearing,
3595 fas_handle_data_start,
3596 fas_handle_data_done,
3597 fas_handle_c_cmplt,
3598 fas_reconnect,
3599 fas_handle_unknown,
3600 fas_reset_recovery
3601 };
3602 int i = 0;
3603
3604 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_START,
3605 "fas_phasemanage_start");
3606
3607 do {
3608 EPRINTF1("fas_phasemanage: %s\n",
3609 fas_state_name(fas->f_state & STATE_ITPHASES));
3610
3611 TRACE_2(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_CALL,
3612 "fas_phasemanage_call: fas 0x%p (%d)", fas, i++);
3613
3614 state = fas->f_state;
3615
3616 if (!(state == STATE_FREE || state > ACTS_ENDVEC)) {
3617 ASSERT(pvecs[state-1] != NULL);
3618 action = (*pvecs[state-1]) (fas);
3619 } else {
3620 fas_log(fas, CE_WARN, "lost state in phasemanage");
3621 action = ACTION_ABORT_ALLCMDS;
3622 }
3623
3624 } while (action == ACTION_PHASEMANAGE);
3625
3626 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_END,
3627 "fas_phasemanage_end");
3628 return (action);
3629 }
3630
3631 /*
3632 * remove a cmd from active list and if timeout flag is set, then
3633 * adjust timeouts; if the same cmd will be resubmitted soon, don't
3634 * bother to adjust timeouts (i.e. don't set this flag)
3635 */
3636 static void
3637 fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int new_timeout_flag)
3638 {
3639 int tag = sp->cmd_tag[1];
3640 int slot = sp->cmd_slot;
3641 struct f_slots *tag_slots = fas->f_active[slot];
3642
3643 ASSERT(sp != NULL);
3644 EPRINTF4("remove tag %d slot %d for target %d.%d\n",
3645 tag, slot, Tgt(sp), Lun(sp));
3646
3647 if (sp == tag_slots->f_slot[tag]) {
3648 tag_slots->f_slot[tag] = NULL;
3649 fas->f_tcmds[slot]--;
3650 }
3651 if (fas->f_current_sp == sp) {
3652 fas->f_current_sp = NULL;
3653 }
3654
3655 ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
3656
3657 if (new_timeout_flag != NEW_TIMEOUT) {
3658 return;
3659 }
3660
3661 /*
3662 * Figure out what to set tag Q timeout for...
3663 *
3664 * Optimize: If we have duplicates of the same timeout
3665 * we're using, then we'll use it again until we run
3666 * out of duplicates. This should be the normal case
3667 * for block and raw I/O.
3668 * If no duplicates, we have to scan through the tag queue and
3669 * find the longest timeout value and use it. This is
3670 * going to take a while...
3671 */
3672 if (sp->cmd_pkt->pkt_time == tag_slots->f_timebase) {
3673 if (--(tag_slots->f_dups) <= 0) {
3674 if (fas->f_tcmds[slot]) {
3675 struct fas_cmd *ssp;
3676 uint_t n = 0;
3677 ushort_t t = tag_slots->f_n_slots;
3678 ushort_t i;
3679 /*
3680 * This crude check assumes we don't do
3681 * this too often which seems reasonable
3682 * for block and raw I/O.
3683 */
3684 for (i = 0; i < t; i++) {
3685 ssp = tag_slots->f_slot[i];
3686 if (ssp &&
3687 (ssp->cmd_pkt->pkt_time > n)) {
3688 n = ssp->cmd_pkt->pkt_time;
3689 tag_slots->f_dups = 1;
3690 } else if (ssp &&
3691 (ssp->cmd_pkt->pkt_time == n)) {
3692 tag_slots->f_dups++;
3693 }
3694 }
3695 tag_slots->f_timebase = n;
3696 EPRINTF1("searching, new_timeout= %d\n", n);
3697 } else {
3698 tag_slots->f_dups = 0;
3699 tag_slots->f_timebase = 0;
3700 }
3701 }
3702 }
3703 tag_slots->f_timeout = tag_slots->f_timebase;
3704
3705 ASSERT(fas->f_ncmds >= fas->f_ndisc);
3706 }
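/*
 * Worked example of the f_timebase/f_dups bookkeeping above: five
 * queued cmds, each with pkt_time == 60, leave f_timebase == 60 and
 * f_dups == 5; removing one of them merely decrements f_dups to 4
 * with no scan. Only when the last 60-second cmd goes away does the
 * O(f_n_slots) scan run to find the new longest pkt_time and its
 * duplicate count.
 */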
3707
3708 /*
3709 * decrement f_ncmds and f_ndisc for this cmd before completing
3710 */
3711 static void
3712 fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp)
3713 {
3714 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3715 if ((sp->cmd_flags & CFLAG_FINISHED) == 0) {
3716 fas->f_ncmds--;
3717 if (sp->cmd_flags & CFLAG_CMDDISC) {
3718 fas->f_ndisc--;
3719 }
3720 sp->cmd_flags |= CFLAG_FINISHED;
3721 sp->cmd_flags &= ~CFLAG_CMDDISC;
3722 }
3723 ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
3724 ASSERT(fas->f_ncmds >= fas->f_ndisc);
3725 }
3726
3727 /*
3728 * Most commonly called phase handlers:
3729 *
3730 * Finish routines
3731 */
3732 static int
3733 fas_finish(struct fas *fas)
3734 {
3735 struct fas_cmd *sp = fas->f_current_sp;
3736 struct scsi_pkt *pkt = CMD2PKT(sp);
3737 int action = ACTION_SEARCH;
3738 struct scsi_status *status =
3739 (struct scsi_status *)sp->cmd_pkt->pkt_scbp;
3740
3741 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_START,
3742 "fas_finish_start");
3743 EPRINTF("fas_finish\n");
3744
3745 #ifdef FAS_TEST
3746 if (fas_test_stop && (sp->cmd_pkt_flags & 0x80000000)) {
3747 debug_enter("untagged cmd completed");
3748 }
3749 #endif
3750
3751 /*
3752 * immediately enable reselects
3753 */
3754 fas_reg_cmd_write(fas, CMD_EN_RESEL);
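	/*
	 * (Presumably reselects are re-enabled this early so another
	 * target can reconnect while we do the completion bookkeeping
	 * below.)
	 */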
3755 if (status->sts_chk) {
3756 /*
3757 * In the case that we are getting a check condition
3758 * clear our knowledge of synchronous capabilities.
3759 * This will unambiguously force a renegotiation
3760 * prior to any possible data transfer (we hope),
3761 * including the data transfer for a UNIT ATTENTION
3762 * condition generated by somebody powering on and
3763 * off a target.
3764 */
3765 fas_force_renegotiation(fas, Tgt(sp));
3766 }
3767
3768 /*
3769 * backoff sync/wide if there were parity errors
3770 */
3771 if (sp->cmd_pkt->pkt_statistics & STAT_PERR) {
3772 fas_sync_wide_backoff(fas, sp, sp->cmd_slot);
3773 #ifdef FAS_TEST
3774 if (fas_test_stop) {
3775 debug_enter("parity error");
3776 }
3777 #endif
3778 }
3779
3780 /*
3781 * Free from active list and update counts
3782 * We need to clean up this cmd now, just in case fas_ustart()
3783 * hits a reset or other fatal transport error
3784 */
3785 fas_check_ncmds(fas);
3786 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
3787 fas_decrement_ncmds(fas, sp);
3788 fas_check_ncmds(fas);
3789
3790 /*
3791 * go to state free and try to start a new cmd now
3792 */
3793 New_state(fas, STATE_FREE);
3794
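	/*
	 * Only kick off new work if there are non-disconnected cmds
	 * queued, the status byte was clean (no check condition to
	 * handle first) and the chip isn't already posting another
	 * interrupt.
	 */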
3795 if ((fas->f_ncmds > fas->f_ndisc) && (*((char *)status) == 0) &&
3796 (INTPENDING(fas) == 0)) {
3797 if (fas_ustart(fas)) {
3798 action = ACTION_RETURN;
3799 }
3800 }
3801
3802 /*
3803 * if there was a data xfer then calculate residue and
3804 * sync data for consistent memory xfers
3805 */
3806 if (pkt->pkt_state & STATE_XFERRED_DATA) {
3807 pkt->pkt_resid = sp->cmd_dmacount - sp->cmd_data_count;
3808 if (sp->cmd_flags & CFLAG_CMDIOPB) {
3809 (void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
3810 DDI_DMA_SYNC_FORCPU);
3811 }
3812 if (pkt->pkt_resid) {
3813 IPRINTF3("%d.%d finishes with %ld resid\n",
3814 Tgt(sp), Lun(sp), pkt->pkt_resid);
3815 }
3816 }
3817
3818 if (sp->cmd_pkt_flags & FLAG_NOINTR) {
3819 fas_call_pkt_comp(fas, sp);
3820 action = ACTION_RETURN;
3821 } else {
3822 /*
3823 * start an autorequest sense if there was a check condition.
3824  * if arq has not been enabled, fas_handle_sts_chk will
3825  * do the callback
3826 */
3827 if (status->sts_chk) {
3828 if (fas_handle_sts_chk(fas, sp)) {
3829 /*
3830 * we can't start an arq because one is
3831 * already in progress. the target is
3832 * probably confused
3833 */
3834 action = ACTION_ABORT_CURCMD;
3835 }
3836 } else if ((*((char *)status) & STATUS_MASK) ==
3837 STATUS_QFULL) {
3838 fas_handle_qfull(fas, sp);
3839 } else {
3840 #ifdef FAS_TEST
3841 if (fas_arqs_failure && (status->sts_chk == 0)) {
3842 struct scsi_arq_status *arqstat;
3843 status->sts_chk = 1;
3844 arqstat = (struct scsi_arq_status *)
3845 (sp->cmd_pkt->pkt_scbp);
3846 arqstat->sts_rqpkt_reason = CMD_TRAN_ERR;
3847 sp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
3848 fas_arqs_failure = 0;
3849 }
3850 if (fas_tran_err) {
3851 sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
3852 fas_tran_err = 0;
3853 }
3854 #endif
3855 fas_call_pkt_comp(fas, sp);
3856 }
3857 }
3858
3859 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_END, "fas_finish_end");
3860 return (action);
3861 }
3862
3863 /*
3864 * Complete the process of selecting a target
3865 */
3866 static int
3867 fas_finish_select(struct fas *fas)
3868 {
3869 volatile struct dma *dmar = fas->f_dma;
3870 struct fas_cmd *sp = fas->f_current_sp;
3871 uchar_t intr = fas->f_intr;
3872 uchar_t step;
3873
3874 step = fas_reg_read(fas, &fas->f_reg->fas_step) & FAS_STEP_MASK;
3875
3876 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_START,
3877 "fas_finish_select_start");
3878 EPRINTF("fas_finish_select:\n");
3879 ASSERT(sp != 0);
3880
3881 /*
3882 * Check for DMA gate array errors
3883 */
3884 if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr))
3885 & DMA_ERRPEND) {
3886 /*
3887 * It would be desirable to set the ATN* line and attempt to
3888 * do the whole schmear of INITIATOR DETECTED ERROR here,
3889 * but that is too hard to do at present.
3890 */
3891 fas_log(fas, CE_WARN,
3892 "Unrecoverable DMA error during selection");
3893 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
3894
3895 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET1_END,
3896 "fas_finish_select_end (ACTION_RESET1)");
3897 return (ACTION_RESET);
3898 }
3899
3900 /*
3901 * Shut off DMA gate array
3902 */
3903 FAS_FLUSH_DMA(fas);
3904
3905 /*
3906 * Did something respond to selection?
3907 */
3908 if (intr == (FAS_INT_BUS|FAS_INT_FCMP)) {
3909 /*
3910  * We successfully selected a target (we think).
3911 * Now we figure out how botched things are
3912 * based upon the kind of selection we were
3913 * doing and the state of the step register.
3914 */
3915 switch (step) {
3916 case FAS_STEP_ARBSEL:
3917 /*
3918 * In this case, we selected the target, but went
3919 * neither into MESSAGE OUT nor COMMAND phase.
3920 * However, this isn't a fatal error, so we just
3921 * drive on.
3922 *
3923 * This might be a good point to note that we have
3924  * a target that appears to not accommodate
3925  * disconnecting,
3926  * but it really isn't worth the effort to distinguish
3927  * such targets especially from others.
3928 */
3929 /* FALLTHROUGH */
3930
3931 case FAS_STEP_SENTID:
3932 /*
3933  * In this case, we selected the target and sent the
3934  * message byte and have stopped with ATN* still on.
3935 * This case should only occur if we use the SELECT
3936 * AND STOP command.
3937 */
3938 /* FALLTHROUGH */
3939
3940 case FAS_STEP_NOTCMD:
3941 /*
3942 * In this case, we either didn't transition to command
3943 * phase, or,
3944 * if we were using the SELECT WITH ATN3 command,
3945 * we possibly didn't send all message bytes.
3946 */
3947 break;
3948
3949 case FAS_STEP_PCMD:
3950 /*
3951 * In this case, not all command bytes transferred.
3952 */
3953 /* FALLTHROUGH */
3954
3955 case FAS_STEP_DONE:
3956 /*
3957 * This is the usual 'good' completion point.
3958  * If we sent message byte(s), we subtract
3959 * off the number of message bytes that were
3960 * ahead of the command.
3961 */
3962 sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
3963 break;
3964
3965 default:
3966 fas_log(fas, CE_WARN,
3967 "bad sequence step (0x%x) in selection", step);
3968 TRACE_0(TR_FAC_SCSI_FAS,
3969 TR_FAS_FINISH_SELECT_RESET3_END,
3970 "fas_finish_select_end (ACTION_RESET3)");
3971 return (ACTION_RESET);
3972 }
3973
3974 /*
3975 * OR in common state...
3976 */
3977 sp->cmd_pkt->pkt_state |= (STATE_GOT_BUS|STATE_GOT_TARGET);
3978
3979 /*
3980 * data pointer initialization has already been done
3981 */
3982 New_state(fas, ACTS_UNKNOWN);
3983 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_ACTION3_END,
3984 "fas_finish_select_end (action3)");
3985 return (fas_handle_unknown(fas));
3986
3987 } else if (intr == FAS_INT_DISCON) {
3988 /*
3989 * make sure we negotiate when this target comes
3990 * on line later on
3991 */
3992 fas_force_renegotiation(fas, Tgt(sp));
3993
3994 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
3995 sp->cmd_pkt->pkt_state |= STATE_GOT_BUS;
3996
3997 /*
3998 * Set the throttle to DRAIN_THROTTLE to make
3999 * sure any disconnected commands will get timed out
4000  * in case the drive dies
4001 */
4002
4003 if (fas->f_reset_delay[Tgt(sp)] == 0) {
4004 fas->f_throttle[sp->cmd_slot] = DRAIN_THROTTLE;
4005 }
4006
4007 fas_set_pkt_reason(fas, sp, CMD_INCOMPLETE, 0);
4008
4009 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_FINISH_END,
4010 "fas_finish_select_end (ACTION_FINISH)");
4011 return (ACTION_FINISH);
4012 } else {
4013 fas_printstate(fas, "undetermined selection failure");
4014 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET2_END,
4015 "fas_finish_select_end (ACTION_RESET2)");
4016 return (ACTION_RESET);
4017 }
4018 _NOTE(NOT_REACHED)
4019 /* NOTREACHED */
4020 }
4021
4022 /*
4023 * a selection got preempted by a reselection; shut down dma
4024  * and put the cmd back in the ready queue unless NOINTR
4025 */
4026 static int
4027 fas_reselect_preempt(struct fas *fas)
4028 {
4029 int rval;
4030
4031 /*
4032  * A reselection attempt preempted our selection attempt;
4033  * we put the request back in the ready queue
4034 */
4035 struct fas_cmd *sp = fas->f_current_sp;
4036
4037 /*
4038 * Shut off DMA gate array
4039 */
4040 FAS_FLUSH_DMA(fas);
4041
4042 /*
4043 * service the reconnect now and clean up later
4044 */
4045 New_state(fas, STATE_FREE);
4046 rval = fas_reconnect(fas);
4047
4048 /*
4049 * If selection for a non-tagged command is preempted, the
4050 * command could be stuck because throttle was set to DRAIN,
4051 * and a disconnected command timeout follows.
4052 */
4053 if ((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0)
4054 fas->f_throttle[sp->cmd_slot] = 1;
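	/*
	 * (A throttle of 1 should let just this one cmd go out again
	 * when it is resubmitted from the ready queue below.)
	 */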
4055
4056 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4057 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
4058 }
4059
4060 /*
4061 * if we attempted to renegotiate on this cmd, undo this now
4062 */
4063 if (fas->f_wdtr_sent) {
4064 fas->f_wide_known &= ~(1<<Tgt(sp));
4065 fas->f_wdtr_sent = 0;
4066 }
4067 if (fas->f_sdtr_sent) {
4068 fas->f_sync_known &= ~(1<<Tgt(sp));
4069 fas->f_sdtr_sent = 0;
4070 }
4071
4072 fas_head_of_readyQ(fas, sp);
4073
4074 return (rval);
4075 }
4076
4077 /*
4078 * Handle the reconnection of a target
4079 */
4080 static int
4081 fas_reconnect(struct fas *fas)
4082 {
4083 volatile struct fasreg *fasreg = fas->f_reg;
4084 struct fas_cmd *sp = NULL;
4085 uchar_t target, lun;
4086 uchar_t tmp;
4087 uchar_t slot;
4088 char *bad_reselect = NULL;
4089
4090 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_START,
4091 "fas_reconnect_start");
4092 EPRINTF("fas_reconnect:\n");
4093
4094 fas_check_ncmds(fas);
4095
4096 switch (fas->f_state) {
4097 default:
4098 /*
4099 * Pick up target id from fifo
4100 *
4101 * There should only be the reselecting target's id
4102 * and an identify message in the fifo.
4103 */
4104 target = fas->f_fifo[0];
4105
4106 /*
4107 * we know the target so update period, conf3,
4108 * offset reg, if necessary, and accept the msg
4109 */
4110 FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);
4111
4112 /*
4113 * now we can accept the message. an untagged
4114 * target will go immediately into data phase so
4115 * the period/offset/conf3 registers need to be
4116 * updated before accepting the message
4117 */
4118 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
4119
4120 if (fas->f_fifolen != 2) {
4121 bad_reselect = "bad reselect bytes";
4122 break;
4123 }
4124
4125 /*
4126 * normal initial reconnect; we get another interrupt later
4127 * for the tag
4128 */
4129 New_state(fas, ACTS_RESEL);
4130
4131 if (fas->f_stat & FAS_STAT_PERR) {
4132 break;
4133 }
4134
4135 /*
4136 * Check sanity of message.
4137 */
4138 tmp = fas->f_fifo[1];
4139 fas->f_last_msgin = tmp;
4140
4141 if (!(IS_IDENTIFY_MSG(tmp)) || (tmp & INI_CAN_DISCON)) {
4142 bad_reselect = "bad identify msg";
4143 break;
4144 }
4145
4146 lun = tmp & (NLUNS_PER_TARGET-1);
4147
4148 EPRINTF2("fas_reconnect: target=%x, idmsg=%x\n",
4149 target, tmp);
4150
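		/*
		 * slots form a flat array indexed by
		 * (target * NLUNS_PER_TARGET) + lun
		 */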
4151 fas->f_resel_slot = slot = (target * NLUNS_PER_TARGET) | lun;
4152
4153 fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
4154 (target & 0xf) | FAS_BUSID_ENCODID |
4155 FAS_BUSID_32BIT_COUNTER);
4156
4157 /*
4158 * If tag queueing in use, DMA in tag.
4159 * Otherwise, we're ready to go.
4160 * if tag 0 slot is non-empty, a non-tagged cmd is
4161 * reconnecting
4162 */
4163 if (TAGGED(target) && fas->f_tcmds[slot] &&
4164 (fas->f_active[slot]->f_slot[0] == NULL)) {
4165 volatile uchar_t *c =
4166 (uchar_t *)fas->f_cmdarea;
4167
4168 /*
4169 * If we've been doing tagged queueing and this
4170 * request doesn't do it,
4171 * maybe it was disabled for this one. This is rather
4172 * dangerous as it blows all pending tagged cmds away.
4173 * But if target is confused, then we'll blow up
4174 * shortly.
4175 */
4176 *c++ = INVALID_MSG;
4177 *c = INVALID_MSG;
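			/*
			 * INVALID_MSG is a sentinel; the tag bytes
			 * DMAed in will overwrite it, and the polling
			 * loop in the ACTS_RESEL case below waits for
			 * that to happen.
			 */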
4178
4179 FAS_DMA_WRITE_SETUP(fas, 2,
4180 fas->f_dmacookie.dmac_address);
4181
4182 /*
4183 * For tagged queuing, we should still be in msgin
4184 * phase.
4185 * If not, then either we aren't running tagged
4186 * queueing like we thought or the target died.
4187 */
4188 if (INTPENDING(fas) == 0) {
4189 EPRINTF1("slow reconnect, slot=%x\n", slot);
4190 TRACE_0(TR_FAC_SCSI_FAS,
4191 TR_FAS_RECONNECT_RETURN1_END,
4192 "fas_reconnect_end (_RETURN1)");
4193 return (ACTION_RETURN);
4194 }
4195
4196 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
4197 fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
4198 if (fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET)) {
4199 return (fas_illegal_cmd_or_bus_reset(fas));
4200 }
4201
4202 if ((fas->f_stat & FAS_PHASE_MASK) !=
4203 FAS_PHASE_MSG_IN) {
4204 bad_reselect = "not in msgin phase";
4205 break;
4206 }
4207
4208 if (fas->f_intr & FAS_INT_DISCON) {
4209 bad_reselect = "unexpected bus free";
4210 break;
4211 }
4212 } else {
4213 fas->f_current_sp = sp = fas->f_active[slot]->f_slot[0];
4214 break;
4215 }
4216 /*FALLTHROUGH*/
4217
4218 case ACTS_RESEL:
4219 {
4220 volatile uchar_t *c =
4221 (uchar_t *)fas->f_cmdarea;
4222 struct f_slots *tag_slots;
4223 int id, tag;
4224 uint_t i;
4225
4226 slot = fas->f_resel_slot;
4227 target = slot/NLUNS_PER_TARGET;
4228
4229 if ((fas->f_stat & FAS_PHASE_MASK) !=
4230 FAS_PHASE_MSG_IN) {
4231 IPRINTF1("no tag for slot %x\n", slot);
4232 if (fas->f_intr & ~(FAS_INT_BUS |
4233 FAS_INT_FCMP)) {
4234 New_state(fas, ACTS_UNKNOWN);
4235 TRACE_0(TR_FAC_SCSI_FAS,
4236 TR_FAS_RECONNECT_PHASEMANAGE_END,
4237 "fas_reconnect_end (_PHASEMANAGE)");
4238 return (ACTION_PHASEMANAGE);
4239 } else {
4240 bad_reselect = "not in msgin phase";
4241 break;
4242 }
4243 }
4244 fas_reg_cmd_write(fas, CMD_TRAN_INFO|CMD_DMA);
4245 fas_dma_reg_write(fas, &fas->f_dma->dma_csr,
4246 fas->f_dma_csr);
4247
4248 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
4249
4250 for (i = 0; i < (uint_t)RECONNECT_TAG_RCV_TIMEOUT;
4251 i++) {
4252 /*
4253 * timeout is not very accurate but this
4254 * should take no time at all
4255 */
4256 if (INTPENDING(fas)) {
4257 fas->f_stat = fas_reg_read(fas,
4258 (uchar_t *)&fas->f_reg->fas_stat);
4259 fas->f_intr = fas_reg_read(fas,
4260 (uchar_t *)&fas->f_reg->fas_intr);
4261 if (fas->f_intr & (FAS_INT_RESET |
4262 FAS_INT_ILLEGAL)) {
4263 return (
4264 fas_illegal_cmd_or_bus_reset
4265 (fas));
4266 }
4267 if (fas->f_intr & FAS_INT_FCMP) {
4268 break;
4269 }
4270 }
4271 }
4272
4273 if (i == (uint_t)RECONNECT_TAG_RCV_TIMEOUT) {
4274 bad_reselect = "timeout on receiving tag msg";
4275 break;
4276 }
4277
4278 FAS_FLUSH_DMA(fas);
4279
4280 /*
4281 * we should really do a sync here but that
4282 * hurts performance too much; we'll just hang
4283 * around till the tag byte flips
4284 * This is necessary on any system with an
4285 * XBox
4286 */
4287 if (*c == INVALID_MSG) {
4288 EPRINTF(
4289 "fas_reconnect: invalid msg, polling\n");
4290 for (i = 0; i < 1000000; i++) {
4291 if (*c != INVALID_MSG)
4292 break;
4293 }
4294 }
4295
4296 if (fas->f_stat & FAS_STAT_PERR) {
4297 break;
4298 }
4299
4300 if ((fas->f_stat & FAS_STAT_XZERO) == 0 ||
4301 (id = *c++) < MSG_SIMPLE_QTAG ||
4302 id > MSG_ORDERED_QTAG) {
4303 /*
4304 * Target agreed to do tagged queueing
4305 * and lied!
4306 * This problem implies the drive firmware is
4307 * broken.
4308 */
4309 bad_reselect = "botched tag";
4310 break;
4311 }
4312 tag = *c;
4313
4314 /* Set ptr to reconnecting scsi pkt */
4315 tag_slots = fas->f_active[slot];
4316 if (tag_slots != NULL) {
4317 sp = tag_slots->f_slot[tag];
4318 } else {
4319 bad_reselect = "Invalid tag";
4320 break;
4321 }
4322
4323 fas->f_current_sp = sp;
4324 }
4325 }
4326
4327 if (fas->f_stat & FAS_STAT_PERR) {
4328 sp = NULL;
4329 bad_reselect = "Parity error in reconnect msg's";
4330 }
4331
4332 if ((sp == NULL ||
4333 #ifdef FAS_TEST
4334 (fas_atest_reconn & (1<<Tgt(sp))) ||
4335 #endif
4336 (sp->cmd_flags & (CFLAG_CMDDISC|CFLAG_CMDPROXY)) == 0)) {
4337 /*
4338 * this shouldn't really happen, so it is better
4339 * to reset the bus; some disks accept the abort
4340 * and then still reconnect
4341 */
4342 if (bad_reselect == NULL) {
4343 bad_reselect = "no command";
4344 }
4345 #ifdef FAS_TEST
4346 if (sp && !(fas_atest_reconn & (1<<Tgt(sp))) &&
4347 fas_test_stop) {
4348 debug_enter("bad reconnect");
4349 } else {
4350 fas_atest_reconn = 0;
4351 }
4352 #endif
4353 goto bad;
4354
4355 /*
4356 * XXX remove this case or make it an ASSERT
4357 */
4358 } else if (sp->cmd_flags & CFLAG_CMDPROXY) {
4359 /*
4360 * If we got here, we were already attempting to
4361 * run a polled proxy command for this target.
4362  * Set ATN, copy in the message, and drive
4363 * on (ignoring any parity error on the identify).
4364 */
4365 IPRINTF1("fas_reconnect: fielding proxy cmd for %d\n",
4366 target);
4367 fas_assert_atn(fas);
4368 fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
4369 tmp = 0;
4370 while (tmp < fas->f_omsglen) {
4371 fas->f_cur_msgout[tmp] =
4372 sp->cmd_cdb[FAS_PROXY_DATA+1+tmp];
4373 tmp++;
4374 }
4375 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
4376
4377 /*
4378 * pretend that the disconnected cmd is still disconnected
4379 * (this prevents ndisc from going negative)
4380 */
4381 fas->f_ndisc++;
4382 ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
4383 ASSERT(fas->f_ncmds >= fas->f_ndisc);
4384 }
4385
4386 ASSERT(fas->f_resel_slot == slot);
4387 ASSERT(fas->f_ndisc > 0);
4388 fas->f_ndisc--;
4389 sp->cmd_flags &= ~CFLAG_CMDDISC;
4390 New_state(fas, ACTS_UNKNOWN);
4391
4392 /*
4393 * A reconnect may imply a restore pointers operation
4394 * Note that some older disks (Micropolis in Pbox) do not
4395 * send a save data ptr on disconnect if all data has been
4396 * xferred. So, we cannot restore ptrs yet here.
4397 */
4398 if ((sp->cmd_flags & CFLAG_DMAVALID) &&
4399 (sp->cmd_data_count != sp->cmd_saved_data_count)) {
4400 sp->cmd_flags |= CFLAG_RESTORE_PTRS;
4401 }
4402
4403 /*
4404 * Return to await the FUNCTION COMPLETE interrupt we
4405 * should get out of accepting the IDENTIFY message.
4406 */
4407 EPRINTF2("Reconnecting %d.%d\n", target, slot % NLUNS_PER_TARGET);
4408 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RETURN2_END,
4409 "fas_reconnect_end (_RETURN2)");
4410 return (ACTION_RETURN);
4411
4412 bad:
4413 if (sp && (fas->f_stat & FAS_STAT_PERR)) {
4414 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
4415 }
4416 fas_log(fas, CE_WARN, "target %x: failed reselection (%s)",
4417 target, bad_reselect);
4418
4419 #ifdef FASDEBUG
4420 fas_printstate(fas, "failed reselection");
4421 #endif
4422 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RESET5_END,
4423 "fas_reconnect_end (_RESET5)");
4424 return (ACTION_RESET);
4425 }
4426
4427 /*
4428 * handle unknown bus phase
4429 * we don't know what to expect so check status register for current
4430 * phase
4431 */
4432 int
4433 fas_handle_unknown(struct fas *fas)
4434 {
4435 TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_START,
4436 "fas_handle_unknown_start: fas 0x%p", fas);
4437 EPRINTF("fas_handle_unknown:\n");
4438
4439 if ((fas->f_intr & FAS_INT_DISCON) == 0) {
4440 /*
4441 * we call actions here rather than returning to phasemanage
4442 * (this is the most frequently called action)
4443 */
4444 switch (fas->f_stat & FAS_PHASE_MASK) {
4445 case FAS_PHASE_DATA_IN:
4446 case FAS_PHASE_DATA_OUT:
4447 New_state(fas, ACTS_DATA);
4448 TRACE_0(TR_FAC_SCSI_FAS,
4449 TR_FAS_HANDLE_UNKNOWN_PHASE_DATA_END,
4450 "fas_handle_unknown_end (phase_data)");
4451 return (fas_handle_data_start(fas));
4452
4453 case FAS_PHASE_MSG_OUT:
4454 New_state(fas, ACTS_MSG_OUT);
4455 TRACE_0(TR_FAC_SCSI_FAS,
4456 TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_OUT_END,
4457 "fas_handle_unknown_end (phase_msg_out)");
4458 return (fas_handle_msg_out_start(fas));
4459
4460 case FAS_PHASE_MSG_IN:
4461 New_state(fas, ACTS_MSG_IN);
4462 TRACE_0(TR_FAC_SCSI_FAS,
4463 TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_IN_END,
4464 "fas_handle_unknown_end (phase_msg_in)");
4465 return (fas_handle_msg_in_start(fas));
4466
4467 case FAS_PHASE_STATUS:
4468 fas_reg_cmd_write(fas, CMD_FLUSH);
4469 #ifdef FAS_TEST
4470 if (fas_ptest_status & (1<<Tgt(fas->f_current_sp))) {
4471 fas_assert_atn(fas);
4472 }
4473 #endif /* FAS_TEST */
4474
4475 fas_reg_cmd_write(fas, CMD_COMP_SEQ);
4476 New_state(fas, ACTS_C_CMPLT);
4477
4478 TRACE_0(TR_FAC_SCSI_FAS,
4479 TR_FAS_HANDLE_UNKNOWN_PHASE_STATUS_END,
4480 "fas_handle_unknown_end (phase_status)");
4481 return (fas_handle_c_cmplt(fas));
4482
4483 case FAS_PHASE_COMMAND:
4484 New_state(fas, ACTS_CMD_START);
4485 TRACE_0(TR_FAC_SCSI_FAS,
4486 TR_FAS_HANDLE_UNKNOWN_PHASE_CMD_END,
4487 "fas_handle_unknown_end (phase_cmd)");
4488 return (fas_handle_cmd_start(fas));
4489 }
4490
4491 fas_printstate(fas, "Unknown bus phase");
4492 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_RESET_END,
4493 "fas_handle_unknown_end (reset)");
4494 return (ACTION_RESET);
4495
4496 } else {
4497 /*
4498 * Okay. What to do now? Let's try (for the time being)
4499 * assuming that the target went south and dropped busy,
4500 * as a disconnect implies that either we received
4501 * a completion or a disconnect message, or that we
4502 * had sent an ABORT OPERATION or BUS DEVICE RESET
4503 * message. In either case, we expected the disconnect
4504 * and should have fielded it elsewhere.
4505 *
4506 * If we see a chip disconnect here, this is an unexpected
4507 * loss of BSY*. Clean up the state of the chip and return.
4508 *
4509 */
4510 int msgout = fas->f_cur_msgout[0];
4511 struct fas_cmd *sp = fas->f_current_sp;
4512 int target = Tgt(sp);
4513
4514 if (msgout == MSG_HEAD_QTAG || msgout == MSG_SIMPLE_QTAG) {
4515 msgout = fas->f_cur_msgout[2];
4516 }
4517 EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
4518 fas->f_cur_msgout[0], fas->f_cur_msgout[1],
4519 fas->f_cur_msgout[2], fas->f_last_msgout);
4520
4521 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG ||
4522 msgout == MSG_DEVICE_RESET) {
4523 IPRINTF2("Successful %s message to target %d\n",
4524 scsi_mname(msgout), Tgt(sp));
4525 if (sp->cmd_flags & CFLAG_CMDPROXY) {
4526 sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
4527 }
4528 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
4529 fas->f_abort_msg_sent++;
4530 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4531 fas_set_pkt_reason(fas, sp,
4532 CMD_ABORTED, STAT_ABORTED);
4533 }
4534 } else if (msgout == MSG_DEVICE_RESET) {
4535 fas->f_reset_msg_sent++;
4536 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4537 fas_set_pkt_reason(fas, sp,
4538 CMD_RESET, STAT_DEV_RESET);
4539 }
4540 fas_force_renegotiation(fas, target);
4541 }
4542 } else {
4543 if ((fas->f_last_msgout == MSG_EXTENDED) &&
4544 (fas->f_last_msgin == MSG_REJECT)) {
4545 /*
4546 * the target rejected the negotiations,
4547 * so resubmit again (no_sync/no_wide
4548 * is now set)
4549 */
4550 New_state(fas, STATE_FREE);
4551 fas_reg_cmd_write(fas, CMD_EN_RESEL);
4552 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
4553 fas_decrement_ncmds(fas, sp);
4554 fas_check_ncmds(fas);
4555 sp->cmd_flags &= ~CFLAG_TRANFLAG;
4556 (void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
4557 fas_check_ncmds(fas);
4558 TRACE_0(TR_FAC_SCSI_FAS,
4559 TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
4560 "fas_handle_unknown_end (int_discon)");
4561 return (ACTION_SEARCH);
4562
4563 } else if (fas->f_last_msgout == MSG_EXTENDED) {
4564 /*
4565 * target dropped off the bus during
4566 * negotiations
4567 */
4568 fas_reset_sync_wide(fas);
4569 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
4570 }
4571
4572 fas_set_pkt_reason(fas, sp, CMD_UNX_BUS_FREE, 0);
4573 #ifdef FASDEBUG
4574 fas_printstate(fas, "unexpected bus free");
4575 #endif
4576 }
4577 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
4578 "fas_handle_unknown_end (int_discon)");
4579 return (ACTION_FINISH);
4580 }
4581 _NOTE(NOT_REACHED)
4582 /* NOTREACHED */
4583 }
4584
4585 /*
4586 * handle target disconnecting
4587 */
4588 static int
4589 fas_handle_clearing(struct fas *fas)
4590 {
4591 struct fas_cmd *sp = fas->f_current_sp;
4592
4593 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_START,
4594 "fas_handle_clearing_start");
4595 EPRINTF("fas_handle_clearing:\n");
4596
4597 if (fas->f_laststate == ACTS_C_CMPLT ||
4598 fas->f_laststate == ACTS_MSG_IN_DONE) {
4599 if (INTPENDING(fas)) {
4600 volatile struct fasreg *fasreg = fas->f_reg;
4601
4602 fas->f_stat = fas_reg_read(fas,
4603 (uchar_t *)&fasreg->fas_stat);
4604 fas->f_intr = fas_reg_read(fas,
4605 (uchar_t *)&fasreg->fas_intr);
4606 if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
4607 return (fas_illegal_cmd_or_bus_reset(fas));
4608 }
4609 } else {
4610 /*
4611 * change e_laststate for the next time around
4612  * change f_laststate for the next time around
4613 fas->f_laststate = ACTS_CLEARING;
4614 TRACE_0(TR_FAC_SCSI_FAS,
4615 TR_FAS_HANDLE_CLEARING_RETURN1_END,
4616 "fas_handle_clearing_end (ACTION_RETURN1)");
4617 return (ACTION_RETURN);
4618 }
4619 }
4620
4621 if (fas->f_intr == FAS_INT_DISCON) {
4622 /*
4623 * At this point the FAS chip has disconnected. The bus should
4624 * be either quiet or someone may be attempting a reselection
4625 * of us (or somebody else). Call the routine that sets the
4626 * chip back to a correct and known state.
4627 * If the last message in was a disconnect, search
4628 * for new work to do, else return to call fas_finish()
4629 */
4630 fas->f_last_msgout = 0xff;
4631 fas->f_omsglen = 0;
4632 if (fas->f_last_msgin == MSG_DISCONNECT) {
4633
4634 fas_reg_cmd_write(fas, CMD_EN_RESEL);
4635
4636 New_state(fas, STATE_FREE);
4637
4638 ASSERT(fas->f_current_sp != NULL);
4639 EPRINTF2("disconnecting %d.%d\n", Tgt(sp), Lun(sp));
4640
4641 sp->cmd_pkt->pkt_statistics |= STAT_DISCON;
4642 sp->cmd_flags |= CFLAG_CMDDISC;
4643 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4644 fas->f_ndisc++;
4645 }
4646 ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
4647 ASSERT(fas->f_ncmds >= fas->f_ndisc);
4648
4649 fas->f_current_sp = NULL;
4650
4651 /*
4652 * start a cmd here to save time
4653 */
4654 if ((fas->f_ncmds > fas->f_ndisc) && fas_ustart(fas)) {
4655 TRACE_0(TR_FAC_SCSI_FAS,
4656 TR_FAS_HANDLE_CLEARING_RETURN2_END,
4657 "fas_handle_clearing_end (ACTION_RETURN2)");
4658 return (ACTION_RETURN);
4659 }
4660
4661
4662 TRACE_0(TR_FAC_SCSI_FAS,
4663 TR_FAS_HANDLE_CLEARING_RETURN3_END,
4664 "fas_handle_clearing_end (ACTION_RETURN3)");
4665 return (ACTION_RETURN);
4666 } else {
4667 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_END,
4668 "fas_handle_clearing_end");
4669 return (fas_finish(fas));
4670 }
4671 } else {
4672 /*
4673 * If the target didn't disconnect from the
4674 * bus, that is a gross fatal error.
4675 * XXX this can be caused by asserting ATN
4676 * XXX check bus phase and if msgout, send a message
4677 */
4678 fas_log(fas, CE_WARN,
4679 "Target %d didn't disconnect after sending %s",
4680 Tgt(sp), scsi_mname(fas->f_last_msgin));
4681
4682 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4683
4684 #ifdef FASDEBUG
4685 IPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
4686 fas->f_cur_msgout[0], fas->f_cur_msgout[1],
4687 fas->f_cur_msgout[2], fas->f_last_msgout);
4688 IPRINTF1("last msgin=%x\n", fas->f_last_msgin);
4689 #endif
4690 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_ABORT_END,
4691 "fas_handle_clearing_end (ACTION_ABORT_ALLCMDS)");
4692 return (ACTION_ABORT_ALLCMDS);
4693 }
4694 }
4695
4696 /*
4697 * handle data phase start
4698 */
4699 static int
4700 fas_handle_data_start(struct fas *fas)
4701 {
4702 uint64_t end;
4703 uint32_t amt;
4704 struct fas_cmd *sp = fas->f_current_sp;
4705 int sending, phase;
4706
4707 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_START,
4708 "fas_handle_data_start");
4709 EPRINTF("fas_handle_data_start:\n");
4710
4711 if ((sp->cmd_flags & CFLAG_DMAVALID) == 0) {
4712 fas_printstate(fas, "unexpected data phase");
4713 bad:
4714 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4715
4716 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT1_END,
4717 "fas_handle_data_end (ACTION_ABORT_CURCMD1)");
4718 return (ACTION_ABORT_CURCMD);
4719 } else {
4720 sending = (sp->cmd_flags & CFLAG_DMASEND)? 1 : 0;
4721 }
4722
4723 if (sp->cmd_flags & CFLAG_RESTORE_PTRS) {
4724 if (fas_restore_pointers(fas, sp)) {
4725 return (ACTION_ABORT_CURCMD);
4726 }
4727 sp->cmd_flags &= ~CFLAG_RESTORE_PTRS;
4728 }
4729
4730 /*
4731 * And make sure our DMA pointers are in good shape.
4732 *
4733 * Because SCSI is SCSI, the current DMA pointer has got to be
4734 * greater than or equal to our DMA base address. All other cases
4735 * that might have affected this always set curaddr to be >=
4736 * to the DMA base address.
4737 */
4738 ASSERT(sp->cmd_cur_addr >= sp->cmd_dmacookie.dmac_address);
4739 end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4740 (uint64_t)sp->cmd_dmacookie.dmac_size;
4741
4742 DPRINTF5(
4743 "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%"
4744 PRIx64 ", nwin=%x\n",
4745 sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
4746 sp->cmd_nwin);
4747 DPRINTF2("dmac_address = %x, dmac_size=%lx\n",
4748 sp->cmd_dmacookie.dmac_address, sp->cmd_dmacookie.dmac_size);
4749
4750 if (sp->cmd_cur_addr >= end) {
4751 if (fas_next_window(fas, sp, end)) {
4752 goto bad;
4753 }
4754 end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4755 (uint64_t)sp->cmd_dmacookie.dmac_size;
4756 DPRINTF2("dmac_address=%x, dmac_size=%lx\n",
4757 sp->cmd_dmacookie.dmac_address,
4758 sp->cmd_dmacookie.dmac_size);
4759 }
4760
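	/*
	 * Clamp the transfer to what the chip's transfer counter can
	 * express in one go (dma_attr_count_max); any remainder is
	 * picked up on the next pass through the data phase.
	 */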
4761 amt = end - sp->cmd_cur_addr;
4762 if (fas->f_dma_attr->dma_attr_count_max < amt) {
4763 amt = fas->f_dma_attr->dma_attr_count_max;
4764 }
4765 DPRINTF3("amt=%x, end=%" PRIx64 ", cur_addr=%x\n", amt, end, sp->cmd_cur_addr);
4766
4767 #ifdef FASDEBUG
4768 /*
4769 * Make sure that we don't cross a boundary we can't handle
4770 */
4771 end = (uint64_t)sp->cmd_cur_addr + (uint64_t)amt - 1;
4772 if ((end & ~fas->f_dma_attr->dma_attr_seg) !=
4773 (sp->cmd_cur_addr & ~fas->f_dma_attr->dma_attr_seg)) {
4774 EPRINTF3("curaddr %x curaddr+amt %" PRIx64
4775 " cntr_max %" PRIx64 "\n",
4776 sp->cmd_cur_addr, end, fas->f_dma_attr->dma_attr_seg);
4777 amt = (end & ~fas->f_dma_attr->dma_attr_seg) - sp->cmd_cur_addr;
4778 if (amt == 0 || amt > fas->f_dma_attr->dma_attr_count_max) {
4779 fas_log(fas, CE_WARN, "illegal dma boundary? %x", amt);
4780 goto bad;
4781 }
4782 }
4783 #endif
4784
4785 end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4786 (uint64_t)sp->cmd_dmacookie.dmac_size -
4787 (uint64_t)sp->cmd_cur_addr;
4788 if (amt > end) {
4789 EPRINTF4("ovflow amt %x s.b. %" PRIx64 " curaddr %x count %x\n",
4790 amt, end, sp->cmd_cur_addr, sp->cmd_dmacount);
4791 amt = (uint32_t)end;
4792 }
4793
4794 fas->f_lastcount = amt;
4795
4796 EPRINTF4("%d.%d cmd 0x%x to xfer %x\n", Tgt(sp), Lun(sp),
4797 sp->cmd_pkt->pkt_cdbp[0], amt);
4798
4799 phase = fas->f_stat & FAS_PHASE_MASK;
4800
4801 if ((phase == FAS_PHASE_DATA_IN) && !sending) {
4802 FAS_DMA_WRITE(fas, amt, sp->cmd_cur_addr,
4803 CMD_TRAN_INFO|CMD_DMA);
4804 } else if ((phase == FAS_PHASE_DATA_OUT) && sending) {
4805 FAS_DMA_READ(fas, amt, sp->cmd_cur_addr, amt,
4806 CMD_TRAN_INFO|CMD_DMA);
4807 } else {
4808 fas_log(fas, CE_WARN,
4809 "unwanted data xfer direction for Target %d", Tgt(sp));
4810 fas_set_pkt_reason(fas, sp, CMD_DMA_DERR, 0);
4811 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT2_END,
4812 "fas_handle_data_end (ACTION_ABORT_CURCMD2)");
4813 return (ACTION_ABORT_CURCMD);
4814 }
4815
4816 #ifdef FAS_TEST
4817 if (!sending && (fas_ptest_data_in & (1<<Tgt(sp)))) {
4818 fas_assert_atn(fas);
4819 }
4820 #endif /* FAS_TEST */
4821
4822 New_state(fas, ACTS_DATA_DONE);
4823
4824 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_END,
4825 "fas_handle_data_end (ACTION_RETURN)");
4826 return (ACTION_RETURN);
4827 }
4828
4829 static int
4830 fas_handle_data_done(struct fas *fas)
4831 {
4832 volatile struct fasreg *fasreg = fas->f_reg;
4833 volatile struct dma *dmar = fas->f_dma;
4834 struct fas_cmd *sp = fas->f_current_sp;
4835 uint32_t xfer_amt;
4836 char was_sending;
4837 uchar_t stat, fifoamt, tgt;
4838
4839 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_START,
4840 "fas_handle_data_done_start");
4841 EPRINTF("fas_handle_data_done\n");
4842
4843 tgt = Tgt(sp);
4844 stat = fas->f_stat;
4845 was_sending = (sp->cmd_flags & CFLAG_DMASEND) ? 1 : 0;
4846
4847 /*
4848 * Check for DMA errors (parity or memory fault)
4849 */
4850 if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr)) &
4851 DMA_ERRPEND) {
4852 /*
4853 * It would be desirable to set the ATN* line and attempt to
4854 * do the whole schmear of INITIATOR DETECTED ERROR here,
4855 * but that is too hard to do at present.
4856 */
4857 fas_log(fas, CE_WARN, "Unrecoverable DMA error on dma %s",
4858 (was_sending) ? "send" : "receive");
4859 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4860 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
4861 "fas_handle_data_done_end (ACTION_RESET)");
4862 return (ACTION_RESET);
4863 }
4864
4865 /*
4866 * Data Receive conditions:
4867 *
4868 * Check for parity errors. If we have a parity error upon
4869 * receive, the FAS chip has asserted ATN* for us already.
4870 */
4871 if (!was_sending) {
4872 #ifdef FAS_TEST
4873 if (fas_ptest_data_in & (1<<tgt)) {
4874 fas_ptest_data_in = 0;
4875 stat |= FAS_STAT_PERR;
4876 if (fas_test_stop > 1) {
4877 debug_enter("ptest_data_in");
4878 }
4879 }
4880 #endif /* FAS_TEST */
4881 if (stat & FAS_STAT_PERR) {
4882 fas_log(fas, CE_WARN,
4883 "SCSI bus DATA IN phase parity error");
4884 fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
4885 fas->f_omsglen = 1;
4886 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
4887 sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
4888 }
4889 }
4890
4891 FAS_FLUSH_DMA(fas);
4892
4893 /*
4894 * Check to make sure we're still connected to the target.
4895 * If the target dropped the bus, that is a fatal error.
4896 * We don't even attempt to count what we were transferring
4897 * here. Let fas_handle_unknown clean up for us.
4898 */
4899 if (fas->f_intr != FAS_INT_BUS) {
4900 New_state(fas, ACTS_UNKNOWN);
4901 TRACE_0(TR_FAC_SCSI_FAS,
4902 TR_FAS_HANDLE_DATA_DONE_PHASEMANAGE_END,
4903 "fas_handle_data_done_end (ACTION_PHASEMANAGE)");
4904 return (ACTION_PHASEMANAGE);
4905 }
4906
4907 /*
4908 * Figure out how far we got.
4909 * Latch up fifo amount first and double if wide has been enabled
4910 */
4911 fifoamt = FIFO_CNT(fas);
4912 if (fas->f_wide_enabled & (1<<tgt)) {
4913 fifoamt = fifoamt << 1;
4914 }
4915
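	/*
	 * The transfer counter counts down, so the bytes actually
	 * moved = programmed count - residual count; XZERO means the
	 * counter expired, i.e. the full programmed count was
	 * transferred.
	 */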
4916 if (stat & FAS_STAT_XZERO) {
4917 xfer_amt = fas->f_lastcount;
4918 } else {
4919 GET_FAS_COUNT(fasreg, xfer_amt);
4920 xfer_amt = fas->f_lastcount - xfer_amt;
4921 }
4922 DPRINTF4("fifoamt=%x, xfer_amt=%x, lastcount=%x, stat=%x\n",
4923 fifoamt, xfer_amt, fas->f_lastcount, stat);
4924
4925
4926 /*
4927 * Unconditionally knock off by the amount left
4928 * in the fifo if we were sending out the SCSI bus.
4929 *
4930 * If we were receiving from the SCSI bus, believe
4931 * what the chip told us (either XZERO or by the
4932 * value calculated from the counter register).
4933 * The reason we don't look at the fifo for
4934 * incoming data is that in synchronous mode
4935 * the fifo may have further data bytes, and
4936 * for async mode we assume that all data in
4937 * the fifo will have been transferred before
4938 * the fas asserts an interrupt.
4939 */
4940 if (was_sending) {
4941 xfer_amt -= fifoamt;
4942 }
4943
4944 #ifdef FASDEBUG
4945 {
4946 int phase = stat & FAS_PHASE_MASK;
4947 fas->f_stat2 = fas_reg_read(fas,
4948 (uchar_t *)&fasreg->fas_stat2);
4949
4950 if (((fas->f_stat & FAS_STAT_XZERO) == 0) &&
4951 (phase != FAS_PHASE_DATA_IN) &&
4952 (phase != FAS_PHASE_DATA_OUT) &&
4953 (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
4954 fas_log(fas, CE_WARN,
4955 "input shuttle not empty at end of data phase");
4956 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4957 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
4958 "fas_handle_data_done_end (ACTION_RESET)");
4959 return (ACTION_RESET);
4960 }
4961 }
4962 #endif /* FASDEBUG */
4963
4964 /*
4965 * If this was a synchronous transfer, flag it.
4966 * Also check for the errata condition of long
4967 * last REQ/ pulse for some synchronous targets
4968 */
4969 if (fas->f_offset[tgt]) {
4970 /*
4971 * flag that a synchronous data xfer took place
4972 */
4973 sp->cmd_pkt->pkt_statistics |= STAT_SYNC;
4974
4975 if (was_sending)
4976 fas_reg_cmd_write(fas, CMD_FLUSH);
4977 } else {
4978 /*
4979 * If we aren't doing Synchronous Data Transfers,
4980 * definitely offload the fifo.
4981 */
4982 fas_reg_cmd_write(fas, CMD_FLUSH);
4983 }
4984
4985 /*
4986 * adjust pointers...
4987 */
4988 DPRINTF3("before:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
4989 sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
4990 sp->cmd_data_count += xfer_amt;
4991 sp->cmd_cur_addr += xfer_amt;
4992 sp->cmd_pkt->pkt_state |= STATE_XFERRED_DATA;
4993 New_state(fas, ACTS_UNKNOWN);
4994 DPRINTF3("after:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
4995 sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
4996
4997 stat &= FAS_PHASE_MASK;
4998 if (stat == FAS_PHASE_DATA_IN || stat == FAS_PHASE_DATA_OUT) {
4999 fas->f_state = ACTS_DATA;
5000 TRACE_0(TR_FAC_SCSI_FAS,
5001 TR_FAS_HANDLE_DATA_DONE_ACTION1_END,
5002 "fas_handle_data_done_end (action1)");
5003 return (fas_handle_data_start(fas));
5004 }
5005
5006 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_ACTION2_END,
5007 "fas_handle_data_done_end (action2)");
5008 return (fas_handle_unknown(fas));
5009 }
5010
5011 static char msginperr[] = "SCSI bus MESSAGE IN phase parity error";
5012
5013 static int
5014 fas_handle_c_cmplt(struct fas *fas)
5015 {
5016 struct fas_cmd *sp = fas->f_current_sp;
5017 volatile struct fasreg *fasreg = fas->f_reg;
5018 uchar_t sts, msg, intr, perr;
5019
5020 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_START,
5021 "fas_handle_c_cmplt_start");
5022 EPRINTF("fas_handle_c_cmplt:\n");
5023
5024
5025 /*
5026  * if the target is fast, we can get command completion by the time
5027  * we get here. Otherwise, we'll have to take an interrupt.
5028 */
5029 if (fas->f_laststate == ACTS_UNKNOWN) {
5030 if (INTPENDING(fas)) {
5031 fas->f_stat = fas_reg_read(fas,
5032 (uchar_t *)&fasreg->fas_stat);
5033 intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
5034 fas->f_intr = intr;
5035 if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
5036 return (fas_illegal_cmd_or_bus_reset(fas));
5037 }
5038 } else {
5039 /*
5040 * change f_laststate for the next time around
5041 */
5042 fas->f_laststate = ACTS_C_CMPLT;
5043 TRACE_0(TR_FAC_SCSI_FAS,
5044 TR_FAS_HANDLE_C_CMPLT_RETURN1_END,
5045 "fas_handle_c_cmplt_end (ACTION_RETURN1)");
5046 return (ACTION_RETURN);
5047 }
5048 } else {
5049 intr = fas->f_intr;
5050 }
5051
5052 #ifdef FAS_TEST
5053 if (fas_ptest_status & (1<<Tgt(sp))) {
5054 fas_ptest_status = 0;
5055 fas->f_stat |= FAS_STAT_PERR;
5056 if (fas_test_stop > 1) {
5057 debug_enter("ptest_status");
5058 }
5059 } else if ((fas_ptest_msgin & (1<<Tgt(sp))) && fas_ptest_msg == 0) {
5060 fas_ptest_msgin = 0;
5061 fas_ptest_msg = -1;
5062 fas->f_stat |= FAS_STAT_PERR;
5063 if (fas_test_stop > 1) {
5064 debug_enter("ptest_completion");
5065 }
5066 }
5067 #endif /* FAS_TEST */
5068
5069 if (intr == FAS_INT_DISCON) {
5070 New_state(fas, ACTS_UNKNOWN);
5071 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION1_END,
5072 "fas_handle_c_cmplt_end (action1)");
5073 return (fas_handle_unknown(fas));
5074 }
5075
5076 if ((perr = (fas->f_stat & FAS_STAT_PERR)) != 0) {
5077 fas_assert_atn(fas);
5078 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5079 }
5080
5081 /*
5082 * do a msg accept now and read the fifo data
5083 */
5084 if (intr & FAS_INT_FCMP) {
5085 /*
5086 * The FAS manuals state that this sequence completes
5087 * with a BUS SERVICE interrupt if just the status
5088 * byte was received, else a FUNCTION COMPLETE interrupt
5089 * if both status and a message was received.
5090 *
5091 * if we give the MSG_ACT before reading the msg byte
5092 * we get the status byte again and if the status is zero
5093 * then we won't detect a failure
5094 */
5095 *(sp->cmd_pkt->pkt_scbp) =
5096 sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5097 fas->f_last_msgin = fas->f_imsgarea[0] =
5098 msg = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5099
5100 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5101 sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
5102
5103 /*
5104 * The manuals also state that ATN* is asserted if
5105 * bad parity is detected.
5106 *
5107 * The one case that we cannot handle is where we detect
5108 * bad parity for the status byte, but the target refuses
5109 * to go to MESSAGE OUT phase right away. This means that
5110 * if that happens, we will misconstrue the parity error
5111 * to be for the completion message, not the status byte.
5112 */
5113 if (perr) {
5114 fas_log(fas, CE_WARN, msginperr);
5115 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5116
5117 fas->f_cur_msgout[0] = MSG_MSG_PARITY;
5118 fas->f_omsglen = 1;
5119 New_state(fas, ACTS_UNKNOWN);
5120 TRACE_0(TR_FAC_SCSI_FAS,
5121 TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
5122 "fas_handle_c_cmplt_end (action5)");
5123 return (ACTION_RETURN);
5124 }
5125
5126 } else if (intr == FAS_INT_BUS) {
5127 /*
5128 * We only got the status byte.
5129 */
5130 sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5131 sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
5132 *(sp->cmd_pkt->pkt_scbp) = sts;
5133 msg = INVALID_MSG;
5134
5135 IPRINTF1("fas_handle_cmd_cmplt: sts=%x, no msg byte\n", sts);
5136
5137 if (perr) {
5138 /*
5139 * If we get a parity error on a status byte
5140 * assume that it was a CHECK CONDITION
5141 */
5142 sts = STATUS_CHECK;
5143 fas_log(fas, CE_WARN,
5144 "SCSI bus STATUS phase parity error");
5145 fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
5146 fas->f_omsglen = 1;
5147 New_state(fas, ACTS_UNKNOWN);
5148 TRACE_0(TR_FAC_SCSI_FAS,
5149 TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
5150 "fas_handle_c_cmplt_end (action5)");
5151 return (fas_handle_unknown(fas));
5152 }
5153
5154 } else {
5155 msg = sts = INVALID_MSG;
5156 IPRINTF("fas_handle_cmd_cmplt: unexpected intr\n");
5157 New_state(fas, ACTS_UNKNOWN);
5158 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION2_END,
5159 "fas_handle_c_cmplt_end (action2)");
5160 return (fas_handle_unknown(fas));
5161 }
5162
5163 EPRINTF2("fas_handle_c_cmplt: status=%x, msg=%x\n", sts, msg);
5164
5165 EPRINTF1("Completion Message=%s\n", scsi_mname(msg));
5166 if (msg == MSG_COMMAND_COMPLETE) {
5167 /*
5168 * Actually, if the message was a 'linked command
5169 * complete' message, the target isn't going to be
5170 * clearing the bus.
5171 */
5172 New_state(fas, ACTS_CLEARING);
5173 TRACE_0(TR_FAC_SCSI_FAS,
5174 TR_FAS_HANDLE_C_CMPLT_ACTION4_END,
5175 "fas_handle_c_cmplt_end (action4)");
5176 return (fas_handle_clearing(fas));
5177 } else {
5178 fas->f_imsglen = 1;
5179 fas->f_imsgindex = 1;
5180 New_state(fas, ACTS_MSG_IN_DONE);
5181 TRACE_0(TR_FAC_SCSI_FAS,
5182 TR_FAS_HANDLE_C_CMPLT_ACTION3_END,
5183 "fas_handle_c_cmplt_end (action3)");
5184 return (fas_handle_msg_in_done(fas));
5185 }
5186 }
5187
5188 /*
5189 * prepare for accepting a message byte from the fifo
5190 */
5191 static int
5192 fas_handle_msg_in_start(struct fas *fas)
5193 {
5194 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_START,
5195 "fas_handle_msg_in_start");
5196 EPRINTF("fas_handle_msg_in_start\n");
5197
5198 /*
5199 * Pick up a message byte.
5200 * Clear the FIFO so we
5201 * don't get confused.
5202 */
5203 if (!FIFO_EMPTY(fas)) {
5204 fas_reg_cmd_write(fas, CMD_FLUSH);
5205 }
5206 fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5207 fas->f_imsglen = 1;
5208 fas->f_imsgindex = 0;
5209 New_state(fas, ACTS_MSG_IN_DONE);
5210
5211 /*
5212 * give a little extra time by returning to phasemanage
5213 */
5214 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_END,
5215 "fas_handle_msg_in_end (ACTION_PHASEMANAGE)");
5216 return (ACTION_PHASEMANAGE);
5217 }
5218
5219 /*
5220 * We come here after issuing a MSG_ACCEPT
5221 * command and are expecting more message bytes.
5222 * The FAS should be asserting a BUS SERVICE
5223 * interrupt status, but may have asserted
5224 * a different interrupt in the case that
5225 * the target disconnected and dropped BSY*.
5226 *
5227 * In the case that we are eating up message
5228 * bytes (and throwing them away unread) because
5229 * we have ATN* asserted (we are trying to send
5230 * a message), we do not consider it an error
5231 * if the phase has changed out of MESSAGE IN.
5232 */
5233 static int
5234 fas_handle_more_msgin(struct fas *fas)
5235 {
5236 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_START,
5237 "fas_handle_more_msgin_start");
5238 EPRINTF("fas_handle_more_msgin\n");
5239
5240 if (fas->f_intr & FAS_INT_BUS) {
5241 if ((fas->f_stat & FAS_PHASE_MASK) == FAS_PHASE_MSG_IN) {
5242 /*
5243 * Fetch another byte of a message in.
5244 */
5245 fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5246 New_state(fas, ACTS_MSG_IN_DONE);
5247 TRACE_0(TR_FAC_SCSI_FAS,
5248 TR_FAS_HANDLE_MORE_MSGIN_RETURN1_END,
5249 "fas_handle_more_msgin_end (ACTION_RETURN)");
5250 return (ACTION_RETURN);
5251 }
5252
5253 /*
5254 * If we were gobbling up a message and we have
5255 * changed phases, handle this silently, else
5256 * complain. In either case, we return to let
5257 * fas_phasemanage() handle things.
5258 *
5259 * If it wasn't a BUS SERVICE interrupt,
5260 * let fas_phasemanage() find out if the
5261 * chip disconnected.
5262 */
5263 if (fas->f_imsglen != 0) {
5264 fas_log(fas, CE_WARN,
5265 "Premature end of extended message");
5266 }
5267 }
5268 New_state(fas, ACTS_UNKNOWN);
5269 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_RETURN2_END,
5270 "fas_handle_more_msgin_end (action)");
5271 return (fas_handle_unknown(fas));
5272 }
5273
5274 static int
5275 fas_handle_msg_in_done(struct fas *fas)
5276 {
5277 struct fas_cmd *sp = fas->f_current_sp;
5278 volatile struct fasreg *fasreg = fas->f_reg;
5279 int sndmsg = 0;
5280 uchar_t msgin;
5281
5282 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_START,
5283 "fas_handle_msg_in_done_start");
5284 EPRINTF("fas_handle_msg_in_done:\n");
5285 if (fas->f_laststate == ACTS_MSG_IN) {
5286 if (INTPENDING(fas)) {
5287 fas->f_stat = fas_reg_read(fas,
5288 (uchar_t *)&fasreg->fas_stat);
5289 fas->f_stat2 = fas_reg_read(fas,
5290 (uchar_t *)&fasreg->fas_stat2);
5291
5292 fas_read_fifo(fas);
5293
5294 fas->f_intr = fas_reg_read(fas,
5295 (uchar_t *)&fasreg->fas_intr);
5296 if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
5297 return (fas_illegal_cmd_or_bus_reset(fas));
5298 }
5299 } else {
5300 /*
5301 * change f_laststate for the next time around
5302 */
5303 fas->f_laststate = ACTS_MSG_IN_DONE;
5304 TRACE_0(TR_FAC_SCSI_FAS,
5305 TR_FAS_HANDLE_MSG_IN_DONE_RETURN1_END,
5306 "fas_handle_msg_in_done_end (ACTION_RETURN1)");
5307 return (ACTION_RETURN);
5308 }
5309 }
5310
5311 /*
5312 * the most common case is a disconnect message. we do
5313 * a fast path for this condition and if it fails then
5314 * we go for the detailed error handling
5315 */
5316 #ifndef FAS_TEST
5317 if (((fas->f_laststate == ACTS_MSG_IN) ||
5318 (fas->f_laststate == ACTS_MSG_IN_DONE)) &&
5319 ((fas->f_intr & FAS_INT_DISCON) == 0) &&
5320 ((fas->f_stat & FAS_STAT_PERR) == 0) &&
5321 ((sp->cmd_pkt_flags & FLAG_NODISCON) == 0)) {
5322
5323 if ((fas->f_fifolen == 1) &&
5324 (fas->f_imsglen == 1) &&
5325 (fas->f_fifo[0] == MSG_DISCONNECT)) {
5326
5327 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5328 fas->f_imsgarea[fas->f_imsgindex++] = fas->f_fifo[0];
5329 fas->f_last_msgin = MSG_DISCONNECT;
5330 New_state(fas, ACTS_CLEARING);
5331
5332 TRACE_0(TR_FAC_SCSI_FAS,
5333 TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
5334 "fas_handle_msg_in_done_end (action)");
5335
5336 return (fas_handle_clearing(fas));
5337 }
5338 }
5339 #endif /* not FAS_TEST */
5340
5341 /*
5342 * We can be called here for both the case where
5343 * we had requested the FAS chip to fetch a message
5344 * byte from the target (at the target's request).
5345 * We can also be called in the case where we had
5346 * been using the CMD_COMP_SEQ command to pick up
5347 * both a status byte and a completion message from
5348 * a target, but where the message wasn't one of
5349 * COMMAND COMPLETE, LINKED COMMAND COMPLETE, or
5350 * LINKED COMMAND COMPLETE (with flag). This is a
5351  * legal (albeit extremely unusual) SCSI bus
5352  * transition, so we have to handle it.
5353 */
5354 if (fas->f_laststate != ACTS_C_CMPLT) {
5355 #ifdef FAS_TEST
5356 reloop:
5357 #endif /* FAS_TEST */
5358
5359 if (fas->f_intr & FAS_INT_DISCON) {
5360 fas_log(fas, CE_WARN,
5361 "premature end of input message");
5362 New_state(fas, ACTS_UNKNOWN);
5363 TRACE_0(TR_FAC_SCSI_FAS,
5364 TR_FAS_HANDLE_MSG_IN_DONE_PHASEMANAGE_END,
5365 "fas_handle_msg_in_done_end (ACTION_PHASEMANAGE)");
5366 return (ACTION_PHASEMANAGE);
5367 }
5368
5369 /*
5370 * Note that if f_imsglen is zero, then we are skipping
5371 * input message bytes, so there is no reason to look for
5372 * parity errors.
5373 */
5374 if (fas->f_imsglen != 0 && (fas->f_stat & FAS_STAT_PERR)) {
5375 fas_log(fas, CE_WARN, msginperr);
5376 sndmsg = MSG_MSG_PARITY;
5377 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5378 fas_reg_cmd_write(fas, CMD_FLUSH);
5379
5380 } else if ((msgin = fas->f_fifolen) != 1) {
5381
5382 /*
5383  * If we have more than one byte, or no bytes, in the fifo,
5384 * that is a gross screwup, and we should let the
5385 * target know that we have completely fouled up.
5386 */
5387 fas_printf(fas, "fifocount=%x", msgin);
5388 fas_printstate(fas, "input message botch");
5389 sndmsg = MSG_INITIATOR_ERROR;
5390 fas_reg_cmd_write(fas, CMD_FLUSH);
5391 fas_log(fas, CE_WARN, "input message botch");
5392
5393 } else if (fas->f_imsglen == 0) {
5394 /*
5395 * If we are in the middle of gobbling up and throwing
5396 * away a message (due to a previous message input
5397 * error), drive on.
5398 */
5399 msgin = fas_reg_read(fas,
5400 (uchar_t *)&fasreg->fas_fifo_data);
5401 New_state(fas, ACTS_MSG_IN_MORE);
5402
5403 } else {
5404 msgin = fas->f_fifo[0];
5405 fas->f_imsgarea[fas->f_imsgindex++] = msgin;
5406 }
5407
5408 } else {
5409 /*
5410 * In this case, we have been called (from
5411 * fas_handle_c_cmplt()) with the message
5412 * already stored in the message array.
5413 */
5414 msgin = fas->f_imsgarea[0];
5415 }
5416
5417 /*
5418 * Process this message byte (but not if we are
5419 * going to be trying to send back some error
5420 * anyway)
5421 */
5422 if (sndmsg == 0 && fas->f_imsglen != 0) {
5423
5424 if (fas->f_imsgindex < fas->f_imsglen) {
5425
5426 EPRINTF2("message byte %d: 0x%x\n",
5427 fas->f_imsgindex-1,
5428 fas->f_imsgarea[fas->f_imsgindex-1]);
5429
5430 New_state(fas, ACTS_MSG_IN_MORE);
5431
5432 } else if (fas->f_imsglen == 1) {
5433
5434 #ifdef FAS_TEST
5435 if ((fas_ptest_msgin & (1<<Tgt(sp))) &&
5436 fas_ptest_msg == msgin) {
5437 fas_ptest_msgin = 0;
5438 fas_ptest_msg = -1;
5439 fas_assert_atn(fas);
5440 fas->f_stat |= FAS_STAT_PERR;
5441 fas->f_imsgindex -= 1;
5442 if (fas_test_stop > 1) {
5443 debug_enter("ptest msgin");
5444 }
5445 goto reloop;
5446 }
5447 #endif /* FAS_TEST */
5448
5449 sndmsg = fas_onebyte_msg(fas);
5450
5451 } else if (fas->f_imsglen == 2) {
5452 #ifdef FAS_TEST
5453 if (fas_ptest_emsgin & (1<<Tgt(sp))) {
5454 fas_ptest_emsgin = 0;
5455 fas_assert_atn(fas);
5456 fas->f_stat |= FAS_STAT_PERR;
5457 fas->f_imsgindex -= 1;
5458 if (fas_test_stop > 1) {
5459 debug_enter("ptest emsgin");
5460 }
5461 goto reloop;
5462 }
5463 #endif /* FAS_TEST */
5464
5465 if (fas->f_imsgarea[0] == MSG_EXTENDED) {
5466 static char *tool =
5467 "Extended message 0x%x is too long";
5468
5469 /*
5470 * Is the incoming message too long
5471 * to be stored in our local array?
5472 */
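				/*
				 * (msgin is the extended message
				 * length byte here; the +2 covers the
				 * MSG_EXTENDED and length bytes
				 * themselves.)
				 */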
5473 if ((int)(msgin+2) > IMSGSIZE) {
5474 fas_log(fas, CE_WARN,
5475 tool, fas->f_imsgarea[0]);
5476 sndmsg = MSG_REJECT;
5477 } else {
5478 fas->f_imsglen = msgin + 2;
5479 New_state(fas, ACTS_MSG_IN_MORE);
5480 }
5481 } else {
5482 sndmsg = fas_twobyte_msg(fas);
5483 }
5484
5485 } else {
5486 sndmsg = fas_multibyte_msg(fas);
5487 }
5488 }
5489
5490 if (sndmsg < 0) {
5491 /*
5492 * If sndmsg is less than zero, one of the subsidiary
5493 * routines needs to return some other state than
5494 * ACTION_RETURN.
5495 */
5496 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_SNDMSG_END,
5497 "fas_handle_msg_in_done_end (-sndmsg)");
5498 return (-sndmsg);
5499
5500 } else if (sndmsg > 0) {
5501 if (IS_1BYTE_MSG(sndmsg)) {
5502 fas->f_omsglen = 1;
5503 }
5504 fas->f_cur_msgout[0] = (uchar_t)sndmsg;
5505
5506 /*
5507 * The target is not guaranteed to go to message out
5508 * phase, period. Moreover, until the entire incoming
5509 * message is transferred, the target may (and likely
5510 * will) continue to transfer message bytes (which
5511 * we will have to ignore).
5512 *
5513 * In order to do this, we'll go to 'infinite'
5514 * message in handling by setting the current input
5515 * message length to a sentinel of zero.
5516 *
5517 * This works regardless of the message we are trying
5518 * to send out. At the point in time which we want
5519 * to send a message in response to an incoming message
5520 * we do not care any more about the incoming message.
5521 *
5522 * If we are sending a message in response to detecting
5523 * a parity error on input, the FAS chip has already
5524 * set ATN* for us, but it doesn't hurt to set it here
5525 * again anyhow.
5526 */
5527 fas_assert_atn(fas);
5528 New_state(fas, ACTS_MSG_IN_MORE);
5529 fas->f_imsglen = 0;
5530 }
5531
5532 fas_reg_cmd_write(fas, CMD_FLUSH);
5533
5534 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5535
5536 if ((fas->f_laststate == ACTS_MSG_IN_DONE) &&
5537 (fas->f_state == ACTS_CLEARING)) {
5538 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
5539 "fas_handle_msg_in_done_end (action)");
5540 return (fas_handle_clearing(fas));
5541 }
5542 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_RETURN2_END,
5543 "fas_handle_msg_in_done_end (ACTION_RETURN2)");
5544 return (ACTION_RETURN);
5545 }
5546
5547 static int
5548 fas_onebyte_msg(struct fas *fas)
5549 {
5550 struct fas_cmd *sp = fas->f_current_sp;
5551 int msgout = 0;
5552 uchar_t msgin = fas->f_last_msgin = fas->f_imsgarea[0];
5553 int tgt = Tgt(sp);
5554
5555 EPRINTF("fas_onebyte_msg\n");
5556
5557 if (msgin & MSG_IDENTIFY) {
5558 /*
5559 * How did we get here? We should only see identify
5560 * messages on a reconnection, but we'll handle this
5561 * fine here (just in case we get this) as long as
5562 * we believe that this is a valid identify message.
5563 *
5564 * For this to be a valid incoming message,
5565  * bits 6-4 must be zero. Also, the
5566 * bit that says that I'm an initiator and
5567 * can support disconnection cannot possibly
5568 * be set here.
5569 */
5570
5571 char garbled = ((msgin & (BAD_IDENTIFY|INI_CAN_DISCON)) != 0);
5572
5573 fas_log(fas, CE_WARN, "%s message 0x%x from Target %d",
5574 garbled ? "Garbled" : "Identify", msgin, tgt);
5575
5576 if (garbled) {
5577 /*
5578 * If it's a garbled message,
5579 * try and tell the target...
5580 */
5581 msgout = MSG_INITIATOR_ERROR;
5582 } else {
5583 New_state(fas, ACTS_UNKNOWN);
5584 }
5585 return (msgout);
5586
5587 } else if (IS_2BYTE_MSG(msgin) || IS_EXTENDED_MSG(msgin)) {
5588 fas->f_imsglen = 2;
5589 New_state(fas, ACTS_MSG_IN_MORE);
5590 return (0);
5591 }
5592
5593 New_state(fas, ACTS_UNKNOWN);
5594
5595 switch (msgin) {
5596 case MSG_DISCONNECT:
5597 /*
5598 * If we 'cannot' disconnect- reject this message.
5599  * Note that we only key off the pkt_flags here;
5600 * the FLAG_NODISCON was set in fas_accept_pkt() if
5601 * no disconnect was enabled in scsi_options
5602 */
5603 if (sp->cmd_pkt_flags & FLAG_NODISCON) {
5604 msgout = MSG_REJECT;
5605 break;
5606 }
5607 /* FALLTHROUGH */
5608 case MSG_COMMAND_COMPLETE:
5609 fas->f_state = ACTS_CLEARING;
5610 break;
5611
5612 case MSG_NOP:
5613 break;
5614
5615 /* XXX Make it a MSG_REJECT handler */
5616 case MSG_REJECT:
5617 {
5618 uchar_t reason = 0;
5619 uchar_t lastmsg = fas->f_last_msgout;
5620 /*
5621 * The target is rejecting the last message we sent.
5622 *
5623 * If the last message we attempted to send out was an
5624 * extended message, we were trying to negotiate sync
5625 * xfers- and we're okay.
5626 *
5627 * Otherwise, a target has rejected a message that
5628 * it should have handled. We will abort the operation
5629 * in progress and set the pkt_reason value here to
5630 * show why we have completed. The process of aborting
5631 * may be via a message or may be via a bus reset (as
5632 * a last resort).
5633 */
5634 msgout = (TAGGED(tgt)? MSG_ABORT_TAG : MSG_ABORT);
5635
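		/*
		 * (MSG_ABORT_TAG aborts just the offending tagged cmd;
		 * the specific cases below override this default
		 * response where a better recovery is possible)
		 */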
5636 switch (lastmsg) {
5637 case MSG_EXTENDED:
5638 if (fas->f_wdtr_sent) {
5639 /*
5640 				 * Disable wide; the target rejected
5641 				 * our WDTR message
5642 */
5643 fas_set_wide_conf3(fas, tgt, 0);
5644 fas->f_nowide |= (1<<tgt);
5645 fas->f_wdtr_sent = 0;
5646 /*
5647 * we still want to negotiate sync
5648 */
5649 if ((fas->f_nosync & (1<<tgt)) == 0) {
5650 fas_assert_atn(fas);
5651 fas_make_sdtr(fas, 0, tgt);
5652 }
5653 } else if (fas->f_sdtr_sent) {
5654 fas_reg_cmd_write(fas, CMD_CLR_ATN);
5655 fas_revert_to_async(fas, tgt);
5656 fas->f_nosync |= (1<<tgt);
5657 fas->f_sdtr_sent = 0;
5658 }
5659 msgout = 0;
5660 break;
5661 case MSG_NOP:
5662 reason = CMD_NOP_FAIL;
5663 break;
5664 case MSG_INITIATOR_ERROR:
5665 reason = CMD_IDE_FAIL;
5666 break;
5667 case MSG_MSG_PARITY:
5668 reason = CMD_PER_FAIL;
5669 break;
5670 case MSG_REJECT:
5671 reason = CMD_REJECT_FAIL;
5672 break;
5673 /* XXX - abort not good, queue full handling or drain (?) */
5674 case MSG_SIMPLE_QTAG:
5675 case MSG_ORDERED_QTAG:
5676 case MSG_HEAD_QTAG:
5677 msgout = MSG_ABORT;
5678 reason = CMD_TAG_REJECT;
5679 break;
5680 case MSG_DEVICE_RESET:
5681 reason = CMD_BDR_FAIL;
5682 msgout = -ACTION_ABORT_CURCMD;
5683 break;
5684 case MSG_ABORT:
5685 case MSG_ABORT_TAG:
5686 /*
5687 			 * If a RESET/ABORT OPERATION message is rejected
5688 * it is time to yank the chain on the bus...
5689 */
5690 reason = CMD_ABORT_FAIL;
5691 msgout = -ACTION_ABORT_CURCMD;
5692 break;
5693 default:
5694 if (IS_IDENTIFY_MSG(lastmsg)) {
5695 if (TAGGED(tgt)) {
5696 /*
5697 * this often happens when the
5698 * target rejected our tag
5699 */
5700 reason = CMD_TAG_REJECT;
5701 } else {
5702 reason = CMD_ID_FAIL;
5703 }
5704 } else {
5705 reason = CMD_TRAN_ERR;
5706 msgout = -ACTION_ABORT_CURCMD;
5707 }
5708
5709 break;
5710 }
5711
5712 if (msgout) {
5713 fas_log(fas, CE_WARN,
5714 "Target %d rejects our message '%s'",
5715 tgt, scsi_mname(lastmsg));
5716 fas_set_pkt_reason(fas, sp, reason, 0);
5717 }
5718
5719 break;
5720 }
5721 case MSG_RESTORE_PTRS:
5722 sp->cmd_cdbp = sp->cmd_pkt->pkt_cdbp;
5723 if (sp->cmd_data_count != sp->cmd_saved_data_count) {
5724 if (fas_restore_pointers(fas, sp)) {
5725 msgout = -ACTION_ABORT_CURCMD;
5726 } else if ((sp->cmd_pkt->pkt_reason & CMD_TRAN_ERR) &&
5727 (sp->cmd_pkt->pkt_statistics & STAT_PERR) &&
5728 (sp->cmd_cur_win == 0) &&
5729 (sp->cmd_data_count == 0)) {
5730 sp->cmd_pkt->pkt_reason &= ~CMD_TRAN_ERR;
5731 }
5732 }
5733 break;
5734
5735 case MSG_SAVE_DATA_PTR:
5736 sp->cmd_saved_data_count = sp->cmd_data_count;
5737 sp->cmd_saved_win = sp->cmd_cur_win;
5738 sp->cmd_saved_cur_addr = sp->cmd_cur_addr;
5739 break;
5740
5741 /* These don't make sense for us, and */
5742 /* will be rejected */
5743 /* case MSG_INITIATOR_ERROR */
5744 /* case MSG_ABORT */
5745 /* case MSG_MSG_PARITY */
5746 /* case MSG_DEVICE_RESET */
5747 default:
5748 msgout = MSG_REJECT;
5749 fas_log(fas, CE_WARN,
5750 "Rejecting message '%s' from Target %d",
5751 scsi_mname(msgin), tgt);
5752 break;
5753 }
5754
5755 EPRINTF1("Message in: %s\n", scsi_mname(msgin));
5756
5757 return (msgout);
5758 }
5759
5760 /*
5761 * phase handlers that are rarely used
5762 */
5763 static int
5764 fas_handle_cmd_start(struct fas *fas)
5765 {
5766 struct fas_cmd *sp = fas->f_current_sp;
5767 volatile uchar_t *tp = fas->f_cmdarea;
5768 int i;
5769 int amt = sp->cmd_cdblen;
5770
5771 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_START,
5772 "fas_handle_cmd_start_start");
5773 EPRINTF("fas_handle_cmd: send cmd\n");
5774
5775 for (i = 0; i < amt; i++) {
5776 *tp++ = sp->cmd_cdbp[i];
5777 }
5778 fas_reg_cmd_write(fas, CMD_FLUSH);
5779
5780 FAS_DMA_READ(fas, amt, fas->f_dmacookie.dmac_address, amt,
5781 CMD_TRAN_INFO|CMD_DMA);
5782 fas->f_lastcount = amt;
5783
5784 New_state(fas, ACTS_CMD_DONE);
5785
5786 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_END,
5787 "fas_handle_cmd_start_end");
5788 return (ACTION_RETURN);
5789 }
5790
5791 static int
5792 fas_handle_cmd_done(struct fas *fas)
5793 {
5794 struct fas_cmd *sp = fas->f_current_sp;
5795 uchar_t intr = fas->f_intr;
5796 volatile struct dma *dmar = fas->f_dma;
5797
5798 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_START,
5799 "fas_handle_cmd_done_start");
5800 EPRINTF("fas_handle_cmd_done\n");
5801
5802 /*
5803 * We should have gotten a BUS SERVICE interrupt.
5804 * If it isn't that, and it isn't a DISCONNECT
5805 * interrupt, we have a "cannot happen" situation.
5806 */
5807 if ((intr & FAS_INT_BUS) == 0) {
5808 if ((intr & FAS_INT_DISCON) == 0) {
5809 fas_printstate(fas, "cmd transmission error");
5810 TRACE_0(TR_FAC_SCSI_FAS,
5811 TR_FAS_HANDLE_CMD_DONE_ABORT1_END,
5812 "fas_handle_cmd_done_end (abort1)");
5813 return (ACTION_ABORT_CURCMD);
5814 }
5815 } else {
5816 sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
5817 }
5818
5819 fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr);
5820 FAS_FLUSH_DMA(fas);
5821
5822 New_state(fas, ACTS_UNKNOWN);
5823 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_END,
5824 "fas_handle_cmd_done_end");
5825 return (fas_handle_unknown(fas));
5826 }
5827
5828 /*
5829 * Begin to send a message out
5830 */
5831 static int
5832 fas_handle_msg_out_start(struct fas *fas)
5833 {
5834 struct fas_cmd *sp = fas->f_current_sp;
5835 uchar_t *msgout = fas->f_cur_msgout;
5836 uchar_t amt = fas->f_omsglen;
5837
5838 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_START,
5839 "fas_handle_msg_out_start");
5840 EPRINTF("fas_handle_msg_out_start\n");
5841
5842 /*
5843 * Check to make *sure* that we are really
5844 * in MESSAGE OUT phase. If the last state
5845 * was ACTS_MSG_OUT_DONE, then we are trying
5846 * to resend a message that the target stated
5847 * had a parity error in it.
5848 *
5849 	 * If that is the case, mark the completion reason as CMD_NOMSGOUT.
5850 * XXX: Right now, we just *drive* on. Should we abort the command?
5851 */
5852 if ((fas->f_stat & FAS_PHASE_MASK) != FAS_PHASE_MSG_OUT &&
5853 fas->f_laststate == ACTS_MSG_OUT_DONE) {
5854 fas_log(fas, CE_WARN,
5855 "Target %d refused message resend", Tgt(sp));
5856 fas_set_pkt_reason(fas, sp, CMD_NOMSGOUT, 0);
5857 New_state(fas, ACTS_UNKNOWN);
5858 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_PHASEMANAGE_END,
5859 "fas_handle_msg_out_end (ACTION_PHASEMANAGE)");
5860 return (ACTION_PHASEMANAGE);
5861 }
5862
5863 /*
5864 * Clean the fifo.
5865 */
5866 fas_reg_cmd_write(fas, CMD_FLUSH);
5867
5868 if (amt == 0) {
5869 /*
5870 * no msg to send
5871 */
5872 *msgout = MSG_NOP;
5873 amt = fas->f_omsglen = 1;
5874 }
5875
5876 /*
5877 	 * If the msg is only 1 byte, just dump it in the fifo and go;
5878 	 * multi-byte msgs would normally be DMAed, but see the workaround
5879 	 * below.  If we have no msg to send in msg out phase, send a NOP.
5880 */
5881 fas->f_last_msgout = *msgout;
5882
5883 /*
5884 	 * There is a bug in the fas366 that occasionally
5885 * deasserts the ATN signal prematurely when we send
5886 * the sync/wide negotiation bytes out using DMA. The
5887 * workaround here is to send the negotiation bytes out
5888 * using PIO
5889 */
5890 fas_write_fifo(fas, msgout, fas->f_omsglen, 1);
5891 fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5892
5893 EPRINTF2("amt=%x, last_msgout=%x\n", amt, fas->f_last_msgout);
5894
5895 New_state(fas, ACTS_MSG_OUT_DONE);
5896 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_END,
5897 "fas_handle_msg_out_end");
5898 return (ACTION_RETURN);
5899 }
5900
5901 static int
5902 fas_handle_msg_out_done(struct fas *fas)
5903 {
5904 struct fas_cmd *sp = fas->f_current_sp;
5905 uchar_t msgout, phase;
5906 int target = Tgt(sp);
5907 int amt = fas->f_omsglen;
5908 int action;
5909
5910 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_START,
5911 "fas_handle_msg_out_done_start");
5912 msgout = fas->f_cur_msgout[0];
5913 if ((msgout == MSG_HEAD_QTAG) || (msgout == MSG_SIMPLE_QTAG)) {
5914 msgout = fas->f_cur_msgout[2];
5915 }
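	/*
	 * for tagged cmds, f_cur_msgout[0] and [1] hold the queue tag
	 * message and tag value, so the message of interest is the
	 * third byte
	 */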
5916 EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
5917 fas->f_cur_msgout[0], fas->f_cur_msgout[1],
5918 fas->f_cur_msgout[2], fas->f_last_msgout);
5919
5920 EPRINTF1("fas_handle_msgout_done: msgout=%x\n", msgout);
5921
5922 /*
5923 * flush fifo, just in case some bytes were not sent
5924 */
5925 fas_reg_cmd_write(fas, CMD_FLUSH);
5926
5927 /*
5928 * If the FAS disconnected, then the message we sent caused
5929 * the target to decide to drop BSY* and clear the bus.
5930 */
5931 if (fas->f_intr == FAS_INT_DISCON) {
5932 if (msgout == MSG_DEVICE_RESET || msgout == MSG_ABORT ||
5933 msgout == MSG_ABORT_TAG) {
5934 /*
5935 * If we sent a device reset msg, then we need to do
5936 * a synch negotiate again unless we have already
5937 * inhibited synch.
5938 */
5939 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
5940 fas->f_abort_msg_sent++;
5941 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
5942 fas_set_pkt_reason(fas, sp,
5943 CMD_ABORTED, STAT_ABORTED);
5944 }
5945 } else if (msgout == MSG_DEVICE_RESET) {
5946 fas->f_reset_msg_sent++;
5947 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
5948 fas_set_pkt_reason(fas, sp,
5949 CMD_RESET, STAT_DEV_RESET);
5950 }
5951 fas_force_renegotiation(fas, Tgt(sp));
5952 }
5953 EPRINTF2("Successful %s message to target %d\n",
5954 scsi_mname(msgout), target);
5955
5956 if (sp->cmd_flags & CFLAG_CMDPROXY) {
5957 sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
5958 }
5959 TRACE_0(TR_FAC_SCSI_FAS,
5960 TR_FAS_HANDLE_MSG_OUT_DONE_FINISH_END,
5961 "fas_handle_msg_out_done_end (ACTION_FINISH)");
5962 return (ACTION_FINISH);
5963 }
5964 /*
5965 * If the target dropped busy on any other message, it
5966 * wasn't expected. We will let the code in fas_phasemanage()
5967 * handle this unexpected bus free event.
5968 */
5969 goto out;
5970 }
5971
5972 /*
5973 * What phase have we transitioned to?
5974 */
5975 phase = fas->f_stat & FAS_PHASE_MASK;
5976
5977 /*
5978 * If we finish sending a message out, and we are
5979 * still in message out phase, then the target has
5980 * detected one or more parity errors in the message
5981 * we just sent and it is asking us to resend the
5982 * previous message.
5983 */
5984 if ((fas->f_intr & FAS_INT_BUS) && phase == FAS_PHASE_MSG_OUT) {
5985 /*
5986 * As per SCSI-2 specification, if the message to
5987 * be re-sent is greater than one byte, then we
5988 * have to set ATN*.
5989 */
5990 if (amt > 1) {
5991 fas_assert_atn(fas);
5992 }
5993 fas_log(fas, CE_WARN,
5994 "SCSI bus MESSAGE OUT phase parity error");
5995 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5996 New_state(fas, ACTS_MSG_OUT);
5997 TRACE_0(TR_FAC_SCSI_FAS,
5998 TR_FAS_HANDLE_MSG_OUT_DONE_PHASEMANAGE_END,
5999 "fas_handle_msg_out_done_end (ACTION_PHASEMANAGE)");
6000 return (ACTION_PHASEMANAGE);
6001 }
6002
6003
6004 out:
6005 fas->f_last_msgout = msgout;
6006 fas->f_omsglen = 0;
6007 New_state(fas, ACTS_UNKNOWN);
6008 action = fas_handle_unknown(fas);
6009 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_END,
6010 "fas_handle_msg_out_done_end");
6011 return (action);
6012 }
6013
6014 static int
6015 fas_twobyte_msg(struct fas *fas)
6016 {
6017 struct fas_cmd *sp = fas->f_current_sp;
6018
6019 if ((fas->f_imsgarea[0] == MSG_IGNORE_WIDE_RESID) &&
6020 (fas->f_imsgarea[1] == 1)) {
6021 int xfer_amt;
6022
6023 /*
6024 		 * Knock off one byte if there was a last transfer
6025 		 * and it was an even number of bytes
6026 */
6027 xfer_amt = sp->cmd_data_count - sp->cmd_saved_data_count;
6028 if (xfer_amt && (!(xfer_amt & 1))) {
6029 ASSERT(sp->cmd_data_count > 0);
6030 sp->cmd_data_count--;
6031 sp->cmd_cur_addr--;
6032 }
6033 IPRINTF1("ignore wide resid %d\n", fas->f_imsgarea[1]);
6034 New_state(fas, ACTS_UNKNOWN);
6035 return (0);
6036 }
6037
6038 fas_log(fas, CE_WARN,
6039 "Two byte message '%s' 0x%x rejected",
6040 scsi_mname(fas->f_imsgarea[0]), fas->f_imsgarea[1]);
6041 return (MSG_REJECT);
6042 }
6043
6044 /*
6045 * handle receiving extended messages
6046 */
6047 static int
6048 fas_multibyte_msg(struct fas *fas)
6049 {
6050 #ifdef FASDEBUG
6051 static char *mbs =
6052 "Target %d now Synchronous at %d.%d MB/s max transmit rate\n";
6053 static char *mbs1 =
6054 "Target %d now Synchronous at %d.0%d MB/s max transmit rate\n";
6055 static char *mbs2 =
6056 "Target %d now Synchronous at %d.00%d MB/s max transmit rate\n";
6057 #endif
6058 struct fas_cmd *sp = fas->f_current_sp;
6059 volatile struct fasreg *fasreg = fas->f_reg;
6060 uchar_t emsg = fas->f_imsgarea[2];
6061 int tgt = Tgt(sp);
6062 int msgout = 0;
6063
6064 EPRINTF("fas_multibyte_msg:\n");
6065
6066 if (emsg == MSG_SYNCHRONOUS) {
6067 uint_t period, offset, regval;
6068 uint_t minsync, maxsync, clockval;
6069 uint_t xfer_freq, xfer_div, xfer_mod, xfer_rate;
6070
6071 period = fas->f_imsgarea[3] & 0xff;
6072 offset = fas->f_imsgarea[4] & 0xff;
6073 minsync = MIN_SYNC_PERIOD(fas);
6074 maxsync = MAX_SYNC_PERIOD(fas);
6075 DPRINTF5("sync msg received: %x %x %x %x %x\n",
6076 fas->f_imsgarea[0], fas->f_imsgarea[1],
6077 fas->f_imsgarea[2], fas->f_imsgarea[3],
6078 fas->f_imsgarea[4]);
6079 DPRINTF3("received period %d offset %d from tgt %d\n",
6080 period, offset, tgt);
6081 DPRINTF3("calculated minsync %d, maxsync %d for tgt %d\n",
6082 minsync, maxsync, tgt);
6083 DPRINTF2("sync period %d, neg period %d\n",
6084 fas->f_sync_period[tgt], fas->f_neg_period[tgt]);
6085
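		/*
		 * f_sdtr_sent was even if we did not initiate this
		 * exchange; the increment below then makes it odd and
		 * we must respond to a target-initiated SDTR.  If we
		 * initiated (odd), the increment makes it even and this
		 * message is the target's reply to our SDTR.
		 */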
6086 if ((++(fas->f_sdtr_sent)) & 1) {
6087 /*
6088 * In cases where the target negotiates synchronous
6089 * mode before we do, and we either have sync mode
6090 * disabled, or this target is known to be a weak
6091 * signal target, we send back a message indicating
6092 * a desire to stay in asynchronous mode (the SCSI-2
6093 * spec states that if we have synchronous capability
6094 * then we cannot reject a SYNCHRONOUS DATA TRANSFER
6095 * REQUEST message).
6096 */
6097 IPRINTF1("SYNC negotiation initiated by target %d\n",
6098 tgt);
6099
6100 msgout = MSG_EXTENDED;
6101
6102 period =
6103 period ? max(period, MIN_SYNC_PERIOD(fas)) : 0;
6104
6105 if (fas->f_backoff & (1<<tgt)) {
6106 period = period ?
6107 max(period, fas->f_neg_period[tgt]) : 0;
6108 }
6109 offset = min(offset, fas_default_offset);
6110 }
6111 xfer_freq = regval = 0;
6112
6113 /*
6114 * If the target's offset is bigger than ours,
6115 * the target has violated the scsi protocol.
6116 */
6117 if (offset > fas_default_offset) {
6118 period = offset = 0;
6119 msgout = MSG_REJECT;
6120 }
6121
6122 if (offset && (period > maxsync)) {
6123 /*
6124 * We cannot transmit data in synchronous
6125 * mode this slow, so convert to asynchronous
6126 * mode.
6127 */
6128 msgout = MSG_EXTENDED;
6129 period = offset = 0;
6130
6131 } else if (offset && (period < minsync)) {
6132 /*
6133 * If the target's period is less than ours,
6134 * the target has violated the scsi protocol.
6135 */
6136 period = offset = 0;
6137 msgout = MSG_REJECT;
6138
6139 } else if (offset) {
6140 /*
6141 * Conversion method for received PERIOD value
6142 * to the number of input clock ticks to the FAS.
6143 *
6144 * We adjust the input period value such that
6145 * we always will transmit data *not* faster
6146 * than the period value received.
6147 */
6148
6149 clockval = fas->f_clock_cycle / 1000;
6150 regval = (((period << 2) + clockval - 1) / clockval);
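			/*
			 * (the SDTR period is in units of 4ns, so
			 * period << 2 is ns; clockval is the chip
			 * clock cycle in ns, and we round up.  e.g.,
			 * assuming a 40MHz clock (25ns cycle) and a
			 * received period of 25 (100ns), regval
			 * becomes (100 + 24) / 25 = 4 ticks per byte)
			 */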
6151
6152 /*
6153 * correction if xfer rate <= 5MB/sec
6154 * XXX do we need this?
6155 */
6156 if (regval && (period >= FASTSCSI_THRESHOLD)) {
6157 regval--;
6158 }
6159 }
6160
6161 fas->f_offset[tgt] = offset;
6162 fas->f_neg_period[tgt] = period;
6163
6164 /*
6165 		 * It is now safe to produce a response to a target-
6166 		 * initiated sdtr; period and offset have been checked.
6167 */
6168 if (msgout == MSG_EXTENDED) {
6169 fas_make_sdtr(fas, 0, tgt);
6170 period = fas->f_neg_period[tgt];
6171 offset = (fas->f_offset[tgt] & 0xf);
6172 }
6173
6174 if (offset) {
6175 fas->f_sync_period[tgt] = regval & SYNC_PERIOD_MASK;
6176 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period,
6177 fas->f_sync_period[tgt]);
6178
6179 fas->f_offset[tgt] = offset | fas->f_req_ack_delay;
6180 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset,
6181 fas->f_offset[tgt]);
6182
6183 /*
6184 * if transferring > 5 MB/sec then enable
6185 * fastscsi in conf3
6186 */
6187 if (period < FASTSCSI_THRESHOLD) {
6188 fas->f_fasconf3[tgt] |= FAS_CONF3_FASTSCSI;
6189 } else {
6190 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
6191 }
6192
6193 fas_reg_write(fas, (uchar_t *)&fasreg->fas_conf3,
6194 fas->f_fasconf3[tgt]);
6195
6196 DPRINTF4("period %d (%d), offset %d to tgt %d\n",
6197 period,
6198 fas->f_sync_period[tgt] & SYNC_PERIOD_MASK,
6199 fas->f_offset[tgt] & 0xf, tgt);
6200 DPRINTF1("req/ack delay = %x\n", fas->f_req_ack_delay);
6201 DPRINTF1("conf3 = %x\n", fas->f_fasconf3[tgt]);
6202 #ifdef FASDEBUG
6203 /*
6204 			 * Convert input clock cycles per
6205 			 * byte to nanoseconds per byte
6206 			 * (ns/b), and convert that to
6207 			 * k-bytes/second.
6208 */
6209 xfer_freq = FAS_SYNC_KBPS((regval *
6210 fas->f_clock_cycle) / 1000);
6211 xfer_rate = ((fas->f_nowide & (1<<tgt))? 1 : 2) *
6212 xfer_freq;
6213 xfer_div = xfer_rate / 1000;
6214 xfer_mod = xfer_rate % 1000;
6215
6216
6217 if (xfer_mod > 99) {
6218 IPRINTF3(mbs, tgt, xfer_div, xfer_mod);
6219 } else if (xfer_mod > 9) {
6220 IPRINTF3(mbs1, tgt, xfer_div, xfer_mod);
6221 } else {
6222 IPRINTF3(mbs2, tgt, xfer_div, xfer_mod);
6223 }
6224 #endif
6225 fas->f_sync_enabled |= (1<<tgt);
6226
6227 } else {
6228 /*
6229 * We are converting back to async mode.
6230 */
6231 fas_revert_to_async(fas, tgt);
6232 }
6233
6234 /*
6235 * If this target violated the scsi spec, reject the
6236 * sdtr msg and don't negotiate sdtr again.
6237 */
6238 if (msgout == MSG_REJECT) {
6239 fas->f_nosync |= (1<<tgt);
6240 }
6241
6242 fas->f_props_update |= (1<<tgt);
6243
6244 } else if (emsg == MSG_WIDE_DATA_XFER) {
6245 uchar_t width = fas->f_imsgarea[3] & 0xff;
6246
6247 DPRINTF4("wide msg received: %x %x %x %x\n",
6248 fas->f_imsgarea[0], fas->f_imsgarea[1],
6249 fas->f_imsgarea[2], fas->f_imsgarea[3]);
6250
6251 /* always renegotiate sync after wide */
6252 msgout = MSG_EXTENDED;
6253
6254 if ((++(fas->f_wdtr_sent)) & 1) {
6255 IPRINTF1("Wide negotiation initiated by target %d\n",
6256 tgt);
6257 /*
6258 * allow wide neg even if the target driver hasn't
6259 * enabled wide yet.
6260 */
6261 fas->f_nowide &= ~(1<<tgt);
6262 fas_make_wdtr(fas, 0, tgt, width);
6263 IPRINTF1("sending wide sync %d back\n", width);
6264 /*
6265 			 * Let us go back to async mode (per the SCSI spec)
6266 			 * and depend on the target to negotiate sync
6267 			 * after the wide negotiation.
6268 			 * If the target does not do a sync neg and enters
6269 			 * async mode, we will negotiate sync on the next command
6270 */
6271 fas_revert_to_async(fas, tgt);
6272 fas->f_sync_known &= ~(1<<tgt);
6273 } else {
6274 /*
6275 * renegotiate sync after wide
6276 */
6277 fas_set_wide_conf3(fas, tgt, width);
6278 ASSERT(width <= 1);
6279 fas->f_wdtr_sent = 0;
6280 if ((fas->f_nosync & (1<<tgt)) == 0) {
6281 fas_make_sdtr(fas, 0, tgt);
6282 } else {
6283 msgout = 0;
6284 }
6285 }
6286
6287 fas->f_props_update |= (1<<tgt);
6288
6289 } else if (emsg == MSG_MODIFY_DATA_PTR) {
6290 msgout = MSG_REJECT;
6291 } else {
6292 fas_log(fas, CE_WARN,
6293 "Rejecting message %s 0x%x from Target %d",
6294 scsi_mname(MSG_EXTENDED), emsg, tgt);
6295 msgout = MSG_REJECT;
6296 }
6297 out:
6298 New_state(fas, ACTS_UNKNOWN);
6299 return (msgout);
6300 }
6301
6302 /*
6303 * Back off sync negotiation
6304 * and go to async mode
6305 */
6306 static void
6307 fas_revert_to_async(struct fas *fas, int tgt)
6308 {
6309 volatile struct fasreg *fasreg = fas->f_reg;
6310
6311 fas->f_sync_period[tgt] = 0;
6312 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period, 0);
6313 fas->f_offset[tgt] = 0;
6314 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset, 0);
6315 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
6316 fas_reg_write(fas, &fasreg->fas_conf3, fas->f_fasconf3[tgt]);
6317 fas->f_sync_enabled &= ~(1<<tgt);
6318 }
6319
6320 /*
6321 * handle an unexpected selection attempt
6322 * XXX look for better way: msg reject, drop off the bus
6323 */
6324 static int
6325 fas_handle_selection(struct fas *fas)
6326 {
6327 fas_reg_cmd_write(fas, CMD_DISCONNECT);
6328 fas_reg_cmd_write(fas, CMD_FLUSH);
6329 fas_reg_cmd_write(fas, CMD_EN_RESEL);
6330 return (ACTION_RETURN);
6331 }
6332
6333 /*
6334 * dma window handling
6335 */
6336 static int
6337 fas_restore_pointers(struct fas *fas, struct fas_cmd *sp)
6338 {
6339 if (sp->cmd_data_count != sp->cmd_saved_data_count) {
6340 sp->cmd_data_count = sp->cmd_saved_data_count;
6341 sp->cmd_cur_addr = sp->cmd_saved_cur_addr;
6342
6343 if (sp->cmd_cur_win != sp->cmd_saved_win) {
6344 sp->cmd_cur_win = sp->cmd_saved_win;
6345 if (fas_set_new_window(fas, sp)) {
6346 return (-1);
6347 }
6348 }
6349 DPRINTF1("curaddr=%x\n", sp->cmd_cur_addr);
6350 }
6351 return (0);
6352 }
6353
6354 static int
6355 fas_set_new_window(struct fas *fas, struct fas_cmd *sp)
6356 {
6357 off_t offset;
6358 size_t len;
6359 uint_t count;
6360
6361 if (ddi_dma_getwin(sp->cmd_dmahandle, sp->cmd_cur_win,
6362 &offset, &len, &sp->cmd_dmacookie, &count) != DDI_SUCCESS) {
6363 return (-1);
6364 }
6365
6366 DPRINTF4("new window %x: off=%lx, len=%lx, count=%x\n",
6367 sp->cmd_cur_win, offset, len, count);
6368
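	/* windows are set up so that each one maps to a single cookie */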
6369 ASSERT(count == 1);
6370 return (0);
6371 }
6372
6373 static int
6374 fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end)
6375 {
6376
6377 /* are there more windows? */
6378 if (sp->cmd_nwin == 0) {
6379 uint_t nwin = 0;
6380 (void) ddi_dma_numwin(sp->cmd_dmahandle, &nwin);
6381 sp->cmd_nwin = (uchar_t)nwin;
6382 }
6383
6384 DPRINTF5(
6385 "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%lx, nwin=%x\n",
6386 sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
6387 sp->cmd_nwin);
6388
6389 if (sp->cmd_cur_win < sp->cmd_nwin) {
6390 sp->cmd_cur_win++;
6391 if (fas_set_new_window(fas, sp)) {
6392 fas_printstate(fas, "cannot set new window");
6393 sp->cmd_cur_win--;
6394 return (-1);
6395 }
6396 /*
6397 * if there are no more windows, we have a data overrun condition
6398 */
6399 } else {
6400 int slot = sp->cmd_slot;
6401
6402 fas_printstate(fas, "data transfer overrun");
6403 fas_set_pkt_reason(fas, sp, CMD_DATA_OVR, 0);
6404
6405 /*
6406 * if we get data transfer overruns, assume we have
6407 * a weak scsi bus. Note that this won't catch consistent
6408 * underruns or other noise related syndromes.
6409 */
6410 fas_sync_wide_backoff(fas, sp, slot);
6411 return (-1);
6412 }
6413 sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;
6414 DPRINTF1("cur_addr=%x\n", sp->cmd_cur_addr);
6415 return (0);
6416 }
6417
6418 /*
6419 * dma error handler
6420 */
6421 static int
6422 fas_check_dma_error(struct fas *fas)
6423 {
6424 /*
6425 * was there a dma error that caused fas_intr_svc() to be called?
6426 */
6427 if (fas->f_dma->dma_csr & DMA_ERRPEND) {
6428 /*
6429 * It would be desirable to set the ATN* line and attempt to
6430 * do the whole schmear of INITIATOR DETECTED ERROR here,
6431 * but that is too hard to do at present.
6432 */
6433 fas_log(fas, CE_WARN, "Unrecoverable DMA error");
6434 fas_printstate(fas, "dma error");
6435 fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6436 return (-1);
6437 }
6438 return (0);
6439 }
6440
6441 /*
6442 * check for gross error or spurious interrupt
6443 */
6444 static int
6445 fas_handle_gross_err(struct fas *fas)
6446 {
6447 volatile struct fasreg *fasreg = fas->f_reg;
6448
6449 fas_log(fas, CE_WARN,
6450 "gross error in fas status (%x)", fas->f_stat);
6451
6452 IPRINTF5("fas_cmd=%x, stat=%x, intr=%x, step=%x, fifoflag=%x\n",
6453 fasreg->fas_cmd, fas->f_stat, fas->f_intr, fasreg->fas_step,
6454 fasreg->fas_fifo_flag);
6455
6456 fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6457
6458 fas_internal_reset(fas, FAS_RESET_FAS);
6459 return (ACTION_RESET);
6460 }
6461
6462
6463 /*
6464 * handle illegal cmd interrupt or (external) bus reset cleanup
6465 */
6466 static int
6467 fas_illegal_cmd_or_bus_reset(struct fas *fas)
6468 {
6469 /*
6470 * If we detect a SCSI reset, we blow away the current
6471 * command (if there is one) and all disconnected commands
6472 * because we now don't know the state of them at all.
6473 */
6474 ASSERT(fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET));
6475
6476 if (fas->f_intr & FAS_INT_RESET) {
6477 return (ACTION_FINRST);
6478 }
6479
6480 /*
6481 * Illegal cmd to fas:
6482 * This should not happen. The one situation where
6483 * we can get an ILLEGAL COMMAND interrupt is due to
6484 * a bug in the FAS366 during reselection which we
6485 * should be handling in fas_reconnect().
6486 */
6487 if (fas->f_intr & FAS_INT_ILLEGAL) {
6488 IPRINTF1("lastcmd=%x\n", fas->f_reg->fas_cmd);
6489 fas_printstate(fas, "ILLEGAL bit set");
6490 return (ACTION_RESET);
6491 }
6492 /*NOTREACHED*/
6493 return (ACTION_RETURN);
6494 }
6495
6496 /*
6497 * set throttles for all luns of this target
6498 */
6499 static void
6500 fas_set_throttles(struct fas *fas, int slot, int n, int what)
6501 {
6502 int i;
6503
6504 /*
6505 * if the bus is draining/quiesced, no changes to the throttles
6506 * are allowed. Not allowing change of throttles during draining
6507 * limits error recovery but will reduce draining time
6508 *
6509 * all throttles should have been set to HOLD_THROTTLE
6510 */
6511 if (fas->f_softstate & (FAS_SS_QUIESCED | FAS_SS_DRAINING)) {
6512 return;
6513 }
6514
6515 ASSERT((n == 1) || (n == N_SLOTS) || (n == NLUNS_PER_TARGET));
6516 ASSERT((slot + n) <= N_SLOTS);
6517 if (n == NLUNS_PER_TARGET) {
6518 slot &= ~(NLUNS_PER_TARGET - 1);
6519 }
6520
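	/*
	 * HOLD_THROTTLE stops submission of new cmds to a slot,
	 * DRAIN_THROTTLE lets outstanding cmds drain, and MAX_THROTTLE
	 * allows the full queue depth; a target that doesn't do tagged
	 * queueing (f_notag bit set) is limited to one cmd at a time
	 */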
6521 for (i = slot; i < (slot + n); i++) {
6522 if (what == HOLD_THROTTLE) {
6523 fas->f_throttle[i] = HOLD_THROTTLE;
6524 } else if ((fas->f_reset_delay[i/NLUNS_PER_TARGET]) == 0) {
6525 if (what == MAX_THROTTLE) {
6526 int tshift = 1 << (i/NLUNS_PER_TARGET);
6527 fas->f_throttle[i] = (short)
6528 ((fas->f_notag & tshift)? 1 : what);
6529 } else {
6530 fas->f_throttle[i] = what;
6531 }
6532 }
6533 }
6534 }
6535
6536 static void
6537 fas_set_all_lun_throttles(struct fas *fas, int slot, int what)
6538 {
6539 /*
6540 	 * fas_set_throttles() will adjust slot to start at LUN 0
6541 */
6542 fas_set_throttles(fas, slot, NLUNS_PER_TARGET, what);
6543 }
6544
6545 static void
6546 fas_full_throttle(struct fas *fas, int slot)
6547 {
6548 fas_set_throttles(fas, slot, 1, MAX_THROTTLE);
6549 }
6550
6551 /*
6552 * run a polled cmd
6553 */
6554 static void
6555 fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp)
6556 {
6557 int limit, i, n;
6558 int timeout = 0;
6559
6560 DPRINTF4("runpoll: slot=%x, cmd=%x, current_sp=0x%p, tcmds=%x\n",
6561 slot, *((uchar_t *)sp->cmd_pkt->pkt_cdbp),
6562 (void *)fas->f_current_sp, fas->f_tcmds[slot]);
6563
6564 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_START, "fas_runpoll_start");
6565
6566 /*
6567 * wait for cmd to complete
6568 * don't start new cmds so set throttles to HOLD_THROTTLE
6569 */
6570 while ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {
6571 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6572 fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
6573 }
6574 if ((fas->f_state != STATE_FREE) || INTPENDING(fas)) {
6575 if (fas_dopoll(fas, POLL_TIMEOUT) <= 0) {
6576 IPRINTF("runpoll: timeout on draining\n");
6577 goto bad;
6578 }
6579 }
6580
6581 ASSERT(fas->f_state == STATE_FREE);
6582 ASSERT(fas->f_current_sp == NULL);
6583
6584 		 * If this is not a proxy cmd, don't start the cmd
6585 		 * without draining the active cmd(s).
6586 		 * For proxy cmds, we zap the active cmd and assume
6587 		 * that the caller will take care of this.
6588 		 * For tagged cmds, wait with submitting a non-tagged
6589 		 * cmd until the queue has been drained.
6590 		 * If the cmd is a request sense, then draining won't
6591 		 * help since we are in a contingent allegiance condition.
6592 * help since we are in contingence allegiance condition
6593 */
6594 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6595 uchar_t *cmdp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
6596
6597 if ((fas->f_tcmds[slot]) &&
6598 (NOTAG(Tgt(sp)) ||
6599 (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
6600 (*cmdp != SCMD_REQUEST_SENSE)))) {
6601 if (timeout < POLL_TIMEOUT) {
6602 timeout += 100;
6603 drv_usecwait(100);
6604 continue;
6605 } else {
6606 fas_log(fas, CE_WARN,
6607 "polled cmd failed (target busy)");
6608 goto cleanup;
6609 }
6610 }
6611 }
6612
6613 /*
6614 		 * If the draining of active commands killed the
6615 		 * current polled command, we're done.
6616 */
6617 if (sp->cmd_flags & CFLAG_COMPLETED) {
6618 break;
6619 }
6620
6621 /*
6622 * ensure we are not accessing a target too quickly
6623 * after a reset. the throttles get set back later
6624 * by the reset delay watch; hopefully, we don't go
6625 * thru this loop more than once
6626 */
6627 if (fas->f_reset_delay[slot/NLUNS_PER_TARGET]) {
6628 IPRINTF1("reset delay set for slot %x\n", slot);
6629 drv_usecwait(fas->f_scsi_reset_delay * 1000);
6630 for (i = 0; i < NTARGETS_WIDE; i++) {
6631 if (fas->f_reset_delay[i]) {
6632 int s = i * NLUNS_PER_TARGET;
6633 int e = s + NLUNS_PER_TARGET;
6634 fas->f_reset_delay[i] = 0;
6635 for (; s < e; s++) {
6636 fas_full_throttle(fas, s);
6637 }
6638 }
6639 }
6640 }
6641
6642 /*
6643 * fas_startcmd() will return false if preempted
6644 * or draining
6645 */
6646 if (fas_startcmd(fas, sp) != TRUE) {
6647 IPRINTF("runpoll: cannot start new cmds\n");
6648 ASSERT(fas->f_current_sp != sp);
6649 continue;
6650 }
6651
6652 /*
6653 * We're now 'running' this command.
6654 *
6655 * fas_dopoll will always return when
6656 		 * fas->f_state is STATE_FREE.
6657 */
6658 limit = sp->cmd_pkt->pkt_time * 1000000;
6659 if (limit == 0) {
6660 limit = POLL_TIMEOUT;
6661 }
6662
6663 /*
6664 * if the cmd disconnected, the first call to fas_dopoll
6665 * will return with bus free; we go thru the loop one more
6666 * time and wait limit usec for the target to reconnect
6667 */
6668 for (i = 0; i <= POLL_TIMEOUT; i += 100) {
6669
6670 if ((n = fas_dopoll(fas, limit)) <= 0) {
6671 IPRINTF("runpoll: timeout on polling\n");
6672 goto bad;
6673 }
6674
6675 /*
6676 * If a preemption occurred that caused this
6677 * command to actually not start, go around
6678 * the loop again. If CFLAG_COMPLETED is set, the
6679 * command completed
6680 */
6681 if ((sp->cmd_flags & CFLAG_COMPLETED) ||
6682 (sp->cmd_pkt->pkt_state == 0)) {
6683 break;
6684 }
6685
6686 /*
6687 * the bus may have gone free because the target
6688 * disconnected; go thru the loop again
6689 */
6690 ASSERT(fas->f_state == STATE_FREE);
6691 if (n == 0) {
6692 /*
6693 * bump i, we have waited limit usecs in
6694 * fas_dopoll
6695 */
6696 i += limit - 100;
6697 }
6698 }
6699
6700 if ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {
6701
6702 if (i > POLL_TIMEOUT) {
6703 IPRINTF("polled timeout on disc. cmd\n");
6704 goto bad;
6705 }
6706
6707 if (sp->cmd_pkt->pkt_state) {
6708 /*
6709 * don't go thru the loop again; the cmd
6710 * was already started
6711 */
6712 IPRINTF("fas_runpoll: cmd started??\n");
6713 goto bad;
6714 }
6715 }
6716 }
6717
6718 /*
6719 	 * blindly restore throttles, which is preferable to
6720 	 * leaving a throttle hanging at 0 with no one to clear it
6721 */
6722 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6723 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
6724 }
6725
6726 /*
6727 * ensure that the cmd is completely removed
6728 */
6729 fas_remove_cmd(fas, sp, 0);
6730
6731 /*
6732 * If we stored up commands to do, start them off now.
6733 */
6734 if ((fas->f_state == STATE_FREE) &&
6735 (!(sp->cmd_flags & CFLAG_CMDPROXY))) {
6736 (void) fas_ustart(fas);
6737 }
6738 exit:
6739 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_END, "fas_runpoll_end");
6740 return;
6741
6742 bad:
6743 fas_log(fas, CE_WARN, "Polled cmd failed");
6744 #ifdef FASDEBUG
6745 fas_printstate(fas, "fas_runpoll: polled cmd failed");
6746 #endif /* FASDEBUG */
6747
6748 cleanup:
6749 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
6750
6751 /*
6752 * clean up all traces of this sp because fas_runpoll will return
6753 * before fas_reset_recovery() cleans up
6754 */
6755 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
6756 fas_decrement_ncmds(fas, sp);
6757 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
6758
6759 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
6760 (void) fas_reset_bus(fas);
6761 }
6762 goto exit;
6763 }
6764
6765 /*
6766 * Poll for command completion (i.e., no interrupts)
6767 * limit is in usec (and will not be very accurate)
6768 *
6769 * the assumption is that we only run polled cmds in interrupt context
6770 * as scsi_transport will filter out FLAG_NOINTR
6771 */
6772 static int
6773 fas_dopoll(struct fas *fas, int limit)
6774 {
6775 int i, n;
6776
6777 /*
6778 	 * the timeout is not very accurate since we don't know how
6779 	 * long the poll takes;
6780 	 * also, if the packet gets started fairly late, we may
6781 	 * time out prematurely
6782 	 * fas_dopoll always returns if f_state transitions to STATE_FREE
6783 */
6784 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_START, "fas_dopoll_start");
6785
6786 if (limit == 0) {
6787 limit = POLL_TIMEOUT;
6788 }
6789
6790 for (n = i = 0; i < limit; i += 100) {
6791 if (INTPENDING(fas)) {
6792 fas->f_polled_intr = 1;
6793 n++;
6794 (void) fas_intr_svc(fas);
6795 if (fas->f_state == STATE_FREE)
6796 break;
6797 }
6798 drv_usecwait(100);
6799 }
6800
6801 if (i >= limit && fas->f_state != STATE_FREE) {
6802 fas_printstate(fas, "polled command timeout");
6803 n = -1;
6804 }
6805 TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_END,
6806 "fas_dopoll_end: rval %x", n);
6807 return (n);
6808 }
6809
6810 /*
6811 * prepare a sync negotiation message
6812 */
6813 static void
6814 fas_make_sdtr(struct fas *fas, int msgout_offset, int target)
6815 {
6816 uchar_t *p = fas->f_cur_msgout + msgout_offset;
6817 ushort_t tshift = 1<<target;
6818 uchar_t period = MIN_SYNC_PERIOD(fas);
6819 uchar_t offset = fas_default_offset;
6820
6821 /*
6822 	 * If this target experienced a sync backoff, use the
6823 	 * target's sync speed that was adjusted in
6824 	 * fas_sync_wide_backoff.  For a second sync backoff,
6825 	 * the offset will be adjusted below in the sanity checks.
6826 */
6827 if (fas->f_backoff & tshift) {
6828 period = fas->f_neg_period[target];
6829 }
6830
6831 /*
6832 	 * If this is a response to a target-initiated sdtr,
6833 * use the agreed upon values.
6834 */
6835 if (fas->f_sdtr_sent & 1) {
6836 period = fas->f_neg_period[target];
6837 offset = fas->f_offset[target];
6838 }
6839
6840 /*
6841 * If the target driver disabled
6842 * sync then make offset = 0
6843 */
6844 if (fas->f_force_async & tshift) {
6845 offset = 0;
6846 }
6847
6848 /*
6849 * sanity check of period and offset
6850 */
6851 if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_FAST) {
6852 if (period < (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4)) {
6853 period = (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4);
6854 }
6855 } else if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_SYNC) {
6856 if (period < (uchar_t)(DEFAULT_SYNC_PERIOD/4)) {
6857 period = (uchar_t)(DEFAULT_SYNC_PERIOD/4);
6858 }
6859 } else {
6860 fas->f_nosync |= tshift;
6861 }
6862
6863 if (fas->f_nosync & tshift) {
6864 offset = 0;
6865 }
6866
6867 if ((uchar_t)(offset & 0xf) > fas_default_offset) {
6868 offset = fas_default_offset | fas->f_req_ack_delay;
6869 }
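	/*
	 * (only the low nibble of f_offset is the SCSI offset; the
	 * upper bits carry the chip's req/ack delay for the sync
	 * offset register)
	 */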
6870
6871 fas->f_neg_period[target] = (uchar_t)period;
6872 fas->f_offset[target] = (uchar_t)offset;
6873
6874 *p++ = (uchar_t)MSG_EXTENDED;
6875 *p++ = (uchar_t)3;
6876 *p++ = (uchar_t)MSG_SYNCHRONOUS;
6877 *p++ = period;
6878 *p++ = offset & 0xf;
6879 fas->f_omsglen = 5 + msgout_offset;
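	/*
	 * the outgoing SDTR message is thus laid out as:
	 * MSG_EXTENDED (0x01), length (3), MSG_SYNCHRONOUS (0x01),
	 * <period>, <offset & 0xf>
	 */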
6880
6881 IPRINTF2("fas_make_sdtr: period = %x, offset = %x\n",
6882 period, offset);
6883 /*
6884 * increment sdtr flag, odd value indicates that we initiated
6885 * the negotiation
6886 */
6887 fas->f_sdtr_sent++;
6888
6889 /*
6890 	 * the target may reject the optional sync message, so
6891 	 * to avoid negotiating on every cmd, set sync known here;
6892 	 * we should not negotiate wide after sync again
6893 */
6894 fas->f_sync_known |= 1<<target;
6895 fas->f_wide_known |= 1<<target;
6896 }
6897
6898 /*
6899 * prepare a wide negotiation message
6900 */
6901 static void
6902 fas_make_wdtr(struct fas *fas, int msgout_offset, int target, int width)
6903 {
6904 uchar_t *p = fas->f_cur_msgout + msgout_offset;
6905
6906 if (((fas->f_target_scsi_options[target] & SCSI_OPTIONS_WIDE) == 0) ||
6907 (fas->f_nowide & (1<<target))) {
6908 fas->f_nowide |= 1<<target;
6909 width = 0;
6910 }
6911 if (fas->f_force_narrow & (1<<target)) {
6912 width = 0;
6913 }
6914 width = min(FAS_XFER_WIDTH, width);
6915
6916 *p++ = (uchar_t)MSG_EXTENDED;
6917 *p++ = (uchar_t)2;
6918 *p++ = (uchar_t)MSG_WIDE_DATA_XFER;
6919 *p++ = (uchar_t)width;
6920 fas->f_omsglen = 4 + msgout_offset;
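	/*
	 * the outgoing WDTR message is laid out as:
	 * MSG_EXTENDED (0x01), length (2), MSG_WIDE_DATA_XFER (0x03),
	 * <width> (0 = narrow/8 bit, 1 = wide/16 bit)
	 */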
6921 IPRINTF1("fas_make_wdtr: width=%x\n", width);
6922
6923 /*
6924 * increment wdtr flag, odd value indicates that we initiated
6925 * the negotiation
6926 */
6927 fas->f_wdtr_sent++;
6928
6929 /*
6930 	 * the target may reject the optional wide message, so
6931 	 * to avoid negotiating on every cmd, set wide known here
6932 */
6933 fas->f_wide_known |= 1<<target;
6934
6935 fas_set_wide_conf3(fas, target, width);
6936 }
6937
6938 /*
6939 * auto request sense support
6940 * create or destroy an auto request sense packet
6941 */
6942 static int
6943 fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap)
6944 {
6945 /*
6946 	 * Allocate a request sense packet using scsi_init_pkt()
6947 */
6948 struct fas_cmd *rqpktp;
6949 uchar_t slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
6950 struct buf *bp;
6951 struct arq_private_data *arq_data;
6952
6953 /*
6954 * if one exists, don't create another
6955 */
6956 if (fas->f_arq_pkt[slot] != 0) {
6957 return (0);
6958 }
6959
6960 /*
6961 	 * it would be nicer if we could allow the target driver
6962 	 * to specify the size, but using SENSE_LENGTH is easier
6963 	 * and OK for most drivers.
6964 * Allocate a request sense packet.
6965 */
6966 bp = scsi_alloc_consistent_buf(ap, (struct buf *)NULL,
6967 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
6968 rqpktp = PKT2CMD(scsi_init_pkt(ap,
6969 NULL, bp, CDB_GROUP0, 1, PKT_PRIV_LEN,
6970 PKT_CONSISTENT, SLEEP_FUNC, NULL));
6971 arq_data =
6972 (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
6973 arq_data->arq_save_bp = bp;
6974
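	/*
	 * build the REQUEST SENSE cdb; FLAG_HEAD queues it at the
	 * front and FLAG_NODISCON keeps the target connected while
	 * the sense data is fetched
	 */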
6975 RQ_MAKECOM_G0((CMD2PKT(rqpktp)),
6976 FLAG_SENSING | FLAG_HEAD | FLAG_NODISCON,
6977 (char)SCMD_REQUEST_SENSE, 0, (char)SENSE_LENGTH);
6978 rqpktp->cmd_flags |= CFLAG_CMDARQ;
6979 rqpktp->cmd_slot = slot;
6980 rqpktp->cmd_pkt->pkt_ha_private = rqpktp;
6981 fas->f_arq_pkt[slot] = rqpktp;
6982
6983 /*
6984 * we need a function ptr here so abort/reset can
6985 * defer callbacks; fas_call_pkt_comp() calls
6986 * fas_complete_arq_pkt() directly without releasing the lock
6987 * However, since we are not calling back directly thru
6988 * pkt_comp, don't check this with warlock
6989 */
6990 #ifndef __lock_lint
6991 rqpktp->cmd_pkt->pkt_comp =
6992 (void (*)(struct scsi_pkt *))fas_complete_arq_pkt;
6993 #endif
6994 return (0);
6995 }
6996
6997 static int
6998 fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap)
6999 {
7000 struct fas_cmd *rqpktp;
7001 int slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
7002
7003 /*
7004 * if there is still a pkt saved or no rqpkt
7005 * then we cannot deallocate or there is nothing to do
7006 */
7007 if ((rqpktp = fas->f_arq_pkt[slot]) != NULL) {
7008 struct arq_private_data *arq_data =
7009 (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
7010 struct buf *bp = arq_data->arq_save_bp;
7011 /*
7012 * is arq pkt in use?
7013 */
7014 if (arq_data->arq_save_sp) {
7015 return (-1);
7016 }
7017
7018 scsi_destroy_pkt(CMD2PKT(rqpktp));
7019 scsi_free_consistent_buf(bp);
7020 fas->f_arq_pkt[slot] = 0;
7021 }
7022 return (0);
7023 }
7024
7025 /*
7026 * complete an arq packet by copying over transport info and the actual
7027 * request sense data; called with mutex held from fas_call_pkt_comp()
7028 */
7029 void
7030 fas_complete_arq_pkt(struct scsi_pkt *pkt)
7031 {
7032 struct fas *fas = ADDR2FAS(&pkt->pkt_address);
7033 struct fas_cmd *sp = pkt->pkt_ha_private;
7034 struct scsi_arq_status *arqstat;
7035 struct arq_private_data *arq_data =
7036 (struct arq_private_data *)sp->cmd_pkt->pkt_private;
7037 struct fas_cmd *ssp = arq_data->arq_save_sp;
7038 struct buf *bp = arq_data->arq_save_bp;
7039 int slot = sp->cmd_slot;
7040
7041 DPRINTF1("completing arq pkt sp=0x%p\n", (void *)sp);
7042 ASSERT(sp == fas->f_arq_pkt[slot]);
7043 ASSERT(arq_data->arq_save_sp != NULL);
7044 ASSERT(ssp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
7045
7046 arqstat = (struct scsi_arq_status *)(ssp->cmd_pkt->pkt_scbp);
7047 arqstat->sts_rqpkt_status = *((struct scsi_status *)
7048 (sp->cmd_pkt->pkt_scbp));
7049 arqstat->sts_rqpkt_reason = sp->cmd_pkt->pkt_reason;
7050 arqstat->sts_rqpkt_state = sp->cmd_pkt->pkt_state;
7051 arqstat->sts_rqpkt_statistics = sp->cmd_pkt->pkt_statistics;
7052 arqstat->sts_rqpkt_resid = sp->cmd_pkt->pkt_resid;
7053 arqstat->sts_sensedata =
7054 *((struct scsi_extended_sense *)bp->b_un.b_addr);
7055 ssp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
7056 arq_data->arq_save_sp = NULL;
7057
7058 /*
7059 * ASC=0x47 is parity error
7060 */
7061 if (arqstat->sts_sensedata.es_key == KEY_ABORTED_COMMAND &&
7062 arqstat->sts_sensedata.es_add_code == 0x47) {
7063 fas_sync_wide_backoff(fas, sp, slot);
7064 }
7065
7066 fas_call_pkt_comp(fas, ssp);
7067 }
7068
7069 /*
7070 * handle check condition and start an arq packet
7071 */
7072 static int
7073 fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp)
7074 {
7075 struct fas_cmd *arqsp = fas->f_arq_pkt[sp->cmd_slot];
7076 struct arq_private_data *arq_data;
7077 struct buf *bp;
7078
7079 if ((arqsp == NULL) || (arqsp == sp) ||
7080 (sp->cmd_scblen < sizeof (struct scsi_arq_status))) {
7081 IPRINTF("no arq packet or cannot arq on arq pkt\n");
7082 fas_call_pkt_comp(fas, sp);
7083 return (0);
7084 }
7085
7086 arq_data = (struct arq_private_data *)arqsp->cmd_pkt->pkt_private;
7087 bp = arq_data->arq_save_bp;
7088
7089 ASSERT(sp->cmd_flags & CFLAG_FINISHED);
7090 ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
7091 DPRINTF3("start arq for slot=%x, arqsp=0x%p, rqpkt=0x%p\n",
7092 sp->cmd_slot, (void *)arqsp, (void *)fas->f_arq_pkt[sp->cmd_slot]);
7093 if (arq_data->arq_save_sp != NULL) {
7094 IPRINTF("auto request sense already in progress\n");
7095 goto fail;
7096 }
7097
7098 arq_data->arq_save_sp = sp;
7099
7100 bzero(bp->b_un.b_addr, sizeof (struct scsi_extended_sense));
7101
7102 /*
7103 	 * copy the timeout from the original packet for lack of a better
7104 	 * value;
7105 	 * we could take the residue of the timeout but that could cause
7106 	 * premature timeouts perhaps
7107 */
7108 arqsp->cmd_pkt->pkt_time = sp->cmd_pkt->pkt_time;
7109 arqsp->cmd_flags &= ~CFLAG_TRANFLAG;
7110 ASSERT(arqsp->cmd_pkt->pkt_comp != NULL);
7111
7112 /*
7113 * make sure that auto request sense always goes out
7114 * after queue full and after throttle was set to draining
7115 */
7116 fas_full_throttle(fas, sp->cmd_slot);
7117 (void) fas_accept_pkt(fas, arqsp, NO_TRAN_BUSY);
7118 return (0);
7119
7120 fail:
7121 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
7122 fas_log(fas, CE_WARN, "auto request sense failed\n");
7123 fas_dump_cmd(fas, sp);
7124 fas_call_pkt_comp(fas, sp);
7125 return (-1);
7126 }
7127
7128
7129 /*
7130 * handle qfull condition
7131 */
7132 static void
7133 fas_handle_qfull(struct fas *fas, struct fas_cmd *sp)
7134 {
7135 int slot = sp->cmd_slot;
7136
7137 if ((++sp->cmd_qfull_retries > fas->f_qfull_retries[Tgt(sp)]) ||
7138 (fas->f_qfull_retries[Tgt(sp)] == 0)) {
7139 /*
7140 * We have exhausted the retries on QFULL, or,
7141 * the target driver has indicated that it
7142 * wants to handle QFULL itself by setting
7143 * qfull-retries capability to 0. In either case
7144 * we want the target driver's QFULL handling
7145 * to kick in. We do this by having pkt_reason
7146 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
7147 */
7148 IPRINTF2("%d.%d: status queue full, retries over\n",
7149 Tgt(sp), Lun(sp));
7150 fas_set_all_lun_throttles(fas, slot, DRAIN_THROTTLE);
7151 fas_call_pkt_comp(fas, sp);
7152 } else {
7153 if (fas->f_reset_delay[Tgt(sp)] == 0) {
7154 fas->f_throttle[slot] =
7155 max((fas->f_tcmds[slot] - 2), 0);
7156 }
7157 IPRINTF3("%d.%d: status queue full, new throttle = %d, "
7158 "retrying\n", Tgt(sp), Lun(sp), fas->f_throttle[slot]);
7159 sp->cmd_pkt->pkt_flags |= FLAG_HEAD;
7160 sp->cmd_flags &= ~CFLAG_TRANFLAG;
7161 (void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
7162
7163 /*
7164 * when target gives queue full status with no commands
7165 * outstanding (f_tcmds[] == 0), throttle is set to 0
7166 * (HOLD_THROTTLE), and the queue full handling starts
7167 * (see psarc/1994/313); if there are commands outstanding,
7168 * the throttle is set to (f_tcmds[] - 2)
7169 */
7170 if (fas->f_throttle[slot] == HOLD_THROTTLE) {
7171 /*
7172 * By setting throttle to QFULL_THROTTLE, we
7173 * avoid submitting new commands and in
7174 * fas_restart_cmd find out slots which need
7175 * their throttles to be cleared.
7176 */
7177 fas_set_all_lun_throttles(fas, slot, QFULL_THROTTLE);
7178 if (fas->f_restart_cmd_timeid == 0) {
7179 fas->f_restart_cmd_timeid =
7180 timeout(fas_restart_cmd, fas,
7181 fas->f_qfull_retry_interval[Tgt(sp)]);
7182 }
7183 }
7184 }
7185 }
7186
7187 /*
7188 * invoked from timeout() to restart qfull cmds with throttle == 0
7189 */
7190 static void
7191 fas_restart_cmd(void *fas_arg)
7192 {
7193 struct fas *fas = fas_arg;
7194 int i;
7195
7196 IPRINTF("fas_restart_cmd:\n");
7197
7198 mutex_enter(FAS_MUTEX(fas));
7199 fas->f_restart_cmd_timeid = 0;
7200
7201 for (i = 0; i < N_SLOTS; i += NLUNS_PER_TARGET) {
7202 if (fas->f_reset_delay[i/NLUNS_PER_TARGET] == 0) {
7203 if (fas->f_throttle[i] == QFULL_THROTTLE) {
7204 fas_set_all_lun_throttles(fas,
7205 i, MAX_THROTTLE);
7206 }
7207 }
7208 }
7209
7210 (void) fas_ustart(fas);
7211 mutex_exit(FAS_MUTEX(fas));
7212 }
7213
7214 /*
7215 * Timeout handling:
7216 * Command watchdog routines
7217 */
7218
7219 /*ARGSUSED*/
7220 static void
7221 fas_watch(void *arg)
7222 {
7223 struct fas *fas;
7224 ushort_t props_update = 0;
7225
7226 rw_enter(&fas_global_rwlock, RW_READER);
7227
7228 for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {
7229
7230 mutex_enter(FAS_MUTEX(fas));
7231 IPRINTF2("ncmds=%x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
7232
7233 #ifdef FAS_PIO_COUNTS
7234 if (fas->f_total_cmds) {
7235 int n = fas->f_total_cmds;
7236
7237 fas_log(fas, CE_NOTE,
7238 "total=%d, cmds=%d fas-rd=%d, fas-wrt=%d, dma-rd=%d, dma-wrt=%d\n",
7239 fas->f_total_cmds,
7240 fas->f_reg_cmds/n,
7241 fas->f_reg_reads/n, fas->f_reg_writes/n,
7242 fas->f_reg_dma_reads/n, fas->f_reg_dma_writes/n);
7243
7244 fas->f_reg_reads = fas->f_reg_writes =
7245 fas->f_reg_dma_reads = fas->f_reg_dma_writes =
7246 fas->f_reg_cmds = fas->f_total_cmds = 0;
7247 }
7248 #endif
7249 if (fas->f_ncmds) {
7250 int i;
7251 fas_watchsubr(fas);
7252
7253 /*
7254 			 * reset the throttle; it may have been
7255 			 * too low if queue full was caused by
7256 			 * another initiator.
7257 			 * Only reset the throttle if no cmd is active
7258 			 * in slot 0 (untagged cmd)
7259 */
7260 #ifdef FAS_TEST
7261 if (fas_enable_untagged) {
7262 fas_test_untagged++;
7263 }
7264 #endif
7265 for (i = 0; i < N_SLOTS; i++) {
7266 if ((fas->f_throttle[i] > HOLD_THROTTLE) &&
7267 (fas->f_active[i] &&
7268 (fas->f_active[i]->f_slot[0] == NULL))) {
7269 fas_full_throttle(fas, i);
7270 }
7271 }
7272 }
7273
7274 if (fas->f_props_update) {
7275 int i;
7276 /*
7277 			 * f_mutex will be released and reentered in
7278 			 * fas_update_props().
7279 			 * Hence we save fas->f_props_update now and
7280 			 * set it to 0, indicating that the property has
7281 			 * been updated. This avoids a race condition with
7282 			 * any thread that runs in interrupt context and
7283 			 * attempts to set f_props_update to a non-zero value
7284 */
7285 props_update = fas->f_props_update;
7286 fas->f_props_update = 0;
7287 for (i = 0; i < NTARGETS_WIDE; i++) {
7288 if (props_update & (1<<i)) {
7289 fas_update_props(fas, i);
7290 }
7291 }
7292 }
7293 fas_check_waitQ_and_mutex_exit(fas);
7294
7295 }
7296 rw_exit(&fas_global_rwlock);
7297
7298 again:
7299 mutex_enter(&fas_global_mutex);
7300 if (fas_timeout_initted && fas_timeout_id) {
7301 fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
7302 }
7303 mutex_exit(&fas_global_mutex);
7304 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_WATCH_END, "fas_watch_end");
7305 }
7306
7307 static void
7308 fas_watchsubr(struct fas *fas)
7309 {
7310 short slot;
7311 int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
7312 struct f_slots *tag_slots;
7313
7314 for (slot = 0; slot < N_SLOTS; slot += d) {
7315
7316 #ifdef FAS_TEST
7317 if (fas_btest) {
7318 fas_btest = 0;
7319 (void) fas_reset_bus(fas);
7320 return;
7321 }
7322 if (fas_force_timeout && fas->f_tcmds[slot]) {
7323 fas_cmd_timeout(fas, slot);
7324 fas_force_timeout = 0;
7325 return;
7326 }
7327 fas_test_reset(fas, slot);
7328 fas_test_abort(fas, slot);
7329 #endif /* FAS_TEST */
7330
7331 /*
7332 * check tagged cmds first
7333 */
7334 tag_slots = fas->f_active[slot];
7335 DPRINTF3(
7336 "fas_watchsubr: slot %x: tcmds=%x, timeout=%x\n",
7337 slot, fas->f_tcmds[slot], tag_slots->f_timeout);
7338
7339 if ((fas->f_tcmds[slot] > 0) && (tag_slots->f_timebase)) {
7340
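			/*
			 * a slot whose timebase is still within one
			 * watchdog tick gets a grace interval before
			 * its timeout starts being decremented below
			 */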
7341 if (tag_slots->f_timebase <=
7342 fas_scsi_watchdog_tick) {
7343 tag_slots->f_timebase +=
7344 fas_scsi_watchdog_tick;
7345 continue;
7346 }
7347
7348 tag_slots->f_timeout -= fas_scsi_watchdog_tick;
7349
7350 if (tag_slots->f_timeout < 0) {
7351 fas_cmd_timeout(fas, slot);
7352 return;
7353 }
7354 if ((tag_slots->f_timeout) <=
7355 fas_scsi_watchdog_tick) {
7356 IPRINTF1("pending timeout on slot=%x\n",
7357 slot);
7358 IPRINTF("draining all queues\n");
7359 fas_set_throttles(fas, 0, N_SLOTS,
7360 DRAIN_THROTTLE);
7361 }
7362 }
7363 }
7364 }
7365
7366 /*
7367 * timeout recovery
7368 */
7369 static void
7370 fas_cmd_timeout(struct fas *fas, int slot)
7371 {
7372 int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
7373 int target, lun, i, n, tag, ncmds;
7374 struct fas_cmd *sp = NULL;
7375 struct fas_cmd *ssp;
7376
7377 ASSERT(fas->f_tcmds[slot]);
7378
7379 #ifdef FAS_TEST
7380 if (fas_test_stop) {
7381 debug_enter("timeout");
7382 }
7383 #endif
7384
7385 /*
7386 * set throttle back; no more draining necessary
7387 */
7388 for (i = 0; i < N_SLOTS; i += d) {
7389 if (fas->f_throttle[i] == DRAIN_THROTTLE) {
7390 fas_full_throttle(fas, i);
7391 }
7392 }
7393
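	/*
	 * for untagged targets, the single outstanding cmd is kept
	 * in f_slot[0]
	 */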
7394 if (NOTAG(slot/NLUNS_PER_TARGET)) {
7395 sp = fas->f_active[slot]->f_slot[0];
7396 }
7397
7398 /*
7399 * if no interrupt pending for next second then the current
7400 * cmd must be stuck; switch slot and sp to current slot and cmd
7401 */
7402 if (fas->f_current_sp && fas->f_state != STATE_FREE) {
7403 for (i = 0; (i < 10000) && (INTPENDING(fas) == 0); i++) {
7404 drv_usecwait(100);
7405 }
7406 if (INTPENDING(fas) == 0) {
7407 slot = fas->f_current_sp->cmd_slot;
7408 sp = fas->f_current_sp;
7409 }
7410 }
7411
7412 target = slot / NLUNS_PER_TARGET;
7413 lun = slot % NLUNS_PER_TARGET;
7414
7415 /*
7416 * update all outstanding pkts for this slot
7417 */
7418 n = fas->f_active[slot]->f_n_slots;
7419 for (ncmds = tag = 0; tag < n; tag++) {
7420 ssp = fas->f_active[slot]->f_slot[tag];
7421 if (ssp && ssp->cmd_pkt->pkt_time) {
7422 fas_set_pkt_reason(fas, ssp, CMD_TIMEOUT,
7423 STAT_TIMEOUT | STAT_ABORTED);
7424 fas_short_dump_cmd(fas, ssp);
7425 ncmds++;
7426 }
7427 }
7428
7429 /*
7430 * no timed-out cmds here?
7431 */
7432 if (ncmds == 0) {
7433 return;
7434 }
7435
7436 /*
7437 * dump all we know about this timeout
7438 */
7439 if (sp) {
7440 if (sp->cmd_flags & CFLAG_CMDDISC) {
7441 fas_log(fas, CE_WARN,
7442 "Disconnected command timeout for Target %d.%d",
7443 target, lun);
7444 } else {
7445 ASSERT(sp == fas->f_current_sp);
7446 fas_log(fas, CE_WARN,
7447 "Connected command timeout for Target %d.%d",
7448 target, lun);
7449 /*
7450 			 * Current command timeout often appears to relate
7451 * to noisy SCSI in synchronous mode.
7452 */
7453 if (fas->f_state == ACTS_DATA_DONE) {
7454 fas_sync_wide_backoff(fas, sp, slot);
7455 }
7456 }
7457 #ifdef FASDEBUG
7458 fas_printstate(fas, "timeout");
7459 #endif
7460 } else {
7461 fas_log(fas, CE_WARN,
7462 "Disconnected tagged cmd(s) (%d) timeout for Target %d.%d",
7463 fas->f_tcmds[slot], target, lun);
7464 }
7465
7466 if (fas_abort_cmd(fas, sp, slot) == ACTION_SEARCH) {
7467 (void) fas_istart(fas);
7468 }
7469 }
7470
7471 /*
7472 * fas_sync_wide_backoff() increases sync period and enables slow
7473 * cable mode.
7474 * the second time, we revert back to narrow/async
7475 * we count on a bus reset to disable wide in the target and will
7476 * never renegotiate wide again
7477 */
7478 static void
7479 fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
7480 int slot)
7481 {
7482 char phase;
7483 ushort_t state = fas->f_state;
7484 uchar_t tgt = slot / NLUNS_PER_TARGET;
7485 uint_t tshift = 1 << tgt;
7486
7487 phase = fas_reg_read(fas, &fas->f_reg->fas_stat);
7488 phase &= FAS_PHASE_MASK;
7489
7490 IPRINTF4(
7491 "fas_sync_wide_backoff: target %d: state=%x, phase=%x, sp=0x%p\n",
7492 tgt, state, phase, (void *)sp);
7493
7494 #ifdef FASDEBUG
7495 if (fas_no_sync_wide_backoff) {
7496 return;
7497 }
7498 #endif
7499
7500 /*
7501 	 * if this is not the first time, or sync is disabled
7502 	 * thru scsi_options, then disable wide
7503 */
7504 if ((fas->f_backoff & tshift) ||
7505 (fas->f_nosync & tshift)) {
7506 /*
7507 * disable wide for just this target
7508 */
7509 if ((fas->f_nowide & tshift) == 0) {
7510 fas_log(fas, CE_WARN,
7511 "Target %d disabled wide SCSI mode", tgt);
7512 }
7513 /*
7514 * do not reset the bit in f_nowide because that
7515 * would not force a renegotiation of wide
7516 * and do not change any register value yet because
7517 * we may have reconnects before the renegotiations
7518 */
7519 fas->f_target_scsi_options[tgt] &= ~SCSI_OPTIONS_WIDE;
7520 }
7521
7522 /*
7523 	 * reduce the xfer rate: the first time, double the sync period
7524 	 * (halving the rate); the second time, disable sync and wide.
7525 */
7526 if (fas->f_offset[tgt] != 0) {
7527 /*
7528 * do not reset the bit in f_nosync because that
7529 * would not force a renegotiation of sync
7530 */
7531 if (fas->f_backoff & tshift) {
7532 if ((fas->f_nosync & tshift) == 0) {
7533 fas_log(fas, CE_WARN,
7534 "Target %d reverting to async. mode",
7535 tgt);
7536 }
7537 fas->f_target_scsi_options[tgt] &=
7538 ~(SCSI_OPTIONS_SYNC | SCSI_OPTIONS_FAST);
7539 } else {
7540 /* increase period by 100% */
7541 fas->f_neg_period[tgt] *= 2;
7542
7543 fas_log(fas, CE_WARN,
7544 "Target %d reducing sync. transfer rate", tgt);
7545 }
7546 }
7547 fas->f_backoff |= tshift;
7548
7549 /*
7550 * always enable slow cable mode, if not already enabled
7551 */
7552 if ((fas->f_fasconf & FAS_CONF_SLOWMODE) == 0) {
7553 fas->f_fasconf |= FAS_CONF_SLOWMODE;
7554 fas_reg_write(fas, &fas->f_reg->fas_conf, fas->f_fasconf);
7555 IPRINTF("Reverting to slow SCSI cable mode\n");
7556 }
7557
7558 /*
7559 * Force sync renegotiation and update properties
7560 */
7561 fas_force_renegotiation(fas, tgt);
7562 fas->f_props_update |= (1<<tgt);
7563 }
7564
7565 /*
7566 * handle failed negotiations (either reject or bus free condition)
7567 */
7568 static void
7569 fas_reset_sync_wide(struct fas *fas)
7570 {
7571 struct fas_cmd *sp = fas->f_current_sp;
7572 int tgt = Tgt(sp);
7573
7574 if (fas->f_wdtr_sent) {
7575 IPRINTF("wide neg message rejected or bus free\n");
7576 fas->f_nowide |= (1<<tgt);
7577 fas->f_fasconf3[tgt] &= ~FAS_CONF3_WIDE;
7578 fas_reg_write(fas, &fas->f_reg->fas_conf3,
7579 fas->f_fasconf3[tgt]);
7580 /*
7581 * clear offset just in case it goes to
7582 * data phase
7583 */
7584 fas_reg_write(fas,
7585 (uchar_t *)&fas->f_reg->fas_sync_offset, 0);
7586 } else if (fas->f_sdtr_sent) {
7587 volatile struct fasreg *fasreg =
7588 fas->f_reg;
7589 IPRINTF("sync neg message rejected or bus free\n");
7590 fas->f_nosync |= (1<<tgt);
7591 fas->f_offset[tgt] = 0;
7592 fas->f_sync_period[tgt] = 0;
7593 fas_reg_write(fas,
7594 (uchar_t *)&fasreg->fas_sync_period, 0);
7595 fas_reg_write(fas,
7596 (uchar_t *)&fasreg->fas_sync_offset, 0);
7597 fas->f_offset[tgt] = 0;
7598 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
7599 fas_reg_write(fas, &fasreg->fas_conf3,
7600 fas->f_fasconf3[tgt]);
7601 }
7602
7603 fas_force_renegotiation(fas, tgt);
7604 }
7605
7606 /*
7607 * force wide and sync renegotiation
7608 */
7609 static void
7610 fas_force_renegotiation(struct fas *fas, int target)
7611 {
7612 ushort_t tshift = 1<<target;
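/*
 * clearing the "known" bits forces a fresh SDTR/WDTR exchange on
 * the next selection of this target; clearing the "enabled" bits
 * makes the driver treat the target as async/narrow until that
 * exchange completes.
 */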
7613 fas->f_sync_known &= ~tshift;
7614 fas->f_sync_enabled &= ~tshift;
7615 fas->f_wide_known &= ~tshift;
7616 fas->f_wide_enabled &= ~tshift;
7617 }
7618
7619 /*
7620 * update conf3 register for wide negotiation
7621 */
7622 static void
7623 fas_set_wide_conf3(struct fas *fas, int target, int width)
7624 {
7625 ASSERT(width <= 1);
7626 switch (width) {
7627 case 0:
7628 fas->f_fasconf3[target] &= ~FAS_CONF3_WIDE;
7629 break;
7630 case 1:
7631 fas->f_fasconf3[target] |= FAS_CONF3_WIDE;
7632 fas->f_wide_enabled |= (1<<target);
7633 break;
7634 }
7635
7636 fas_reg_write(fas, &fas->f_reg->fas_conf3, fas->f_fasconf3[target]);
7637 fas->f_fasconf3_reg_last = fas->f_fasconf3[target];
7638 }
7639
7640 /*
7641 * Abort command handling
7642 *
7643 * abort current cmd, either by device reset or immediately with bus reset
7644 * (usually an abort msg doesn't completely solve the problem, therefore
7645 * a device or bus reset is recommended)
7646 */
7647 static int
7648 fas_abort_curcmd(struct fas *fas)
7649 {
7650 if (fas->f_current_sp) {
7651 return (fas_abort_cmd(fas, fas->f_current_sp,
7652 fas->f_current_sp->cmd_slot));
7653 } else {
7654 return (fas_reset_bus(fas));
7655 }
7656 }
7657
7658 static int
7659 fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot)
7660 {
7661 struct scsi_address ap;
7662
7663 ap.a_hba_tran = fas->f_tran;
7664 ap.a_target = slot / NLUNS_PER_TARGET;
7665 ap.a_lun = slot % NLUNS_PER_TARGET;
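/*
 * a slot encodes the address as target * NLUNS_PER_TARGET + lun,
 * so the scsi_address is recovered here by division and remainder.
 */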
7666
7667 IPRINTF1("abort cmd 0x%p\n", (void *)sp);
7668
7669 /*
7670  * attempting to abort a connected cmd is usually fruitless, so
7671  * only try disconnected cmds;
7672  * a reset is preferable to an abort (see 1161701)
7673  */
7674 if ((fas->f_current_sp && (fas->f_current_sp->cmd_slot != slot)) ||
7675 (fas->f_state == STATE_FREE)) {
7676 IPRINTF2("attempting to reset target %d.%d\n",
7677 ap.a_target, ap.a_lun);
7678 if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
7679 return (ACTION_SEARCH);
7680 }
7681 }
7682
7683 /*
7684  * if the target won't listen, then a retry is useless;
7685  * there is also the possibility that the cmd completed while
7686  * we were trying to reset, and that the target driver did a
7687  * device reset which has blown away this sp.
7688  * well, we've tried; now pull the chain
7689  */
7690 IPRINTF("aborting all cmds by bus reset\n");
7691 return (fas_reset_bus(fas));
7692 }
7693
7694 /*
7695  * fas_do_scsi_abort() assumes that we already have the mutex.
7696  * during the abort, we hold the mutex and prevent callbacks by setting
7697  * the completion pointer to NULL; this also prevents a target driver
7698  * from attempting a scsi_abort/reset while we are aborting.
7699  * because the completion pointer is NULL we can still update the
7700  * packet after completion.
7701  * the throttle for this slot is cleared either by fas_abort_connected_cmd
7702  * or fas_runpoll, which prevents new cmds from starting while aborting
7703  */
7704 static int
7705 fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
7706 {
7707 struct fas *fas = ADDR2FAS(ap);
7708 struct fas_cmd *sp;
7709 int rval = FALSE;
7710 short slot;
7711 struct fas_cmd *cur_sp = fas->f_current_sp;
7712 void (*cur_savec)(), (*sp_savec)();
7713 int sp_tagged_flag, abort_msg;
7714
7715 if (pkt) {
7716 sp = PKT2CMD(pkt);
7717 slot = sp->cmd_slot;
7718 ASSERT(slot == ((ap->a_target * NLUNS_PER_TARGET) | ap->a_lun));
7719 } else {
7720 sp = NULL;
7721 slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
7722 }
7723
7724 fas_move_waitQ_to_readyQ(fas);
7725
7726 /*
7727  * If no specific command was passed, all cmds here will be aborted;
7728  * if a specific command was passed as an argument (to be aborted),
7729  * only the specified command will be aborted
7730  */
7731 ASSERT(mutex_owned(FAS_MUTEX(fas)));
7732 IPRINTF4("fas_scsi_abort for slot %x, "
7733 "sp=0x%p, pkt_flags=%x, cur_sp=0x%p\n",
7734 slot, (void *)sp, (sp? sp->cmd_pkt_flags : 0), (void *)cur_sp);
7735
7736 /*
7737 * first check if the cmd is in the ready queue or
7738 * in the active queue
7739 */
7740 if (sp) {
7741 IPRINTF3("aborting one command 0x%p for %d.%d\n",
7742 (void *)sp, ap->a_target, ap->a_lun);
7743 rval = fas_remove_from_readyQ(fas, sp, slot);
7744 if (rval) {
7745 IPRINTF("aborted one ready cmd\n");
7746 fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
7747 fas_decrement_ncmds(fas, sp);
7748 fas_call_pkt_comp(fas, sp);
7749 goto exit;
7750
7751 } else if ((sp !=
7752 fas->f_active[slot]->f_slot[sp->cmd_tag[1]])) {
7753 IPRINTF("cmd doesn't exist here\n");
7754 rval = TRUE;
7755 goto exit;
7756 }
7757 }
7758
7759 /*
7760 * hold off any new commands while attempting to abort
7761 * an active cmd
7762 */
7763 fas_set_throttles(fas, slot, 1, HOLD_THROTTLE);
7764
7765 if (cur_sp) {
7766 /*
7767 * prevent completion on current cmd
7768 */
7769 cur_savec = cur_sp->cmd_pkt->pkt_comp;
7770 cur_sp->cmd_pkt->pkt_comp = NULL;
7771 }
7772
7773 if (sp) {
7774 /*
7775 * the cmd exists here. is it connected or disconnected?
7776 * if connected but still selecting then can't abort now.
7777 * prevent completion on this cmd
7778 */
7779 sp_tagged_flag = (sp->cmd_pkt_flags & FLAG_TAGMASK);
7780 abort_msg = (sp_tagged_flag? MSG_ABORT_TAG : MSG_ABORT);
7781 sp_savec = sp->cmd_pkt->pkt_comp;
7782 sp->cmd_pkt->pkt_comp = NULL;
7783
7784 /* connected but not selecting? */
7785 if ((sp == cur_sp) && (fas->f_state != STATE_FREE) &&
7786 (sp->cmd_pkt->pkt_state)) {
7787 rval = fas_abort_connected_cmd(fas, sp, abort_msg);
7788 }
7789
7790 /* if abort connected cmd failed, try abort disconnected */
7791 if ((rval == 0) &&
7792 (sp->cmd_flags & CFLAG_CMDDISC) &&
7793 ((sp->cmd_flags & CFLAG_COMPLETED) == 0)) {
7794 rval = fas_abort_disconnected_cmd(fas, ap, sp,
7795 abort_msg, slot);
7796 }
7797
7798 if (rval) {
7799 sp->cmd_flags |= CFLAG_COMPLETED;
7800 fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
7801 }
7802
7803 sp->cmd_pkt->pkt_comp = sp_savec;
7804
7805 } else {
7806 IPRINTF2("aborting all commands for %d.%d\n",
7807 ap->a_target, ap->a_lun);
7808 abort_msg = MSG_ABORT;
7809
7810 /* active and not selecting ? */
7811 if (cur_sp && (fas->f_state != STATE_FREE) &&
7812 (cur_sp->cmd_slot == slot) &&
7813 cur_sp->cmd_pkt->pkt_state) {
7814 rval = fas_abort_connected_cmd(fas, cur_sp,
7815 abort_msg);
7816 }
7817 if (rval == 0) {
7818 rval = fas_abort_disconnected_cmd(fas, ap,
7819 NULL, abort_msg, slot);
7820 }
7821 }
7822
7823 done:
7824 /* complete the current sp */
7825 if (cur_sp) {
7826 cur_sp->cmd_pkt->pkt_comp = cur_savec;
7827 if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
7828 fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
7829 cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
7830 fas_decrement_ncmds(fas, cur_sp);
7831 fas_call_pkt_comp(fas, cur_sp);
7832 }
7833 }
7834
7835 /* complete the sp passed as 2nd arg */
7836 if (sp && (sp != cur_sp) && (sp->cmd_flags & CFLAG_COMPLETED)) {
7837 sp->cmd_flags &= ~CFLAG_COMPLETED;
7838 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
7839 fas_decrement_ncmds(fas, sp);
7840 fas_call_pkt_comp(fas, sp);
7841 }
7842
7843 /* clean up all cmds for this slot */
7844 if (rval && (abort_msg == MSG_ABORT)) {
7845 /*
7846 * mark all commands here as aborted
7847 * abort msg has been accepted, now cleanup queues;
7848 */
7849 fas_mark_packets(fas, slot, CMD_ABORTED, STAT_ABORTED);
7850 fas_flush_tagQ(fas, slot);
7851 fas_flush_readyQ(fas, slot);
7852 }
7853 fas_set_throttles(fas, slot, 1, MAX_THROTTLE);
7854
7855 exit:
7856 if (fas->f_state == STATE_FREE) {
7857 (void) fas_ustart(fas);
7858 }
7859
7860 ASSERT(mutex_owned(FAS_MUTEX(fas)));
7861
7862 #ifdef FASDEBUG
7863 if (rval && fas_test_stop) {
7864 debug_enter("abort succeeded");
7865 }
7866 #endif
7867 return (rval);
7868 }
7869
7870 /*
7871 * mark all packets with new reason and update statistics
7872 */
7873 static void
7874 fas_mark_packets(struct fas *fas, int slot, uchar_t reason, uint_t stat)
7875 {
7876 struct fas_cmd *sp = fas->f_readyf[slot];
7877
7878 while (sp != 0) {
7879 fas_set_pkt_reason(fas, sp, reason, STAT_ABORTED);
7880 sp = sp->cmd_forw;
7881 }
7882 if (fas->f_tcmds[slot]) {
7883 int n = 0;
7884 ushort_t tag;
7885
7886 for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
7887 if ((sp = fas->f_active[slot]->f_slot[tag]) != 0) {
7888 fas_set_pkt_reason(fas, sp, reason, stat);
7889 n++;
7890 }
7891 }
7892 ASSERT(fas->f_tcmds[slot] == n);
7893 }
7894 }
7895
7896 /*
7897 * set pkt_reason and OR in pkt_statistics flag
7898 */
7899 static void
7900 fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
7901 uint_t stat)
7902 {
7903 if (sp) {
7904 if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
7905 sp->cmd_pkt->pkt_reason = reason;
7906 }
7907 sp->cmd_pkt->pkt_statistics |= stat;
7908 IPRINTF3("sp=0x%p, pkt_reason=%x, pkt_stat=%x\n",
7909 (void *)sp, reason, sp->cmd_pkt->pkt_statistics);
7910 }
7911 }
7912
7913 /*
7914 * delete specified cmd from the ready queue
7915 */
7916 static int
7917 fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp, int slot)
7918 {
7919 struct fas_cmd *ssp, *psp;
7920
7921 /*
7922 * command has not been started yet and is still in the ready queue
7923 */
7924 if (sp) {
7925 ASSERT(fas->f_ncmds > 0);
7926 /*
7927 * find packet on the ready queue and remove it
7928 */
7929 for (psp = NULL, ssp = fas->f_readyf[slot]; ssp != NULL;
7930 psp = ssp, ssp = ssp->cmd_forw) {
7931 if (ssp == sp) {
7932 if (fas->f_readyf[slot] == sp) {
7933 fas->f_readyf[slot] = sp->cmd_forw;
7934 } else {
7935 psp->cmd_forw = sp->cmd_forw;
7936 }
7937 if (fas->f_readyb[slot] == sp) {
7938 fas->f_readyb[slot] = psp;
7939 }
7940 return (TRUE);
7941 }
7942 }
7943 }
7944 return (FALSE);
7945 }
7946
7947 /*
7948  * add cmd to the head of the readyQ;
7949  * due to tag allocation failure or preemption we have to return
7950  * this cmd to the readyQ
7951  */
7952 static void
7953 fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp)
7954 {
7955 /*
7956 * never return a NOINTR pkt to the readyQ
7957 * (fas_runpoll will resubmit)
7958 */
7959 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7960 struct fas_cmd *dp;
7961 int slot = sp->cmd_slot;
7962
7963 dp = fas->f_readyf[slot];
7964 fas->f_readyf[slot] = sp;
7965 sp->cmd_forw = dp;
7966 if (fas->f_readyb[slot] == NULL) {
7967 fas->f_readyb[slot] = sp;
7968 }
7969 }
7970 }
7971
7972 /*
7973 * flush cmds in ready queue
7974 */
7975 static void
7976 fas_flush_readyQ(struct fas *fas, int slot)
7977 {
7978 if (fas->f_readyf[slot]) {
7979 struct fas_cmd *sp, *nsp;
7980
7981 IPRINTF1("flushing ready queue, slot=%x\n", slot);
7982 ASSERT(fas->f_ncmds > 0);
7983
7984 sp = fas->f_readyf[slot];
7985 fas->f_readyf[slot] = fas->f_readyb[slot] = NULL;
7986
7987 while (sp != 0) {
7988 /*
7989 * save the forward pointer before calling
7990 * the completion routine
7991 */
7992 nsp = sp->cmd_forw;
7993 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
7994 ASSERT(Tgt(sp) == slot/NLUNS_PER_TARGET);
7995 fas_decrement_ncmds(fas, sp);
7996 fas_call_pkt_comp(fas, sp);
7997 sp = nsp;
7998 }
7999 fas_check_ncmds(fas);
8000 }
8001 }
8002
8003 /*
8004  * clean up the tag queue;
8005  * preserve some order by starting with the oldest tag
8006  */
8007 static void
8008 fas_flush_tagQ(struct fas *fas, int slot)
8009 {
8010 ushort_t tag, starttag;
8011 struct fas_cmd *sp;
8012 struct f_slots *tagque = fas->f_active[slot];
8013
8014 if (tagque == NULL) {
8015 return;
8016 }
8017
8018 DPRINTF2("flushing entire tag queue, slot=%x, tcmds=%x\n",
8019 slot, fas->f_tcmds[slot]);
8020
8021 #ifdef FASDEBUG
8022 {
8023 int n = 0;
8024 for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
8025 if ((sp = tagque->f_slot[tag]) != 0) {
8026 n++;
8027 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
8028 if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
8029 if ((sp->cmd_flags & CFLAG_FINISHED) ==
8030 0) {
8031 debug_enter("fas_flush_tagQ");
8032 }
8033 }
8034 }
8035 }
8036 ASSERT(fas->f_tcmds[slot] == n);
8037 }
8038 #endif
8039 tag = starttag = fas->f_active[slot]->f_tags;
8040
8041 do {
8042 if ((sp = tagque->f_slot[tag]) != 0) {
8043 fas_flush_cmd(fas, sp, 0, 0);
8044 }
8045 tag = ((ushort_t)(tag + 1)) %
8046 (ushort_t)fas->f_active[slot]->f_n_slots;
8047 } while (tag != starttag);
8048
8049 ASSERT(fas->f_tcmds[slot] == 0);
8050 EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
8051 fas_check_ncmds(fas);
8052 }
8053
8054 /*
8055 * cleanup one active command
8056 */
8057 static void
8058 fas_flush_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
8059 uint_t stat)
8060 {
8061 short slot = sp->cmd_slot;
8062
8063 ASSERT(fas->f_ncmds > 0);
8064 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
8065 ASSERT(sp == fas->f_active[slot]->f_slot[sp->cmd_tag[1]]);
8066
8067 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
8068 fas_decrement_ncmds(fas, sp);
8069 fas_set_pkt_reason(fas, sp, reason, stat);
8070 fas_call_pkt_comp(fas, sp);
8071
8072 EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
8073 fas_check_ncmds(fas);
8074 }
8075
8076 /*
8077 * prepare a proxy cmd (a cmd sent on behalf of the target driver,
8078 * usually for error recovery or abort/reset)
8079 */
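/*
 * for example, fas_abort_disconnected_cmd() below builds a
 * three-byte abort-tag message like this:
 *
 *	fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 3,
 *	    MSG_SIMPLE_QTAG, tag, MSG_ABORT_TAG);
 */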
8080 static void
8081 fas_makeproxy_cmd(struct fas_cmd *sp, struct scsi_address *ap,
8082 struct scsi_pkt *pkt, int nmsgs, ...)
8083 {
8084 va_list vap;
8085 int i;
8086
8087 ASSERT(nmsgs <= (CDB_GROUP5 - CDB_GROUP0 - 3));
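/*
 * the proxy cmd never goes to the device as a real CDB; cmd_cdb[]
 * is reused as scratch space: cdb[FAS_PROXY_TYPE] holds the request
 * type, cdb[FAS_PROXY_RESULT] the outcome, and cdb[FAS_PROXY_DATA]
 * the message byte count, followed by the message bytes themselves.
 */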
8088
8089 bzero(sp, sizeof (*sp));
8090 bzero(pkt, scsi_pkt_size());
8091
8092 pkt->pkt_address = *ap;
8093 pkt->pkt_cdbp = (opaque_t)&sp->cmd_cdb[0];
8094 pkt->pkt_scbp = (opaque_t)&sp->cmd_scb;
8095 pkt->pkt_ha_private = (opaque_t)sp;
8096 sp->cmd_pkt = pkt;
8097 sp->cmd_scblen = 1;
8098 sp->cmd_pkt_flags = pkt->pkt_flags = FLAG_NOINTR;
8099 sp->cmd_flags = CFLAG_CMDPROXY;
8100 sp->cmd_cdb[FAS_PROXY_TYPE] = FAS_PROXY_SNDMSG;
8101 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
8102 sp->cmd_cdb[FAS_PROXY_DATA] = (char)nmsgs;
8103
8104 va_start(vap, nmsgs);
8105 for (i = 0; i < nmsgs; i++) {
8106 sp->cmd_cdb[FAS_PROXY_DATA + 1 + i] = (uchar_t)va_arg(vap, int);
8107 }
8108 va_end(vap);
8109 }
8110
8111 /*
8112 * send a proxy cmd and check the result
8113 */
8114 static int
8115 fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
8116 struct scsi_address *ap, char *what)
8117 {
8118 int rval;
8119
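/*
 * the proxy pkt was built with FLAG_NOINTR, so fas_accept_pkt() is
 * expected to run it to completion in polled mode (fas_runpoll())
 * before returning; the result byte is then valid to inspect here.
 */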
8120 IPRINTF3("Sending proxy %s message to %d.%d\n", what,
8121 ap->a_target, ap->a_lun);
8122 if (fas_accept_pkt(fas, sp, TRAN_BUSY_OK) == TRAN_ACCEPT &&
8123 sp->cmd_pkt->pkt_reason == CMD_CMPLT &&
8124 sp->cmd_cdb[FAS_PROXY_RESULT] == TRUE) {
8125 IPRINTF3("Proxy %s succeeded for %d.%d\n", what,
8126 ap->a_target, ap->a_lun);
8127 ASSERT(fas->f_current_sp != sp);
8128 rval = TRUE;
8129 } else {
8130 IPRINTF5(
8131 "Proxy %s failed for %d.%d, result=%x, reason=%x\n", what,
8132 ap->a_target, ap->a_lun, sp->cmd_cdb[FAS_PROXY_RESULT],
8133 sp->cmd_pkt->pkt_reason);
8134 ASSERT(fas->f_current_sp != sp);
8135 rval = FALSE;
8136 }
8137 return (rval);
8138 }
8139
8140 /*
8141 * abort a connected command by sending an abort msg; hold off on
8142 * starting new cmds by setting throttles to HOLD_THROTTLE
8143 */
8144 static int
8145 fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t msg)
8146 {
8147 int rval = FALSE;
8148 int flags = sp->cmd_pkt_flags;
8149
8150 /*
8151  * if a reset delay is active, we cannot access the target.
8152  */
8153 if (fas->f_reset_delay[Tgt(sp)]) {
8154 return (rval);
8155 }
8156
8157 /*
8158 * only abort while in data phase; otherwise we mess up msg phase
8159 */
8160 if (!((fas->f_state == ACTS_DATA) ||
8161 (fas->f_state == ACTS_DATA_DONE))) {
8162 return (rval);
8163 }
8164
8165
8166 IPRINTF3("Sending abort message %s to connected %d.%d\n",
8167 scsi_mname(msg), Tgt(sp), Lun(sp));
8168
8169
8170 fas->f_abort_msg_sent = 0;
8171 fas->f_omsglen = 1;
8172 fas->f_cur_msgout[0] = msg;
8173 sp->cmd_pkt_flags |= FLAG_NOINTR;
8174 fas_assert_atn(fas);
8175
8176 (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
8177
8178 /*
8179  * now check if the msg was taken;
8180  * f_abort_msg_sent is set in fas_handle_msg_out_done when the abort
8181  * msg has actually gone out (ie. msg out phase occurred)
8182  */
8183 if (fas->f_abort_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
8184 IPRINTF2("target %d.%d aborted\n",
8185 Tgt(sp), Lun(sp));
8186 rval = TRUE;
8187 } else {
8188 IPRINTF2("target %d.%d did not abort\n",
8189 Tgt(sp), Lun(sp));
8190 }
8191 sp->cmd_pkt_flags = flags;
8192 fas->f_omsglen = 0;
8193 return (rval);
8194 }
8195
8196 /*
8197 * abort a disconnected command; if it is a tagged command, we need
8198 * to include the tag
8199 */
8200 static int
8201 fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
8202 struct fas_cmd *sp, uchar_t msg, int slot)
8203 {
8204 auto struct fas_cmd local;
8205 struct fas_cmd *proxy_cmdp = &local;
8206 struct scsi_pkt *pkt;
8207 int rval;
8208 int target = ap->a_target;
8209
8210 /*
8211 * if reset delay is active, we cannot start a selection
8212 * and there shouldn't be a cmd outstanding
8213 */
8214 if (fas->f_reset_delay[target] != 0) {
8215 return (FALSE);
8216 }
8217
8218 if (sp)
8219 ASSERT(sp->cmd_slot == slot);
8220
8221 IPRINTF1("aborting disconnected tagged cmd(s) with %s\n",
8222 scsi_mname(msg));
8223 pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
8224 if (sp && (TAGGED(target) && (msg == MSG_ABORT_TAG))) {
8225 int tag = sp->cmd_tag[1];
8226 ASSERT(sp == fas->f_active[slot]->f_slot[tag]);
8227 fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 3,
8228 MSG_SIMPLE_QTAG, tag, msg);
8229 } else {
8230 fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 1, msg);
8231 }
8232
8233 rval = fas_do_proxy_cmd(fas, proxy_cmdp, ap, scsi_mname(msg));
8234 kmem_free(pkt, scsi_pkt_size());
8235 return (rval);
8236 }
8237
8238 /*
8239 * reset handling:
8240 * fas_do_scsi_reset assumes that we have already entered the mutex
8241 */
8242 static int
8243 fas_do_scsi_reset(struct scsi_address *ap, int level)
8244 {
8245 int rval = FALSE;
8246 struct fas *fas = ADDR2FAS(ap);
8247 short slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
8248
8249 ASSERT(mutex_owned(FAS_MUTEX(fas)));
8250 IPRINTF3("fas_scsi_reset for slot %x, level=%x, tcmds=%x\n",
8251 slot, level, fas->f_tcmds[slot]);
8252
8253 fas_move_waitQ_to_readyQ(fas);
8254
8255 if (level == RESET_ALL) {
8256 /*
8257 * We know that fas_reset_bus() returns ACTION_RETURN.
8258 */
8259 (void) fas_reset_bus(fas);
8260
8261 /*
8262 * Now call fas_dopoll() to field the reset interrupt
8263 * which will then call fas_reset_recovery which will
8264 * call the completion function for all commands.
8265 */
8266 if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
8267 /*
8268 * reset fas
8269 */
8270 fas_internal_reset(fas, FAS_RESET_FAS);
8271 (void) fas_reset_bus(fas);
8272 if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
8273 fas_log(fas,
8274 CE_WARN, "reset scsi bus failed");
8275 New_state(fas, STATE_FREE);
8276 } else {
8277 rval = TRUE;
8278 }
8279 } else {
8280 rval = TRUE;
8281 }
8282
8283 } else {
8284 struct fas_cmd *cur_sp = fas->f_current_sp;
8285 void (*savec)() = NULL;
8286
8287 /*
8288 * prevent new commands from starting
8289 */
8290 fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
8291
8292 /*
8293 * zero pkt_comp so it won't complete during the reset and
8294 * we can still update the packet after the reset.
8295 */
8296 if (cur_sp) {
8297 savec = cur_sp->cmd_pkt->pkt_comp;
8298 cur_sp->cmd_pkt->pkt_comp = NULL;
8299 }
8300
8301 /*
8302 * is this a connected cmd but not selecting?
8303 */
8304 if (cur_sp && (fas->f_state != STATE_FREE) &&
8305 (cur_sp->cmd_pkt->pkt_state != 0) &&
8306 (ap->a_target == (Tgt(cur_sp)))) {
8307 rval = fas_reset_connected_cmd(fas, ap);
8308 }
8309
8310 /*
8311 * if not connected or fas_reset_connected_cmd() failed,
8312 * attempt a reset_disconnected_cmd
8313 */
8314 if (rval == FALSE) {
8315 rval = fas_reset_disconnected_cmd(fas, ap);
8316 }
8317
8318 /*
8319 * cleanup if reset was successful
8320 * complete the current sp first.
8321 */
8322 if (cur_sp) {
8323 cur_sp->cmd_pkt->pkt_comp = savec;
8324 if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
8325 if (ap->a_target == (Tgt(cur_sp))) {
8326 fas_set_pkt_reason(fas, cur_sp,
8327 CMD_RESET, STAT_DEV_RESET);
8328 }
8329 fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
8330 cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
8331 fas_decrement_ncmds(fas, cur_sp);
8332 fas_call_pkt_comp(fas, cur_sp);
8333 }
8334 }
8335
8336 if (rval == TRUE) {
8337 fas_reset_cleanup(fas, slot);
8338 } else {
8339 IPRINTF1("fas_scsi_reset failed for slot %x\n", slot);
8340
8341 /*
8342  * restore throttles to max throttle, regardless
8343  * of what they were (fas_set_throttles() will
8344  * deal with an active reset delay);
8345  * restoring the old throttle is not
8346  * such a good idea
8347  */
8348 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
8349
8350 }
8351
8352 if (fas->f_state == STATE_FREE) {
8353 (void) fas_ustart(fas);
8354 }
8355 }
8356 exit:
8357 ASSERT(mutex_owned(FAS_MUTEX(fas)));
8358 ASSERT(fas->f_ncmds >= fas->f_ndisc);
8359
8360 #ifdef FASDEBUG
8361 if (rval && fas_test_stop) {
8362 debug_enter("reset succeeded");
8363 }
8364 #endif
8365 return (rval);
8366 }
8367
8368 /*
8369  * reset delay is handled by a separate watchdog; this ensures the
8370  * delay counts down at a fixed rate regardless of fas_scsi_watchdog_tick
8371  */
8372 static void
8373 fas_start_watch_reset_delay(struct fas *fas)
8374 {
8375 mutex_enter(&fas_global_mutex);
8376 if ((fas_reset_watch == 0) && FAS_CAN_SCHED) {
8377 fas_reset_watch = timeout(fas_watch_reset_delay, NULL,
8378 drv_usectohz((clock_t)FAS_WATCH_RESET_DELAY_TICK * 1000));
8379 }
8380 ASSERT((fas_reset_watch != 0) || (fas->f_flags & FAS_FLG_NOTIMEOUTS));
8381 mutex_exit(&fas_global_mutex);
8382 }
8383
8384 /*
8385 * set throttles to HOLD and set reset_delay for all target/luns
8386 */
8387 static void
8388 fas_setup_reset_delay(struct fas *fas)
8389 {
8390 if (!ddi_in_panic()) {
8391 int i;
8392
8393 fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
8394 for (i = 0; i < NTARGETS_WIDE; i++) {
8395 fas->f_reset_delay[i] = fas->f_scsi_reset_delay;
8396 }
8397 fas_start_watch_reset_delay(fas);
8398 } else {
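/*
 * at panic time timeout(9F) callbacks never fire, so instead of
 * arming the reset-delay watchdog we simply busy-wait for the
 * full reset delay here.
 */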
8399 drv_usecwait(fas->f_scsi_reset_delay * 1000);
8400 }
8401 }
8402
8403 /*
8404 * fas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8405 * fas instance for active reset delays
8406 */
8407 /*ARGSUSED*/
8408 static void
8409 fas_watch_reset_delay(void *arg)
8410 {
8411 struct fas *fas;
8412 struct fas *lfas; /* last not_done fas */
8413 int not_done = 0;
8414
8415 mutex_enter(&fas_global_mutex);
8416 fas_reset_watch = 0;
8417 mutex_exit(&fas_global_mutex);
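/*
 * clear the timeout id under the global mutex first, so that
 * fas_start_watch_reset_delay() can arm a fresh watchdog while
 * we scan the instance list below.
 */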
8418
8419 rw_enter(&fas_global_rwlock, RW_READER);
8420 for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {
8421 if (fas->f_tran == 0) {
8422 continue;
8423 }
8424 mutex_enter(FAS_MUTEX(fas));
8425 not_done += fas_watch_reset_delay_subr(fas);
8426 lfas = fas;
8427 fas_check_waitQ_and_mutex_exit(fas);
8428 }
8429 rw_exit(&fas_global_rwlock);
8430 if (not_done) {
8431 ASSERT(lfas != NULL);
8432 fas_start_watch_reset_delay(lfas);
8433 }
8434 }
8435
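/*
 * check all targets of one instance for an active reset delay and
 * count it down; returns nonzero if any delay is still pending so
 * that fas_watch_reset_delay() knows to re-arm the watchdog.
 */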
8436 static int
8437 fas_watch_reset_delay_subr(struct fas *fas)
8438 {
8439 short slot, s;
8440 int start_slot = -1;
8441 int done = 0;
8442
8443 for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {
8444
8445 /*
8446 * check if a reset delay is active; if so back to full throttle
8447 * which will unleash the cmds in the ready Q
8448 */
8449 s = slot/NLUNS_PER_TARGET;
8450 if (fas->f_reset_delay[s] != 0) {
8451 EPRINTF2("target%d: reset delay=%d\n", s,
8452 fas->f_reset_delay[s]);
8453 fas->f_reset_delay[s] -= FAS_WATCH_RESET_DELAY_TICK;
8454 if (fas->f_reset_delay[s] <= 0) {
8455 /*
8456 * clear throttle for all luns on this target
8457 */
8458 fas->f_reset_delay[s] = 0;
8459 fas_set_all_lun_throttles(fas,
8460 slot, MAX_THROTTLE);
8461 IPRINTF1("reset delay completed, slot=%x\n",
8462 slot);
8463 if (start_slot == -1) {
8464 start_slot = slot;
8465 }
8466 } else {
8467 done = -1;
8468 }
8469 }
8470 }
8471
8472 /*
8473 * start a cmd if a reset delay expired
8474 */
8475 if (start_slot != -1 && fas->f_state == STATE_FREE) {
8476 (void) fas_ustart(fas);
8477 }
8478 return (done);
8479 }
8480
8481 /*
8482  * clean up after a device reset; this affects all of the target's luns
8483  */
8484 static void
8485 fas_reset_cleanup(struct fas *fas, int slot)
8486 {
8487 /*
8488 * reset msg has been accepted, now cleanup queues;
8489 * for all luns of this target
8490 */
8491 int i, start, end;
8492 int target = slot/NLUNS_PER_TARGET;
8493
8494 start = slot & ~(NLUNS_PER_TARGET-1);
8495 end = start + NLUNS_PER_TARGET;
8496 IPRINTF4("fas_reset_cleanup: slot %x, start=%x, end=%x, tcmds=%x\n",
8497 slot, start, end, fas->f_tcmds[slot]);
8498
8499 ASSERT(!(fas->f_current_sp &&
8500 (fas->f_current_sp->cmd_slot == slot) &&
8501 (fas->f_state & STATE_SELECTING)));
8502
8503 /*
8504  * if we are not in panic, set up a reset delay for this target;
8505  * a zero throttle forces all new requests into the ready Q
8506  */
8507 if (!ddi_in_panic()) {
8508 fas_set_all_lun_throttles(fas, start, HOLD_THROTTLE);
8509 fas->f_reset_delay[target] = fas->f_scsi_reset_delay;
8510 fas_start_watch_reset_delay(fas);
8511 } else {
8512 drv_usecwait(fas->f_scsi_reset_delay * 1000);
8513 }
8514
8515 for (i = start; i < end; i++) {
8516 fas_mark_packets(fas, i, CMD_RESET, STAT_DEV_RESET);
8517 fas_flush_tagQ(fas, i);
8518 fas_flush_readyQ(fas, i);
8519 if (fas->f_arq_pkt[i]) {
8520 struct fas_cmd *sp = fas->f_arq_pkt[i];
8521 struct arq_private_data *arq_data =
8522 (struct arq_private_data *)
8523 (sp->cmd_pkt->pkt_private);
8524 if (sp->cmd_pkt->pkt_comp) {
8525 ASSERT(arq_data->arq_save_sp == NULL);
8526 }
8527 }
8528 ASSERT(fas->f_tcmds[i] == 0);
8529 }
8530 ASSERT(fas->f_ncmds >= fas->f_ndisc);
8531
8532 fas_force_renegotiation(fas, target);
8533 }
8534
8535 /*
8536 * reset a currently disconnected target
8537 */
8538 static int
8539 fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap)
8540 {
8541 auto struct fas_cmd local;
8542 struct fas_cmd *sp = &local;
8543 struct scsi_pkt *pkt;
8544 int rval;
8545
8546 pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
8547 fas_makeproxy_cmd(sp, ap, pkt, 1, MSG_DEVICE_RESET);
8548 rval = fas_do_proxy_cmd(fas, sp, ap, scsi_mname(MSG_DEVICE_RESET));
8549 kmem_free(pkt, scsi_pkt_size());
8550 return (rval);
8551 }
8552
8553 /*
8554  * reset a target with a currently connected command;
8555  * assert ATN and send MSG_DEVICE_RESET, zeroing throttles temporarily
8556  * to prevent new cmds from starting, regardless of the outcome
8557  */
8558 static int
8559 fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap)
8560 {
8561 int rval = FALSE;
8562 struct fas_cmd *sp = fas->f_current_sp;
8563 int flags = sp->cmd_pkt_flags;
8564
8565 /*
8566 * only attempt to reset in data phase; during other phases
8567 * asserting ATN may just cause confusion
8568 */
8569 if (!((fas->f_state == ACTS_DATA) ||
8570 (fas->f_state == ACTS_DATA_DONE))) {
8571 return (rval);
8572 }
8573
8574 IPRINTF2("Sending reset message to connected %d.%d\n",
8575 ap->a_target, ap->a_lun);
8576 fas->f_reset_msg_sent = 0;
8577 fas->f_omsglen = 1;
8578 fas->f_cur_msgout[0] = MSG_DEVICE_RESET;
8579 sp->cmd_pkt_flags |= FLAG_NOINTR;
8580
8581 fas_assert_atn(fas);
8582
8583 /*
8584 * poll for interrupts until bus free
8585 */
8586 (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
8587
8588 /*
8589  * now check if the msg was taken;
8590  * f_reset_msg_sent is set in fas_handle_msg_out_done when the
8591  * msg has actually gone out (ie. msg out phase occurred)
8592  */
8593 if (fas->f_reset_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
8594 IPRINTF2("target %d.%d reset\n", ap->a_target, ap->a_lun);
8595 rval = TRUE;
8596 } else {
8597 IPRINTF2("target %d.%d did not reset\n",
8598 ap->a_target, ap->a_lun);
8599 }
8600 sp->cmd_pkt_flags = flags;
8601 fas->f_omsglen = 0;
8602
8603 return (rval);
8604 }
8605
8606 /*
8607 * reset the scsi bus to blow all commands away
8608 */
8609 static int
8610 fas_reset_bus(struct fas *fas)
8611 {
8612 IPRINTF("fas_reset_bus:\n");
8613 New_state(fas, ACTS_RESET);
8614
8615 fas_internal_reset(fas, FAS_RESET_SCSIBUS);
8616
8617 /*
8618 * Now that we've reset the SCSI bus, we'll take a SCSI RESET
8619 * interrupt and use that to clean up the state of things.
8620 */
8621 return (ACTION_RETURN);
8622 }
8623
8624 /*
8625 * fas_reset_recovery is called on the reset interrupt and cleans
8626 * up all cmds (active or waiting)
8627 */
8628 static int
8629 fas_reset_recovery(struct fas *fas)
8630 {
8631 short slot, start_slot;
8632 int i;
8633 int rval = ACTION_SEARCH;
8634 int max_loop = 0;
8635
8636 IPRINTF("fas_reset_recovery:\n");
8637 fas_check_ncmds(fas);
8638
8639 /*
8640 * renegotiate wide and sync for all targets
8641 */
8642 fas->f_sync_known = fas->f_wide_known = 0;
8643
8644 /*
8645 * reset dma engine
8646 */
8647 FAS_FLUSH_DMA_HARD(fas);
8648
8649 /*
8650 * set throttles and reset delay
8651 */
8652 fas_setup_reset_delay(fas);
8653
8654 /*
8655 * clear interrupts until they go away
8656 */
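/*
 * (reading the fas interrupt register acknowledges the latched
 * interrupt, so each pass through this loop drains one pending
 * interrupt; the spin limit guards against a wedged chip.)
 */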
8657 while (INTPENDING(fas) && (max_loop < FAS_RESET_SPIN_MAX_LOOP)) {
8658 volatile struct fasreg *fasreg = fas->f_reg;
8659 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
8660 fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
8661 fas->f_step = fas_reg_read(fas, &fasreg->fas_step);
8662 fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
8663 drv_usecwait(FAS_RESET_SPIN_DELAY_USEC);
8664 max_loop++;
8665 }
8666
8667 if (max_loop >= FAS_RESET_SPIN_MAX_LOOP) {
8668 fas_log(fas, CE_WARN, "Resetting SCSI bus failed");
8669 }
8670
8671 fas_reg_cmd_write(fas, CMD_FLUSH);
8672
8673 /*
8674  * reset the chip; this shouldn't be necessary, but sometimes
8675  * we get a hang in the next data-in phase
8676  */
8677 fas_internal_reset(fas, FAS_RESET_FAS);
8678
8679 /*
8680  * was the reset expected? if not, it must be an external bus reset
8681  */
8682 if (fas->f_state != ACTS_RESET) {
8683 if (fas->f_ncmds) {
8684 fas_log(fas, CE_WARN, "external SCSI bus reset");
8685 }
8686 }
8687
8688 if (fas->f_ncmds == 0) {
8689 rval = ACTION_RETURN;
8690 goto done;
8691 }
8692
8693 /*
8694 * completely reset the state of the softc data.
8695 */
8696 fas_internal_reset(fas, FAS_RESET_SOFTC);
8697
8698 /*
8699 * Hold the state of the host adapter open
8700 */
8701 New_state(fas, ACTS_FROZEN);
8702
8703 /*
8704 * for right now just claim that all
8705 * commands have been destroyed by a SCSI reset
8706 * and let already set reason fields or callers
8707 * decide otherwise for specific commands.
8708 */
8709 start_slot = fas->f_next_slot;
8710 slot = start_slot;
8711 do {
8712 fas_check_ncmds(fas);
8713 fas_mark_packets(fas, slot, CMD_RESET, STAT_BUS_RESET);
8714 fas_flush_tagQ(fas, slot);
8715 fas_flush_readyQ(fas, slot);
8716 if (fas->f_arq_pkt[slot]) {
8717 struct fas_cmd *sp = fas->f_arq_pkt[slot];
8718 struct arq_private_data *arq_data =
8719 (struct arq_private_data *)
8720 (sp->cmd_pkt->pkt_private);
8721 if (sp->cmd_pkt->pkt_comp) {
8722 ASSERT(arq_data->arq_save_sp == NULL);
8723 }
8724 }
8725 slot = NEXTSLOT(slot, fas->f_dslot);
8726 } while (slot != start_slot);
8727
8728 fas_check_ncmds(fas);
8729
8730 /*
8731 * reset timeouts
8732 */
8733 for (i = 0; i < N_SLOTS; i++) {
8734 if (fas->f_active[i]) {
8735 fas->f_active[i]->f_timebase = 0;
8736 fas->f_active[i]->f_timeout = 0;
8737 fas->f_active[i]->f_dups = 0;
8738 }
8739 }
8740
8741 done:
8742 /*
8743 * Move the state back to free...
8744 */
8745 New_state(fas, STATE_FREE);
8746 ASSERT(fas->f_ncmds >= fas->f_ndisc);
8747
8748 /*
8749 * perform the reset notification callbacks that are registered.
8750 */
8751 (void) scsi_hba_reset_notify_callback(&fas->f_mutex,
8752 &fas->f_reset_notify_listf);
8753
8754 /*
8755  * if a reset delay is still active, a search is meaningless,
8756  * but do it anyway
8757  */
8758 return (rval);
8759 }
8760
8761 /*
8762 * hba_tran ops for quiesce and unquiesce
8763 */
8764 static int
8765 fas_scsi_quiesce(dev_info_t *dip)
8766 {
8767 struct fas *fas;
8768 scsi_hba_tran_t *tran;
8769
8770 tran = ddi_get_driver_private(dip);
8771 if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8772 return (-1);
8773 }
8774
8775 return (fas_quiesce_bus(fas));
8776 }
8777
8778 static int
8779 fas_scsi_unquiesce(dev_info_t *dip)
8780 {
8781 struct fas *fas;
8782 scsi_hba_tran_t *tran;
8783
8784 tran = ddi_get_driver_private(dip);
8785 if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8786 return (-1);
8787 }
8788
8789 return (fas_unquiesce_bus(fas));
8790 }
8791
8792 #ifdef FAS_TEST
8793 /*
8794 * torture test functions
8795 */
8796 static void
8797 fas_test_reset(struct fas *fas, int slot)
8798 {
8799 struct scsi_address ap;
8800 char target = slot/NLUNS_PER_TARGET;
8801
8802 if (fas_rtest & (1 << target)) {
8803 ap.a_hba_tran = fas->f_tran;
8804 ap.a_target = target;
8805 ap.a_lun = 0;
8806 if ((fas_rtest_type == 1) &&
8807 (fas->f_state == ACTS_DATA_DONE)) {
8808 if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
8809 fas_rtest = 0;
8810 }
8811 } else if ((fas_rtest_type == 2) &&
8812 (fas->f_state == ACTS_DATA_DONE)) {
8813 if (fas_do_scsi_reset(&ap, RESET_ALL)) {
8814 fas_rtest = 0;
8815 }
8816 } else {
8817 if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
8818 fas_rtest = 0;
8819 }
8820 }
8821 }
8822 }
8823
8824 static void
8825 fas_test_abort(struct fas *fas, int slot)
8826 {
8827 struct fas_cmd *sp = fas->f_current_sp;
8828 struct scsi_address ap;
8829 char target = slot/NLUNS_PER_TARGET;
8830 struct scsi_pkt *pkt = NULL;
8831
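/*
 * fas_atest_disc selects the victim: 0 = connected cmd, 1 = oldest
 * disconnected cmd, 2 = all cmds for the slot (pkt == NULL),
 * 3/4/5 = first/second/last cmd in the readyQ, 6 = connected cmd
 * in data-done phase, 7 = abort-all with reset escalation.
 */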
8832 if (fas_atest & (1 << target)) {
8833 ap.a_hba_tran = fas->f_tran;
8834 ap.a_target = target;
8835 ap.a_lun = 0;
8836
8837 if ((fas_atest_disc == 0) && sp &&
8838 (sp->cmd_slot == slot) &&
8839 ((sp->cmd_flags & CFLAG_CMDDISC) == 0)) {
8840 pkt = sp->cmd_pkt;
8841 } else if ((fas_atest_disc == 1) && NOTAG(target)) {
8842 sp = fas->f_active[slot]->f_slot[0];
8843 if (sp && (sp->cmd_flags & CFLAG_CMDDISC)) {
8844 pkt = sp->cmd_pkt;
8845 }
8846 } else if ((fas_atest_disc == 1) && (sp == 0) &&
8847 TAGGED(target) &&
8848 (fas->f_tcmds[slot] != 0)) {
8849 int tag;
8850 /*
8851 * find the oldest tag
8852 */
8853 for (tag = NTAGS-1; tag >= 0; tag--) {
8854 if ((sp = fas->f_active[slot]->f_slot[tag])
8855 != 0)
8856 break;
8857 }
8858 if (sp) {
8859 pkt = sp->cmd_pkt;
8860 ASSERT(sp->cmd_slot == slot);
8861 } else {
8862 return;
8863 }
8864 } else if (fas_atest_disc == 2 && (sp == 0) &&
8865 (fas->f_tcmds[slot] != 0)) {
8866 pkt = NULL;
8867 } else if (fas_atest_disc == 2 && NOTAG(target)) {
8868 pkt = NULL;
8869 } else if (fas_atest_disc == 3 && fas->f_readyf[slot]) {
8870 pkt = fas->f_readyf[slot]->cmd_pkt;
8871 } else if (fas_atest_disc == 4 &&
8872 fas->f_readyf[slot] && fas->f_readyf[slot]->cmd_forw) {
8873 pkt = fas->f_readyf[slot]->cmd_forw->cmd_pkt;
8874 } else if (fas_atest_disc == 5 && fas->f_readyb[slot]) {
8875 pkt = fas->f_readyb[slot]->cmd_pkt;
8876 } else if ((fas_atest_disc == 6) && sp &&
8877 (sp->cmd_slot == slot) &&
8878 (fas->f_state == ACTS_DATA_DONE)) {
8879 pkt = sp->cmd_pkt;
8880 } else if (fas_atest_disc == 7) {
8881 if (fas_do_scsi_abort(&ap, NULL)) {
8882 if (fas_do_scsi_abort(&ap, NULL)) {
8883 if (fas_do_scsi_reset(&ap,
8884 RESET_TARGET)) {
8885 fas_atest = 0;
8886 }
8887 }
8888 }
8889 return;
8890 } else {
8891 return;
8892 }
8893
8894 fas_log(fas, CE_NOTE, "aborting pkt=0x%p state=%x\n",
8895 (void *)pkt, (pkt != NULL? pkt->pkt_state : 0));
8896 if (fas_do_scsi_abort(&ap, pkt)) {
8897 fas_atest = 0;
8898 }
8899 }
8900 }
8901 #endif /* FAS_TEST */
8902
8903 /*
8904 * capability interface
8905 */
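/*
 * fas_commoncap() is the shared backend for the getcap/setcap entry
 * points; a target driver typically reaches it through
 * scsi_ifgetcap(9F)/scsi_ifsetcap(9F), e.g.
 *
 *	(void) scsi_ifsetcap(ap, "tagged-qing", 1, 1);
 *
 * doset selects set vs. get; tgtonly selects per-target scope.
 */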
8906 static int
8907 fas_commoncap(struct scsi_address *ap, char *cap, int val,
8908 int tgtonly, int doset)
8909 {
8910 struct fas *fas = ADDR2FAS(ap);
8911 int cidx;
8912 int target = ap->a_target;
8913 ushort_t tshift = (1<<target);
8914 ushort_t ntshift = ~tshift;
8915 int rval = FALSE;
8916
8917 mutex_enter(FAS_MUTEX(fas));
8918
8919 if (cap == (char *)0) {
8920 goto exit;
8921 }
8922
8923 cidx = scsi_hba_lookup_capstr(cap);
8924 if (cidx == -1) {
8925 rval = UNDEFINED;
8926 } else if (doset) {
8927 /*
8928  * we don't allow setting capabilities for all targets
8929  * at once; a set must be target-specific (tgtonly)
8930  */
8931 if (!tgtonly) {
8932 goto exit;
8933 }
8934 switch (cidx) {
8935 case SCSI_CAP_DMA_MAX:
8936 case SCSI_CAP_MSG_OUT:
8937 case SCSI_CAP_PARITY:
8938 case SCSI_CAP_INITIATOR_ID:
8939 case SCSI_CAP_LINKED_CMDS:
8940 case SCSI_CAP_UNTAGGED_QING:
8941 case SCSI_CAP_RESET_NOTIFICATION:
8942 /*
8943 * None of these are settable via
8944 * the capability interface.
8945 */
8946 break;
8947
8948 case SCSI_CAP_DISCONNECT:
8949 if (val)
8950 fas->f_target_scsi_options[ap->a_target] |=
8951 SCSI_OPTIONS_DR;
8952 else
8953 fas->f_target_scsi_options[ap->a_target] &=
8954 ~SCSI_OPTIONS_DR;
8955
8956 break;
8957
8958 case SCSI_CAP_SYNCHRONOUS:
8959 if (val) {
8960 fas->f_force_async &= ~tshift;
8961 } else {
8962 fas->f_force_async |= tshift;
8963 }
8964 fas_force_renegotiation(fas, target);
8965 rval = TRUE;
8966 break;
8967
8968 case SCSI_CAP_TAGGED_QING:
8969 {
8970 int slot = target * NLUNS_PER_TARGET | ap->a_lun;
8971 ushort_t old_notag = fas->f_notag;
8972
8973 /* do not allow with active tgt */
8974 if (fas->f_tcmds[slot]) {
8975 break;
8976 }
8977
8978 slot = target * NLUNS_PER_TARGET | ap->a_lun;
8979
8980 if (val) {
8981 if (fas->f_target_scsi_options[target] &
8982 SCSI_OPTIONS_TAG) {
8983 IPRINTF1("target %d: TQ enabled\n",
8984 target);
8985 fas->f_notag &= ntshift;
8986 } else {
8987 break;
8988 }
8989 } else {
8990 IPRINTF1("target %d: TQ disabled\n",
8991 target);
8992 fas->f_notag |= tshift;
8993 }
8994
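/*
 * enabling TQ needs the larger active-slots array; if that
 * allocation fails, back out the f_notag change so the
 * bookkeeping stays consistent.
 */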
8995 if (val && fas_alloc_active_slots(fas, slot,
8996 KM_NOSLEEP)) {
8997 fas->f_notag = old_notag;
8998 break;
8999 }
9000
9001 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
9002
9003 fas_update_props(fas, target);
9004 rval = TRUE;
9005 break;
9006 }
9007
9008 case SCSI_CAP_WIDE_XFER:
9009 if (val) {
9010 if (fas->f_target_scsi_options[target] &
9011 SCSI_OPTIONS_WIDE) {
9012 fas->f_nowide &= ntshift;
9013 fas->f_force_narrow &= ~tshift;
9014 } else {
9015 break;
9016 }
9017 } else {
9018 fas->f_force_narrow |= tshift;
9019 }
9020 fas_force_renegotiation(fas, target);
9021 rval = TRUE;
9022 break;
9023
9024 case SCSI_CAP_ARQ:
9025 if (val) {
9026 if (fas_create_arq_pkt(fas, ap)) {
9027 break;
9028 }
9029 } else {
9030 if (fas_delete_arq_pkt(fas, ap)) {
9031 break;
9032 }
9033 }
9034 rval = TRUE;
9035 break;
9036
9037 case SCSI_CAP_QFULL_RETRIES:
9038 fas->f_qfull_retries[target] = (uchar_t)val;
9039 rval = TRUE;
9040 break;
9041
9042 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9043 fas->f_qfull_retry_interval[target] =
9044 drv_usectohz(val * 1000);
9045 rval = TRUE;
9046 break;
9047
9048 default:
9049 rval = UNDEFINED;
9050 break;
9051 }
9052
9053 } else if (doset == 0) {
9054 int slot = target * NLUNS_PER_TARGET | ap->a_lun;
9055
9056 switch (cidx) {
9057 case SCSI_CAP_DMA_MAX:
9058 /* very high limit because of multiple dma windows */
9059 rval = 1<<30;
9060 break;
9061 case SCSI_CAP_MSG_OUT:
9062 rval = TRUE;
9063 break;
9064 case SCSI_CAP_DISCONNECT:
9065 if (tgtonly &&
9066 (fas->f_target_scsi_options[target] &
9067 SCSI_OPTIONS_DR)) {
9068 rval = TRUE;
9069 }
9070 break;
9071 case SCSI_CAP_SYNCHRONOUS:
9072 if (tgtonly && fas->f_offset[target]) {
9073 rval = TRUE;
9074 }
9075 break;
9076 case SCSI_CAP_PARITY:
9077 rval = TRUE;
9078 break;
9079 case SCSI_CAP_INITIATOR_ID:
9080 rval = MY_ID(fas);
9081 break;
9082 case SCSI_CAP_TAGGED_QING:
9083 if (tgtonly && ((fas->f_notag & tshift) == 0)) {
9084 rval = TRUE;
9085 }
9086 break;
9087 case SCSI_CAP_WIDE_XFER:
9088 if ((tgtonly && (fas->f_nowide & tshift) == 0)) {
9089 rval = TRUE;
9090 }
9091 break;
9092 case SCSI_CAP_UNTAGGED_QING:
9093 rval = TRUE;
9094 break;
9095 case SCSI_CAP_ARQ:
9096 if (tgtonly && fas->f_arq_pkt[slot]) {
9097 rval = TRUE;
9098 }
9099 break;
9100 case SCSI_CAP_LINKED_CMDS:
9101 break;
9102 case SCSI_CAP_RESET_NOTIFICATION:
9103 rval = TRUE;
9104 break;
9105 case SCSI_CAP_QFULL_RETRIES:
9106 rval = fas->f_qfull_retries[target];
9107 break;
9108 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9109 rval = drv_hztousec(
9110 fas->f_qfull_retry_interval[target]) /
9111 1000;
9112 break;
9113
9114 default:
9115 rval = UNDEFINED;
9116 break;
9117 }
9118 }
9119 exit:
9120 if (val && tgtonly) {
9121 fas_update_props(fas, target);
9122 }
9123 fas_check_waitQ_and_mutex_exit(fas);
9124
9125 if (doset) {
9126 IPRINTF6(
9127 "fas_commoncap:tgt=%x,cap=%s,tgtonly=%x,doset=%x,val=%x,rval=%x\n",
9128 target, cap, tgtonly, doset, val, rval);
9129 }
9130 return (rval);
9131 }
9132
9133 /*
9134 * property management
9135 * fas_update_props:
9136 * create/update sync/wide/TQ/scsi-options properties for this target
9137 */
9138 static void
9139 fas_update_props(struct fas *fas, int tgt)
9140 {
9141 char property[32];
9142 uint_t xfer_speed = 0;
9143 uint_t xfer_rate = 0;
9144 int wide_enabled, tq_enabled;
9145 uint_t regval = fas->f_sync_period[tgt];
9146 int offset = fas->f_offset[tgt];
9147
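/*
 * the sync speed is derived from the programmed period register
 * value and the chip clock cycle; a wide (16-bit) bus moves two
 * bytes per transfer, so the effective rate doubles.
 */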
9148 wide_enabled = ((fas->f_nowide & (1<<tgt)) == 0);
9149 if (offset && regval) {
9150 xfer_speed =
9151 FAS_SYNC_KBPS((regval * fas->f_clock_cycle) / 1000);
9152 xfer_rate = ((wide_enabled)? 2 : 1) * xfer_speed;
9153 }
9154 (void) sprintf(property, "target%x-sync-speed", tgt);
9155 fas_update_this_prop(fas, property, xfer_rate);
9156
9157 (void) sprintf(property, "target%x-wide", tgt);
9158 fas_update_this_prop(fas, property, wide_enabled);
9159
9160 (void) sprintf(property, "target%x-TQ", tgt);
9161 tq_enabled = ((fas->f_notag & (1<<tgt))? 0 : 1);
9162 fas_update_this_prop(fas, property, tq_enabled);
9163
9164 }
9165
9166 static void
9167 fas_update_this_prop(struct fas *fas, char *property, int value)
9168 {
9169 dev_info_t *dip = fas->f_dev;
9170
9171 IPRINTF2("update prop: %s value=%x\n", property, value);
9172 ASSERT(mutex_owned(FAS_MUTEX(fas)));
9173 /*
9174 * We cannot hold any mutex at this point because the call to
9175 * ddi_prop_update_int() may block.
9176 */
9177 mutex_exit(FAS_MUTEX(fas));
9178 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
9179 property, value) != DDI_PROP_SUCCESS) {
9180 IPRINTF1("cannot modify/create %s property\n", property);
9181 }
9182 mutex_enter(FAS_MUTEX(fas));
9183 }
9184
9185 /*
9186 * allocate active slots array, size is dependent on whether tagQ enabled
9187 */
9188 static int
9189 fas_alloc_active_slots(struct fas *fas, int slot, int flag)
9190 {
9191 int target = slot / NLUNS_PER_TARGET;
9192 struct f_slots *old_active = fas->f_active[slot];
9193 struct f_slots *new_active;
9194 ushort_t size;
9195 int rval = -1;
9196
9197 if (fas->f_tcmds[slot]) {
9198 IPRINTF("cannot change size of active slots array\n");
9199 return (rval);
9200 }
9201
9202 size = ((NOTAG(target)) ? FAS_F_SLOT_SIZE : FAS_F_SLOTS_SIZE_TQ);
9203 EPRINTF4(
9204 "fas_alloc_active_slots: target=%x size=%x, old=0x%p, oldsize=%x\n",
9205 target, size, (void *)old_active,
9206 ((old_active == NULL) ? -1 : old_active->f_size));
9207
9208 new_active = kmem_zalloc(size, flag);
9209 if (new_active == NULL) {
9210 IPRINTF("new active alloc failed\n");
9211 } else {
9212 fas->f_active[slot] = new_active;
9213 fas->f_active[slot]->f_n_slots = (NOTAG(target) ? 1 : NTAGS);
9214 fas->f_active[slot]->f_size = size;
9215 /*
9216 * reserve tag 0 for non-tagged cmds to tagged targets
9217 */
9218 if (TAGGED(target)) {
9219 fas->f_active[slot]->f_tags = 1;
9220 }
9221 if (old_active) {
9222 kmem_free((caddr_t)old_active, old_active->f_size);
9223 }
9224 rval = 0;
9225 }
9226 return (rval);
9227 }
9228
9229 /*
9230 * Error logging, printing, and debug print routines
9231 */
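/*
 * all three routines format into the shared static fas_log_buf;
 * fas_log_mutex serializes access to it.
 */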
9232 static char *fas_label = "fas";
9233
9234 /*PRINTFLIKE3*/
9235 static void
9236 fas_log(struct fas *fas, int level, const char *fmt, ...)
9237 {
9238 dev_info_t *dev;
9239 va_list ap;
9240
9241 if (fas) {
9242 dev = fas->f_dev;
9243 } else {
9244 dev = 0;
9245 }
9246
9247 mutex_enter(&fas_log_mutex);
9248
9249 va_start(ap, fmt);
9250 (void) vsprintf(fas_log_buf, fmt, ap);
9251 va_end(ap);
9252
9253 if (level == CE_CONT) {
9254 scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
9255 } else {
9256 scsi_log(dev, fas_label, level, "%s", fas_log_buf);
9257 }
9258
9259 mutex_exit(&fas_log_mutex);
9260 }
9261
9262 /*PRINTFLIKE2*/
9263 static void
9264 fas_printf(struct fas *fas, const char *fmt, ...)
9265 {
9266 dev_info_t *dev = 0;
9267 va_list ap;
9268 int level = CE_CONT;
9269
9270 mutex_enter(&fas_log_mutex);
9271
9272 va_start(ap, fmt);
9273 (void) vsprintf(fas_log_buf, fmt, ap);
9274 va_end(ap);
9275
9276 if (fas) {
9277 dev = fas->f_dev;
9278 level = CE_NOTE;
9279 scsi_log(dev, fas_label, level, "%s", fas_log_buf);
9280 } else {
9281 scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
9282 }
9283
9284 mutex_exit(&fas_log_mutex);
9285 }
9286
9287 #ifdef FASDEBUG
9288 /*PRINTFLIKE2*/
9289 void
9290 fas_dprintf(struct fas *fas, const char *fmt, ...)
9291 {
9292 dev_info_t *dev = 0;
9293 va_list ap;
9294
9295 if (fas) {
9296 dev = fas->f_dev;
9297 }
9298
9299 mutex_enter(&fas_log_mutex);
9300
9301 va_start(ap, fmt);
9302 (void) vsprintf(fas_log_buf, fmt, ap);
9303 va_end(ap);
9304
9305 scsi_log(dev, fas_label, SCSI_DEBUG, "%s", fas_log_buf);
9306
9307 mutex_exit(&fas_log_mutex);
9308 }
9309 #endif
9310
9311
9312 static void
9313 fas_printstate(struct fas *fas, char *msg)
9314 {
9315 volatile struct fasreg *fasreg = fas->f_reg;
9316 volatile struct dma *dmar = fas->f_dma;
9317 uint_t csr = fas_dma_reg_read(fas, &dmar->dma_csr);
9318 uint_t count = fas_dma_reg_read(fas, &dmar->dma_count);
9319 uint_t addr = fas_dma_reg_read(fas, &dmar->dma_addr);
9320 uint_t test = fas_dma_reg_read(fas, &dmar->dma_test);
9321 uint_t fas_cnt;
9322
9323 fas_log(fas, CE_WARN, "%s: current fas state:", msg);
9324 fas_printf(NULL, "Latched stat=0x%b intr=0x%b",
9325 fas->f_stat, FAS_STAT_BITS, fas->f_intr, FAS_INT_BITS);
9326 fas_printf(NULL, "last msgout: %s, last msgin: %s",
9327 scsi_mname(fas->f_last_msgout), scsi_mname(fas->f_last_msgin));
9328 fas_printf(NULL, "DMA csr=0x%b", csr, dma_bits);
9329 fas_printf(NULL,
9330 "addr=%x dmacnt=%x test=%x last=%x last_cnt=%x",
9331 addr, count, test, fas->f_lastdma, fas->f_lastcount);
9332
9333 GET_FAS_COUNT(fasreg, fas_cnt);
9334 fas_printf(NULL, "fas state:");
9335 fas_printf(NULL, "\tcount(32)=%x cmd=%x stat=%x stat2=%x intr=%x",
9336 fas_cnt, fasreg->fas_cmd, fasreg->fas_stat, fasreg->fas_stat2,
9337 fasreg->fas_intr);
9338 fas_printf(NULL,
9339 "\tstep=%x fifoflag=%x conf=%x test=%x conf2=%x conf3=%x",
9340 fasreg->fas_step, fasreg->fas_fifo_flag, fasreg->fas_conf,
9341 fasreg->fas_test, fasreg->fas_conf2, fasreg->fas_conf3);
9342
9343 if (fas->f_current_sp) {
9344 fas_dump_cmd(fas, fas->f_current_sp);
9345 }
9346 }
9347
9348 /*
9349 * dump all we know about a cmd
9350 */
9351 static void
9352 fas_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9353 {
9354 int i;
9355 uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9356 auto char buf[128];
9357
9358 buf[0] = '\0';
9359 fas_printf(NULL, "Cmd dump for Target %d Lun %d:",
9360 Tgt(sp), Lun(sp));
9361 (void) sprintf(&buf[0], " cdb=[");
9362 for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9363 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9364 }
9365 (void) sprintf(&buf[strlen(buf)], " ]");
9366 fas_printf(NULL, "%s", buf);
9367 fas_printf(NULL, "State=%s Last State=%s",
9368 fas_state_name(fas->f_state), fas_state_name(fas->f_laststate));
9369 fas_printf(NULL,
9370 "pkt_state=0x%b pkt_flags=0x%x pkt_statistics=0x%x",
9371 sp->cmd_pkt->pkt_state, scsi_state_bits, sp->cmd_pkt_flags,
9372 sp->cmd_pkt->pkt_statistics);
9373 if (sp->cmd_pkt->pkt_state & STATE_GOT_STATUS) {
9374 fas_printf(NULL, "Status=0x%x\n", sp->cmd_pkt->pkt_scbp[0]);
9375 }
9376 }
9377
9378 /*ARGSUSED*/
9379 static void
9380 fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9381 {
9382 int i;
9383 uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9384 auto char buf[128];
9385
9386 buf[0] = '\0';
9387 (void) sprintf(&buf[0], "?%d.%d: cdb=[", Tgt(sp), Lun(sp));
9388 for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9389 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9390 }
9391 (void) sprintf(&buf[strlen(buf)], " ]");
9392 fas_printf(NULL, "%s", buf);
9393 }
9394
9395 /*
9396 * state decoding for error messages
9397 */
9398 static char *
9399 fas_state_name(ushort_t state)
9400 {
9401 if (state == STATE_FREE) {
9402 return ("FREE");
9403 } else if (state & STATE_SELECTING) {
9404 if (state == STATE_SELECT_NORMAL)
9405 return ("SELECT");
9406 else if (state == STATE_SELECT_N_STOP)
9407 return ("SEL&STOP");
9408 else if (state == STATE_SELECT_N_SENDMSG)
9409 return ("SELECT_SNDMSG");
9410 else
9411 return ("SEL_NO_ATN");
9412 } else {
9413 static struct {
9414 char *sname;
9415 char state;
9416 } names[] = {
9417 "CMD_START", ACTS_CMD_START,
9418 "CMD_DONE", ACTS_CMD_DONE,
9419 "MSG_OUT", ACTS_MSG_OUT,
9420 "MSG_OUT_DONE", ACTS_MSG_OUT_DONE,
9421 "MSG_IN", ACTS_MSG_IN,
9422 "MSG_IN_MORE", ACTS_MSG_IN_MORE,
9423 "MSG_IN_DONE", ACTS_MSG_IN_DONE,
9424 "CLEARING", ACTS_CLEARING,
9425 "DATA", ACTS_DATA,
9426 "DATA_DONE", ACTS_DATA_DONE,
9427 "CMD_CMPLT", ACTS_C_CMPLT,
9428 "UNKNOWN", ACTS_UNKNOWN,
9429 "RESEL", ACTS_RESEL,
9430 "ENDVEC", ACTS_ENDVEC,
9431 "RESET", ACTS_RESET,
9432 "ABORTING", ACTS_ABORTING,
9433 "FROZEN", ACTS_FROZEN,
9434 0
9435 };
9436 int i;
9437 for (i = 0; names[i].sname; i++) {
9438 if (names[i].state == state)
9439 return (names[i].sname);
9440 }
9441 }
9442 return ("<BAD>");
9443 }
9444