1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * ISSUES 30 * 31 * - more consistent error messages 32 * - report name of device on errors? 33 * - if wide target renegotiates sync, back to narrow? 34 * - last_msgout is not accurate ???? 
35 * - resolve XXXX 36 * - improve msg reject code (use special msg reject handler) 37 * - better use of IDE message 38 * - keep track if ATN remains asserted and target not going into 39 * a msg-out phase 40 * - improve comments 41 * - no slave accesses when start address is odd and dma hasn't started 42 * this affect asserting ATN 43 */ 44 45 /* 46 * fas - QLogic fas366 wide/fast SCSI Processor HBA driver with 47 * tagged and non-tagged queueing support 48 */ 49 #if defined(lint) && !defined(DEBUG) 50 #define DEBUG 1 51 #define FASDEBUG 52 #endif 53 54 #define DMA_REG_TRACING /* enable dma register access tracing */ 55 56 57 /* 58 * standard header files 59 */ 60 #include <sys/note.h> 61 #include <sys/scsi/scsi.h> 62 #include <sys/file.h> 63 #include <sys/vtrace.h> 64 65 /* 66 * private header files 67 */ 68 #include <sys/scsi/adapters/fasdma.h> 69 #include <sys/scsi/adapters/fasreg.h> 70 #include <sys/scsi/adapters/fasvar.h> 71 #include <sys/scsi/adapters/fascmd.h> 72 #include <sys/scsi/impl/scsi_reset_notify.h> 73 74 /* 75 * tunables 76 */ 77 static int fas_selection_timeout = 250; /* 250 milliseconds */ 78 static uchar_t fas_default_offset = DEFAULT_OFFSET; 79 80 /* 81 * needed for presto support, do not remove 82 */ 83 static int fas_enable_sbus64 = 1; 84 85 #ifdef FASDEBUG 86 int fasdebug = 0; 87 int fasdebug_instance = -1; /* debug all instances */ 88 static int fas_burstsizes_limit = -1; 89 static int fas_no_sync_wide_backoff = 0; 90 #endif /* FASDEBUG */ 91 92 /* 93 * Local static data protected by global mutex 94 */ 95 static kmutex_t fas_global_mutex; /* to allow concurrent attach */ 96 97 static int fas_scsi_watchdog_tick; /* in seconds, for all */ 98 /* instances */ 99 static clock_t fas_tick; /* fas_watch() interval in Hz */ 100 static timeout_id_t fas_reset_watch; /* timeout id for reset watch */ 101 static timeout_id_t fas_timeout_id = 0; 102 static int fas_timeout_initted = 0; 103 104 static krwlock_t fas_global_rwlock; 105 106 static void 
*fas_state; /* soft state ptr */ 107 static struct fas *fas_head; /* link all softstate structures */ 108 static struct fas *fas_tail; /* for fas_watch() */ 109 110 static kmutex_t fas_log_mutex; 111 static char fas_log_buf[256]; 112 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch)) 113 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \ 114 fas_scsi_watchdog_tick fas_tick)) 115 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", fas::f_quiesce_timeid)) 116 117 /* 118 * dma attribute structure for scsi engine 119 */ 120 static ddi_dma_attr_t dma_fasattr = { 121 DMA_ATTR_V0, (unsigned long long)0, 122 (unsigned long long)0xffffffff, (unsigned long long)((1<<24)-1), 123 1, DEFAULT_BURSTSIZE, 1, 124 (unsigned long long)0xffffffff, (unsigned long long)0xffffffff, 125 1, 512, 0 126 }; 127 128 /* 129 * optional torture test stuff 130 */ 131 #ifdef FASDEBUG 132 #define FAS_TEST 133 static int fas_ptest_emsgin; 134 static int fas_ptest_msgin; 135 static int fas_ptest_msg = -1; 136 static int fas_ptest_status; 137 static int fas_ptest_data_in; 138 static int fas_atest; 139 static int fas_atest_disc; 140 static int fas_atest_reconn; 141 static void fas_test_abort(struct fas *fas, int slot); 142 static int fas_rtest; 143 static int fas_rtest_type; 144 static void fas_test_reset(struct fas *fas, int slot); 145 static int fas_force_timeout; 146 static int fas_btest; 147 static int fas_test_stop; 148 static int fas_transport_busy; 149 static int fas_transport_busy_rqs; 150 static int fas_transport_reject; 151 static int fas_arqs_failure; 152 static int fas_tran_err; 153 static int fas_test_untagged; 154 static int fas_enable_untagged; 155 #endif 156 157 /* 158 * warlock directives 159 */ 160 _NOTE(DATA_READABLE_WITHOUT_LOCK(dma fasdebug)) 161 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy)) 162 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy_rqs)) 163 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_reject)) 
164 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_arqs_failure)) 165 _NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_tran_err)) 166 _NOTE(MUTEX_PROTECTS_DATA(fas_log_mutex, fas_log_buf)) 167 _NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch)) 168 _NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \ 169 fas_scsi_watchdog_tick fas_tick)) 170 171 /* 172 * function prototypes 173 * 174 * scsa functions are exported by means of the transport table: 175 */ 176 static int fas_scsi_tgt_probe(struct scsi_device *sd, 177 int (*waitfunc)(void)); 178 static int fas_scsi_tgt_init(dev_info_t *, dev_info_t *, 179 scsi_hba_tran_t *, struct scsi_device *); 180 static int fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt); 181 static int fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt); 182 static int fas_scsi_reset(struct scsi_address *ap, int level); 183 static int fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom); 184 static int fas_scsi_setcap(struct scsi_address *ap, char *cap, int value, 185 int whom); 186 static struct scsi_pkt *fas_scsi_init_pkt(struct scsi_address *ap, 187 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen, 188 int tgtlen, int flags, int (*callback)(), caddr_t arg); 189 static void fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt); 190 static void fas_scsi_dmafree(struct scsi_address *ap, 191 struct scsi_pkt *pkt); 192 static void fas_scsi_sync_pkt(struct scsi_address *ap, 193 struct scsi_pkt *pkt); 194 195 /* 196 * internal functions: 197 */ 198 static int fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp); 199 static int fas_alloc_tag(struct fas *fas, struct fas_cmd *sp); 200 static int fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag); 201 static void fas_empty_waitQ(struct fas *fas); 202 static void fas_move_waitQ_to_readyQ(struct fas *fas); 203 static void fas_check_waitQ_and_mutex_exit(struct fas *fas); 204 static int fas_istart(struct 
fas *fas); 205 static int fas_ustart(struct fas *fas); 206 static int fas_startcmd(struct fas *fas, struct fas_cmd *sp); 207 208 static int fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp, 209 int cmdlen, int tgtlen, int statuslen, int kf); 210 static void fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp); 211 static int fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags); 212 static void fas_kmem_cache_destructor(void *buf, void *cdrarg); 213 214 static int fas_finish(struct fas *fas); 215 static void fas_handle_qfull(struct fas *fas, struct fas_cmd *sp); 216 static void fas_restart_cmd(void *); 217 static int fas_dopoll(struct fas *fas, int timeout); 218 static void fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp); 219 static uint_t fas_intr(caddr_t arg); 220 static int fas_intr_svc(struct fas *fas); 221 static int fas_phasemanage(struct fas *fas); 222 static int fas_handle_unknown(struct fas *fas); 223 static int fas_handle_cmd_start(struct fas *fas); 224 static int fas_handle_cmd_done(struct fas *fas); 225 static int fas_handle_msg_out_start(struct fas *fas); 226 static int fas_handle_msg_out_done(struct fas *fas); 227 static int fas_handle_clearing(struct fas *fas); 228 static int fas_handle_data_start(struct fas *fas); 229 static int fas_handle_data_done(struct fas *fas); 230 static int fas_handle_c_cmplt(struct fas *fas); 231 static int fas_handle_msg_in_start(struct fas *fas); 232 static int fas_handle_more_msgin(struct fas *fas); 233 static int fas_handle_msg_in_done(struct fas *fas); 234 static int fas_onebyte_msg(struct fas *fas); 235 static int fas_twobyte_msg(struct fas *fas); 236 static int fas_multibyte_msg(struct fas *fas); 237 static void fas_revert_to_async(struct fas *fas, int tgt); 238 static int fas_finish_select(struct fas *fas); 239 static int fas_reselect_preempt(struct fas *fas); 240 static int fas_reconnect(struct fas *fas); 241 static int fas_handle_selection(struct fas *fas); 242 static void 
fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp); 243 static int fas_handle_gross_err(struct fas *fas); 244 static int fas_illegal_cmd_or_bus_reset(struct fas *fas); 245 static int fas_check_dma_error(struct fas *fas); 246 247 static void fas_make_sdtr(struct fas *fas, int msgout_offset, int target); 248 static void fas_make_wdtr(struct fas *fas, int msgout_offset, int target, 249 int width); 250 static void fas_update_props(struct fas *fas, int tgt); 251 static void fas_update_this_prop(struct fas *fas, char *property, int value); 252 253 static int fas_commoncap(struct scsi_address *ap, char *cap, int val, 254 int tgtonly, int doset); 255 256 static void fas_watch(void *arg); 257 static void fas_watchsubr(struct fas *fas); 258 static void fas_cmd_timeout(struct fas *fas, int slot); 259 static void fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp, 260 int slot); 261 static void fas_reset_sync_wide(struct fas *fas); 262 static void fas_set_wide_conf3(struct fas *fas, int target, int width); 263 static void fas_force_renegotiation(struct fas *fas, int target); 264 265 static int fas_set_new_window(struct fas *fas, struct fas_cmd *sp); 266 static int fas_restore_pointers(struct fas *fas, struct fas_cmd *sp); 267 static int fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end); 268 269 /*PRINTFLIKE3*/ 270 static void fas_log(struct fas *fas, int level, const char *fmt, ...); 271 /*PRINTFLIKE2*/ 272 static void fas_printf(struct fas *fas, const char *fmt, ...); 273 static void fas_printstate(struct fas *fas, char *msg); 274 static void fas_dump_cmd(struct fas *fas, struct fas_cmd *sp); 275 static void fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp); 276 static char *fas_state_name(ushort_t state); 277 278 static void fas_makeproxy_cmd(struct fas_cmd *sp, 279 struct scsi_address *ap, struct scsi_pkt *pkt, int nmsg, ...); 280 static int fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp, 281 struct scsi_address *ap, char *what); 282 
283 static void fas_internal_reset(struct fas *fas, int reset_action); 284 static int fas_alloc_active_slots(struct fas *fas, int slot, int flag); 285 286 static int fas_abort_curcmd(struct fas *fas); 287 static int fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot); 288 static int fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt); 289 static int fas_do_scsi_reset(struct scsi_address *ap, int level); 290 static int fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp, 291 int slot); 292 static void fas_flush_readyQ(struct fas *fas, int slot); 293 static void fas_flush_tagQ(struct fas *fas, int slot); 294 static void fas_flush_cmd(struct fas *fas, struct fas_cmd *sp, 295 uchar_t reason, uint_t stat); 296 static int fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp, 297 uchar_t msg); 298 static int fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap, 299 struct fas_cmd *sp, uchar_t msg, int slot); 300 static void fas_mark_packets(struct fas *fas, int slot, uchar_t reason, 301 uint_t stat); 302 static void fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp, 303 uchar_t reason, uint_t stat); 304 305 static int fas_reset_bus(struct fas *fas); 306 static int fas_reset_recovery(struct fas *fas); 307 static int fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap); 308 static int fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap); 309 static void fas_start_watch_reset_delay(struct fas *); 310 static void fas_setup_reset_delay(struct fas *fas); 311 static void fas_watch_reset_delay(void *arg); 312 static int fas_watch_reset_delay_subr(struct fas *fas); 313 static void fas_reset_cleanup(struct fas *fas, int slot); 314 static int fas_scsi_reset_notify(struct scsi_address *ap, int flag, 315 void (*callback)(caddr_t), caddr_t arg); 316 static int fas_scsi_quiesce(dev_info_t *hba_dip); 317 static int fas_scsi_unquiesce(dev_info_t *hba_dip); 318 319 static void fas_set_throttles(struct fas 
*fas, int slot, 320 int n, int what); 321 static void fas_set_all_lun_throttles(struct fas *fas, int slot, int what); 322 static void fas_full_throttle(struct fas *fas, int slot); 323 static void fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int timeout); 324 static void fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp); 325 326 static int fas_quiesce_bus(struct fas *fas); 327 static int fas_unquiesce_bus(struct fas *fas); 328 static void fas_ncmds_checkdrain(void *arg); 329 static int fas_check_outstanding(struct fas *fas); 330 331 static int fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap); 332 static int fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap); 333 static int fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp); 334 void fas_complete_arq_pkt(struct scsi_pkt *pkt); 335 336 void fas_call_pkt_comp(struct fas *fas, struct fas_cmd *sp); 337 void fas_empty_callbackQ(struct fas *fas); 338 int fas_init_callbacks(struct fas *fas); 339 void fas_destroy_callbacks(struct fas *fas); 340 341 static int fas_check_dma_error(struct fas *fas); 342 static int fas_init_chip(struct fas *fas, uchar_t id); 343 344 static void fas_read_fifo(struct fas *fas); 345 static void fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad); 346 347 #ifdef FASDEBUG 348 static void fas_reg_cmd_write(struct fas *fas, uint8_t cmd); 349 static void fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what); 350 static uint8_t fas_reg_read(struct fas *fas, volatile uint8_t *p); 351 352 static void fas_dma_reg_write(struct fas *fas, volatile uint32_t *p, 353 uint32_t what); 354 static uint32_t fas_dma_reg_read(struct fas *fas, volatile uint32_t *p); 355 #else 356 #define fas_reg_cmd_write(fas, cmd) \ 357 fas->f_reg->fas_cmd = (cmd), fas->f_last_cmd = (cmd) 358 #define fas_reg_write(fas, p, what) *(p) = (what) 359 #define fas_reg_read(fas, p) *(p) 360 #define fas_dma_reg_write(fas, p, what) *(p) = (what) 361 #define fas_dma_reg_read(fas, 
p)	*(p)
#endif

/*
 * autoconfiguration data and routines.
 */
static int fas_attach(dev_info_t *dev, ddi_attach_cmd_t cmd);
static int fas_detach(dev_info_t *dev, ddi_detach_cmd_t cmd);
static int fas_dr_detach(dev_info_t *dev);

/*
 * Standard dev_ops(9S) table; all SCSA entry points are reached via the
 * scsi_hba_tran_t set up in fas_attach(), so only attach/detach are real.
 */
static struct dev_ops fas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	fas_attach,		/* attach */
	fas_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations */
	NULL,			/* bus operations */
	NULL			/* power */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	/* NOTE(review): "%I%" is an SCCS keyword expanded at build time */
	"FAS SCSI HBA Driver v%I%",	/* Name of the module. */
	&fas_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * _init(9E): module load.  Initializes the driver-global soft-state list,
 * the SCSA HBA framework hooks, and the global locks, then installs the
 * module.  Every step acquired before a failing mod_install() is undone
 * in reverse, so a failed load leaves no residue.
 */
int
_init(void)
{
	int rval;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	rval = ddi_soft_state_init(&fas_state, sizeof (struct fas),
	    FAS_INITIAL_SOFT_SPACE);
	if (rval != 0) {
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&fas_state);
		return (rval);
	}

	mutex_init(&fas_global_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&fas_global_rwlock, NULL, RW_DRIVER, NULL);

	mutex_init(&fas_log_mutex, NULL, MUTEX_DRIVER, NULL);

	if ((rval = mod_install(&modlinkage)) != 0) {
		/* unwind everything set up above; load has failed */
		mutex_destroy(&fas_log_mutex);
		rw_destroy(&fas_global_rwlock);
		mutex_destroy(&fas_global_mutex);
		ddi_soft_state_fini(&fas_state);
		scsi_hba_fini(&modlinkage);
		return (rval);
	}

	return (rval);
}

/*
 * _fini(9E): module unload.  Global teardown only happens when
 * mod_remove() succeeds, i.e. no instance is still attached.
 */
int
_fini(void)
{
	int rval;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	if ((rval = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&fas_state);
		scsi_hba_fini(&modlinkage);
		mutex_destroy(&fas_log_mutex);
		rw_destroy(&fas_global_rwlock);
		mutex_destroy(&fas_global_mutex);
	}
	return (rval);
}

/*
 * _info(9E): report module information via the modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	return (mod_info(&modlinkage, modinfop));
}

/*
 * tran_tgt_probe(9E) entry point: probe for a target by delegating to
 * scsi_hba_probe(), forcing sync/wide renegotiation around the probe and
 * picking up any device-type-specific scsi-options on first sighting.
 */
static int
fas_scsi_tgt_probe(struct scsi_device *sd,
    int (*waitfunc)(void))
{
	dev_info_t *dip = ddi_get_parent(sd->sd_dev);
	int rval = SCSIPROBE_FAILURE;
	scsi_hba_tran_t *tran;
	struct fas *fas;
	int tgt = sd->sd_address.a_target;

	tran = ddi_get_driver_private(dip);
	ASSERT(tran != NULL);
	fas = TRAN2FAS(tran);

	/*
	 * force renegotiation since inquiry cmds do not cause
	 * check conditions
	 */
	mutex_enter(FAS_MUTEX(fas));
	fas_force_renegotiation(fas, tgt);
	mutex_exit(FAS_MUTEX(fas));
	rval = scsi_hba_probe(sd, waitfunc);

	/*
	 * the scsi-options precedence is:
	 * target-scsi-options		highest
	 * device-type-scsi-options
	 * per bus scsi-options
	 * global scsi-options		lowest
	 *
	 * Only consult the device-type table when no explicit
	 * target<n>-scsi-options property was configured for this target.
	 */
	mutex_enter(FAS_MUTEX(fas));
	if ((rval == SCSIPROBE_EXISTS) &&
	    ((fas->f_target_scsi_options_defined & (1 << tgt)) == 0)) {
		int options;

		options = scsi_get_device_type_scsi_options(dip, sd, -1);
		if (options != -1) {
			fas->f_target_scsi_options[tgt] = options;
			fas_log(fas, CE_NOTE,
			    "?target%x-scsi-options = 0x%x\n", tgt,
			    fas->f_target_scsi_options[tgt]);
			/* new options may change sync/wide agreement */
			fas_force_renegotiation(fas, tgt);
		}
	}
	mutex_exit(FAS_MUTEX(fas));

	IPRINTF2("target%x-scsi-options= 0x%x\n",
	    tgt, fas->f_target_scsi_options[tgt]);

	return (rval);
}


/*
 * tran_tgt_init(9E) entry point: accept only addresses within the wide
 * target/LUN range this HBA supports.
 */
/*ARGSUSED*/
static int
fas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	return (((sd->sd_address.a_target < NTARGETS_WIDE) &&
	    (sd->sd_address.a_lun < NLUNS_PER_TARGET)) ?
	    DDI_SUCCESS : DDI_FAILURE);
}

/*
 * attach(9E) entry point.
 *
 * DDI_ATTACH: allocate and initialize one instance — map the DMA engine
 * and FAS366 registers, allocate/bind the command area, set up the SCSA
 * transport vector, read configuration properties, install the interrupt
 * handler, initialize the chip, and finally link the instance onto the
 * global list and start the shared watchdog.  On any failure the 'fail:'
 * label tears down exactly what was set up (tracked via the
 * mutex_init_done/intr_added/hba_attached/bound_handle flags).
 *
 * DDI_RESUME: reset hardware and soft state to "no outstanding commands",
 * restart queued work and re-arm the shared watchdog timeout if needed.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
/*ARGSUSED*/
static int
fas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct fas *fas = NULL;
	volatile struct dma *dmar = NULL;
	volatile struct fasreg *fasreg;
	ddi_dma_attr_t *fas_dma_attr;
	ddi_device_acc_attr_t dev_attr;

	int instance, id, slot, i, hm_rev;
	size_t rlen;
	uint_t count;
	char buf[64];
	scsi_hba_tran_t *tran = NULL;
	char intr_added = 0;
	char mutex_init_done = 0;
	char hba_attached = 0;
	char bound_handle = 0;
	char *prop_template = "target%d-scsi-options";
	char prop_str[32];

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if ((tran = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		fas = TRAN2FAS(tran);
		if (!fas) {
			return (DDI_FAILURE);
		}
		/*
		 * Reset hardware and softc to "no outstanding commands"
		 * Note that a check condition can result on first command
		 * to a target.
		 */
		mutex_enter(FAS_MUTEX(fas));
		fas_internal_reset(fas,
		    FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);

		(void) fas_reset_bus(fas);

		fas->f_suspended = 0;

		/* make sure that things get started */
		(void) fas_istart(fas);
		fas_check_waitQ_and_mutex_exit(fas);

		/*
		 * the watchdog timeout is shared by all instances; it may
		 * have been cancelled by the last DDI_SUSPEND, so re-arm it
		 */
		mutex_enter(&fas_global_mutex);
		if (fas_timeout_id == 0) {
			fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
			fas_timeout_initted = 1;
		}
		mutex_exit(&fas_global_mutex);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	/*
	 * Since we know that some instantiations of this device can
	 * be plugged into slave-only SBus slots, check to see whether
	 * this is one such.
	 */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: device in slave-only slot", instance);
		return (DDI_FAILURE);
	}

	if (ddi_intr_hilevel(dip, 0)) {
		/*
		 * Interrupt number '0' is a high-level interrupt.
		 * At this point you either add a special interrupt
		 * handler that triggers a soft interrupt at a lower level,
		 * or - more simply and appropriately here - you just
		 * fail the attach.
		 */
		cmn_err(CE_WARN,
		    "fas%d: Device is using a hilevel intr", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate softc information.
	 */
	if (ddi_soft_state_zalloc(fas_state, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: cannot allocate soft state", instance);
		goto fail;
	}

	fas = (struct fas *)ddi_get_soft_state(fas_state, instance);

	if (fas == NULL) {
		goto fail;
	}

	/*
	 * map in device registers: set 0 is the DMA engine, set 1 the
	 * FAS366 chip itself
	 */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_regs_map_setup(dip, (uint_t)0, (caddr_t *)&dmar,
	    (off_t)0, (off_t)sizeof (struct dma),
	    &dev_attr, &fas->f_dmar_acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fas%d: cannot map dma", instance);
		goto fail;
	}

	if (ddi_regs_map_setup(dip, (uint_t)1, (caddr_t *)&fasreg,
	    (off_t)0, (off_t)sizeof (struct fasreg),
	    &dev_attr, &fas->f_regs_acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: unable to map fas366 registers", instance);
		goto fail;
	}

	fas_dma_attr = &dma_fasattr;
	if (ddi_dma_alloc_handle(dip, fas_dma_attr,
	    DDI_DMA_SLEEP, NULL, &fas->f_dmahandle) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: cannot alloc dma handle", instance);
		goto fail;
	}

	/*
	 * allocate cmdarea and its dma handle
	 */
	if (ddi_dma_mem_alloc(fas->f_dmahandle,
	    (uint_t)2*FIFOSIZE,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&fas->f_cmdarea, &rlen,
	    &fas->f_cmdarea_acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: cannot alloc cmd area", instance);
		goto fail;
	}

	fas->f_reg = fasreg;
	fas->f_dma = dmar;
	fas->f_instance = instance;

	if (ddi_dma_addr_bind_handle(fas->f_dmahandle,
	    NULL, (caddr_t)fas->f_cmdarea,
	    rlen, DDI_DMA_RDWR|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &fas->f_dmacookie, &count) != DDI_DMA_MAPPED) {
		cmn_err(CE_WARN,
		    "fas%d: cannot bind cmdarea", instance);
		goto fail;
	}
	bound_handle++;

	/* dma_fasattr allows only a single cookie (sgllen == 1) */
	ASSERT(count == 1);

	/*
	 * Allocate a transport structure
	 * (SCSI_HBA_CANSLEEP: the allocation sleeps rather than fail)
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
	scsi_size_clean(dip);		/* SCSI_SIZE_CLEAN_VERIFY ok */

	/*
	 * initialize transport structure
	 */
	fas->f_tran = tran;
	fas->f_dev = dip;
	tran->tran_hba_private = fas;
	tran->tran_tgt_private = NULL;
	tran->tran_tgt_init = fas_scsi_tgt_init;
	tran->tran_tgt_probe = fas_scsi_tgt_probe;
	tran->tran_tgt_free = NULL;
	tran->tran_start = fas_scsi_start;
	tran->tran_abort = fas_scsi_abort;
	tran->tran_reset = fas_scsi_reset;
	tran->tran_getcap = fas_scsi_getcap;
	tran->tran_setcap = fas_scsi_setcap;
	tran->tran_init_pkt = fas_scsi_init_pkt;
	tran->tran_destroy_pkt = fas_scsi_destroy_pkt;
	tran->tran_dmafree = fas_scsi_dmafree;
	tran->tran_sync_pkt = fas_scsi_sync_pkt;
	tran->tran_reset_notify = fas_scsi_reset_notify;
	tran->tran_get_bus_addr = NULL;
	tran->tran_get_name = NULL;
	tran->tran_quiesce = fas_scsi_quiesce;
	tran->tran_unquiesce = fas_scsi_unquiesce;
	tran->tran_bus_reset = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_get_eventcookie = NULL;
	tran->tran_post_event = NULL;
	tran->tran_remove_eventcall = NULL;

	fas->f_force_async = 0;

	/*
	 * disable tagged queuing and wide for all targets
	 * (will be enabled by target driver if required)
	 * sync is enabled by default
	 */
	fas->f_nowide = fas->f_notag = ALL_TARGETS;
	fas->f_force_narrow = ALL_TARGETS;

	/*
	 * By default we assume embedded devices and save time
	 * checking for timeouts in fas_watch() by skipping
	 * the rest of luns
	 * If we're talking to any non-embedded devices,
	 * we can't cheat and skip over non-zero luns anymore
	 * in fas_watch() and fas_ustart().
	 */
	fas->f_dslot = NLUNS_PER_TARGET;

	/*
	 * f_active is used for saving disconnected cmds;
	 * For tagged targets, we need to increase the size later
	 * Only allocate for Lun == 0, if we probe a lun > 0 then
	 * we allocate an active structure
	 * If TQ gets enabled then we need to increase the size
	 * to hold 256 cmds
	 */
	for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {
		(void) fas_alloc_active_slots(fas, slot, KM_SLEEP);
	}

	/*
	 * initialize the qfull retry counts
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		fas->f_qfull_retries[i] = QFULL_RETRIES;
		fas->f_qfull_retry_interval[i] =
		    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);

	}

	/*
	 * Initialize throttles.
	 */
	fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);

	/*
	 * Initialize mask of deferred property updates
	 */
	fas->f_props_update = 0;

	/*
	 * set host ID: "initiator-id" takes precedence over
	 * "scsi-initiator-id"; fall back to DEFAULT_HOSTID
	 */
	fas->f_fasconf = DEFAULT_HOSTID;
	id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "initiator-id", -1);
	if (id == -1) {
		id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    "scsi-initiator-id", -1);
	}
	if (id != DEFAULT_HOSTID && id >= 0 && id < NTARGETS_WIDE) {
		fas_log(fas, CE_NOTE, "?initiator SCSI ID now %d\n", id);
		fas->f_fasconf = (uchar_t)id;
	}

	/*
	 * find the burstsize and reduce ours if necessary
	 */
	fas->f_dma_attr = fas_dma_attr;
	fas->f_dma_attr->dma_attr_burstsizes &=
	    ddi_dma_burstsizes(fas->f_dmahandle);

#ifdef FASDEBUG
	/* fas_burstsizes_limit defaults to -1 (all bits), i.e. no-op */
	fas->f_dma_attr->dma_attr_burstsizes &= fas_burstsizes_limit;
	IPRINTF1("dma burstsize=%x\n", fas->f_dma_attr->dma_attr_burstsizes);
#endif
	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, fas->f_dma_attr, tran, 0) !=
	    DDI_SUCCESS) {
		fas_log(fas, CE_WARN, "scsi_hba_attach_setup failed");
		goto fail;
	}
	hba_attached++;

	/*
	 * if scsi-options property exists, use it
	 */
	fas->f_scsi_options = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-options", DEFAULT_SCSI_OPTIONS);

	/*
	 * if scsi-selection-timeout property exists, use it
	 * NOTE(review): this writes a driver-global, so the last instance
	 * to attach wins for all instances — looks intentional but verify
	 */
	fas_selection_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-selection-timeout", SCSI_DEFAULT_SELECTION_TIMEOUT);

	/*
	 * if hm-rev property doesn't exist, use old scheme for rev
	 */
	hm_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "hm-rev", -1);

	if (hm_rev == 0xa0 || hm_rev == -1) {
		if (DMAREV(dmar) != 0) {
			fas->f_hm_rev = 0x20;
			fas_log(fas, CE_WARN,
			    "obsolete rev 2.0 FEPS chip, "
			    "possible data corruption");
		} else {
			/* rev 1.0 is not supported at all; fail the attach */
			fas->f_hm_rev = 0x10;
			fas_log(fas, CE_WARN,
			    "obsolete and unsupported rev 1.0 FEPS chip");
			goto fail;
		}
	} else if (hm_rev == 0x20) {
		fas->f_hm_rev = 0x21;
		fas_log(fas, CE_WARN, "obsolete rev 2.1 FEPS chip");
	} else {
		fas->f_hm_rev = (uchar_t)hm_rev;
		fas_log(fas, CE_NOTE, "?rev %x.%x FEPS chip\n",
		    (hm_rev >> 4) & 0xf, hm_rev & 0xf);
	}

	if ((fas->f_scsi_options & SCSI_OPTIONS_SYNC) == 0) {
		fas->f_nosync = ALL_TARGETS;
	}

	if ((fas->f_scsi_options & SCSI_OPTIONS_WIDE) == 0) {
		fas->f_nowide = ALL_TARGETS;
	}

	/*
	 * if target<n>-scsi-options property exists, use it;
	 * otherwise use the f_scsi_options
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		(void) sprintf(prop_str, prop_template, i);
		fas->f_target_scsi_options[i] = ddi_prop_get_int(
		    DDI_DEV_T_ANY, dip, 0, prop_str, -1);

		if (fas->f_target_scsi_options[i] != -1) {
			fas_log(fas, CE_NOTE, "?target%x-scsi-options=0x%x\n",
			    i, fas->f_target_scsi_options[i]);
			fas->f_target_scsi_options_defined |= 1 << i;
		} else {
			fas->f_target_scsi_options[i] = fas->f_scsi_options;
		}
		/* tagged queueing requires disconnect/reconnect */
		if (((fas->f_target_scsi_options[i] &
		    SCSI_OPTIONS_DR) == 0) &&
		    (fas->f_target_scsi_options[i] & SCSI_OPTIONS_TAG)) {
			fas->f_target_scsi_options[i] &= ~SCSI_OPTIONS_TAG;
			fas_log(fas, CE_WARN,
			    "Disabled TQ since disconnects are disabled");
		}
	}

	fas->f_scsi_tag_age_limit =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-tag-age-limit",
	    DEFAULT_TAG_AGE_LIMIT);

	fas->f_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
	if (fas->f_scsi_reset_delay == 0) {
		fas_log(fas, CE_NOTE,
		    "scsi_reset_delay of 0 is not recommended,"
		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
		fas->f_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
	}

	/*
	 * get iblock cookie and initialize mutexes
	 */
	if (ddi_get_iblock_cookie(dip, (uint_t)0, &fas->f_iblock)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fas_attach: cannot get iblock cookie");
		goto fail;
	}

	mutex_init(&fas->f_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
	cv_init(&fas->f_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize mutex for waitQ
	 */
	mutex_init(&fas->f_waitQ_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
	mutex_init_done++;

	/*
	 * initialize callback mechanism (immediate callback)
	 */
	mutex_enter(&fas_global_mutex);
	if (fas_init_callbacks(fas)) {
		mutex_exit(&fas_global_mutex);
		goto fail;
	}
	mutex_exit(&fas_global_mutex);

	/*
	 * kstat_intr support
	 */
	(void) sprintf(buf, "fas%d", instance);
	fas->f_intr_kstat = kstat_create("fas", instance, buf, "controller", \
	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
	if (fas->f_intr_kstat)
		kstat_install(fas->f_intr_kstat);

	/*
	 * install interrupt handler
	 */
	mutex_enter(FAS_MUTEX(fas));
	if (ddi_add_intr(dip, (uint_t)0, &fas->f_iblock, NULL,
	    fas_intr, (caddr_t)fas)) {
		cmn_err(CE_WARN, "fas: cannot add intr");
		mutex_exit(FAS_MUTEX(fas));
		goto fail;
	}
	intr_added++;

	/*
	 * initialize fas chip
	 */
	if (fas_init_chip(fas, id)) {
		cmn_err(CE_WARN, "fas: cannot initialize");
		mutex_exit(FAS_MUTEX(fas));
		goto fail;
	}
	mutex_exit(FAS_MUTEX(fas));

	/*
	 * create kmem cache for packets
	 */
	(void) sprintf(buf, "fas%d_cache", instance);
	fas->f_kmem_cache = kmem_cache_create(buf,
	    EXTCMD_SIZE, 8,
	    fas_kmem_cache_constructor, fas_kmem_cache_destructor,
	    NULL, (void *)fas, NULL, 0);
	if (fas->f_kmem_cache == NULL) {
		cmn_err(CE_WARN, "fas: cannot create kmem_cache");
		goto fail;
	}

	/*
	 * at this point, we are not going to fail the attach
	 * so there is no need to undo the rest:
	 *
	 * add this fas to the list, this makes debugging easier
	 * and fas_watch() needs it to walk thru all fas's
	 */
	rw_enter(&fas_global_rwlock, RW_WRITER);
	if (fas_head == NULL) {
		fas_head = fas;
	} else {
		fas_tail->f_next = fas;
	}
	fas_tail = fas;	/* point to last fas in list */
	rw_exit(&fas_global_rwlock);

	/*
	 * there is one watchdog handler for all driver instances.
	 * start the watchdog if it hasn't been done yet
	 */
	mutex_enter(&fas_global_mutex);
	if (fas_scsi_watchdog_tick == 0) {
		fas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
		if (fas_scsi_watchdog_tick != DEFAULT_WD_TICK) {
			fas_log(fas, CE_NOTE, "?scsi-watchdog-tick=%d\n",
			    fas_scsi_watchdog_tick);
		}
		fas_tick = drv_usectohz((clock_t)
		    fas_scsi_watchdog_tick * 1000000);
		IPRINTF2("fas scsi watchdog tick=%x, fas_tick=%lx\n",
		    fas_scsi_watchdog_tick, fas_tick);
		if (fas_timeout_id == 0) {
			fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
			fas_timeout_initted = 1;
		}
	}
	mutex_exit(&fas_global_mutex);

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail:
	/*
	 * Partial-attach teardown: only undo what the flags say was done,
	 * in the reverse order of setup.
	 */
	cmn_err(CE_WARN, "fas%d: cannot attach", instance);
	if (fas) {
		for (slot = 0; slot < N_SLOTS; slot++) {
			struct f_slots *active = fas->f_active[slot];
			if (active) {
				kmem_free(active, active->f_size);
				fas->f_active[slot] = NULL;
			}
		}
		if (mutex_init_done) {
			mutex_destroy(&fas->f_mutex);
			mutex_destroy(&fas->f_waitQ_mutex);
			cv_destroy(&fas->f_cv);
		}
		if (intr_added) {
			ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);
		}
		/*
		 * kstat_intr support
		 */
		if (fas->f_intr_kstat) {
			kstat_delete(fas->f_intr_kstat);
		}
		if (hba_attached) {
			(void) scsi_hba_detach(dip);
		}
		if (tran) {
			scsi_hba_tran_free(tran);
		}
		if (fas->f_kmem_cache) {
			kmem_cache_destroy(fas->f_kmem_cache);
		}
		if (fas->f_cmdarea) {
			if (bound_handle) {
				(void) ddi_dma_unbind_handle(fas->f_dmahandle);
			}
			ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
		}
		if (fas->f_dmahandle) {
			ddi_dma_free_handle(&fas->f_dmahandle);
		}
		fas_destroy_callbacks(fas);
		if (fas->f_regs_acc_handle) {
			ddi_regs_map_free(&fas->f_regs_acc_handle);
		}
		if (fas->f_dmar_acc_handle) {
			ddi_regs_map_free(&fas->f_dmar_acc_handle);
		}
		ddi_soft_state_free(fas_state, instance);

		ddi_remove_minor_node(dip, NULL);
	}
	return (DDI_FAILURE);
}

/*
 * detach(9E) entry point: DDI_DETACH delegates to fas_dr_detach();
 * DDI_SUSPEND quiesces the instance and disables DMA/chip interrupts.
 */
/*ARGSUSED*/
static int
fas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct fas *fas, *nfas;
	scsi_hba_tran_t *tran;

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	switch (cmd) {
	case DDI_DETACH:
		return (fas_dr_detach(dip));

	case DDI_SUSPEND:
		if ((tran = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		fas = TRAN2FAS(tran);
		if (!fas) {
			return (DDI_FAILURE);
		}

		mutex_enter(FAS_MUTEX(fas));

		fas->f_suspended = 1;

		if (fas->f_ncmds) {
			(void) fas_reset_bus(fas);
			(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
		}
		/*
		 * disable dma and fas interrupt
		 */
		fas->f_dma_csr &= ~DMA_INTEN;
		fas->f_dma_csr &= ~DMA_ENDVMA;
		fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);

		mutex_exit(FAS_MUTEX(fas));

		if (fas->f_quiesce_timeid) {
			(void) untimeout(fas->f_quiesce_timeid);
			fas->f_quiesce_timeid = 0;
		}

		if (fas->f_restart_cmd_timeid) {
			(void) untimeout(fas->f_restart_cmd_timeid);
			fas->f_restart_cmd_timeid = 0;
		}

		/* Last fas?
*/ 1120 rw_enter(&fas_global_rwlock, RW_WRITER); 1121 for (nfas = fas_head; nfas; nfas = nfas->f_next) { 1122 if (!nfas->f_suspended) { 1123 rw_exit(&fas_global_rwlock); 1124 return (DDI_SUCCESS); 1125 } 1126 } 1127 rw_exit(&fas_global_rwlock); 1128 1129 mutex_enter(&fas_global_mutex); 1130 if (fas_timeout_id != 0) { 1131 timeout_id_t tid = fas_timeout_id; 1132 fas_timeout_id = 0; 1133 fas_timeout_initted = 0; 1134 mutex_exit(&fas_global_mutex); 1135 (void) untimeout(tid); 1136 } else { 1137 mutex_exit(&fas_global_mutex); 1138 } 1139 1140 mutex_enter(&fas_global_mutex); 1141 if (fas_reset_watch) { 1142 timeout_id_t tid = fas_reset_watch; 1143 fas_reset_watch = 0; 1144 mutex_exit(&fas_global_mutex); 1145 (void) untimeout(tid); 1146 } else { 1147 mutex_exit(&fas_global_mutex); 1148 } 1149 1150 return (DDI_SUCCESS); 1151 1152 default: 1153 return (DDI_FAILURE); 1154 } 1155 _NOTE(NOT_REACHED) 1156 /* NOTREACHED */ 1157 } 1158 1159 static int 1160 fas_dr_detach(dev_info_t *dip) 1161 { 1162 struct fas *fas, *f; 1163 scsi_hba_tran_t *tran; 1164 short slot; 1165 int i, j; 1166 1167 if ((tran = ddi_get_driver_private(dip)) == NULL) 1168 return (DDI_FAILURE); 1169 1170 fas = TRAN2FAS(tran); 1171 if (!fas) { 1172 return (DDI_FAILURE); 1173 } 1174 1175 /* 1176 * disable interrupts 1177 */ 1178 fas->f_dma_csr &= ~DMA_INTEN; 1179 fas->f_dma->dma_csr = fas->f_dma_csr; 1180 ddi_remove_intr(dip, (uint_t)0, fas->f_iblock); 1181 1182 /* 1183 * Remove device instance from the global linked list 1184 */ 1185 rw_enter(&fas_global_rwlock, RW_WRITER); 1186 1187 if (fas_head == fas) { 1188 f = fas_head = fas->f_next; 1189 } else { 1190 for (f = fas_head; f != (struct fas *)NULL; f = f->f_next) { 1191 if (f->f_next == fas) { 1192 f->f_next = fas->f_next; 1193 break; 1194 } 1195 } 1196 1197 /* 1198 * Instance not in softc list. Since the 1199 * instance is not there in softc list, don't 1200 * enable interrupts, the instance is effectively 1201 * unusable. 
1202 */ 1203 if (f == (struct fas *)NULL) { 1204 cmn_err(CE_WARN, "fas_dr_detach: fas instance not" 1205 " in softc list!"); 1206 rw_exit(&fas_global_rwlock); 1207 return (DDI_FAILURE); 1208 } 1209 1210 1211 } 1212 1213 if (fas_tail == fas) 1214 fas_tail = f; 1215 1216 rw_exit(&fas_global_rwlock); 1217 1218 if (fas->f_intr_kstat) 1219 kstat_delete(fas->f_intr_kstat); 1220 1221 fas_destroy_callbacks(fas); 1222 1223 scsi_hba_reset_notify_tear_down(fas->f_reset_notify_listf); 1224 1225 mutex_enter(&fas_global_mutex); 1226 /* 1227 * destroy any outstanding tagged command info 1228 */ 1229 for (slot = 0; slot < N_SLOTS; slot++) { 1230 struct f_slots *active = fas->f_active[slot]; 1231 if (active) { 1232 ushort_t tag; 1233 for (tag = 0; tag < active->f_n_slots; tag++) { 1234 struct fas_cmd *sp = active->f_slot[tag]; 1235 if (sp) { 1236 struct scsi_pkt *pkt = sp->cmd_pkt; 1237 if (pkt) { 1238 (void) fas_scsi_destroy_pkt( 1239 &pkt->pkt_address, pkt); 1240 } 1241 /* sp freed in fas_scsi_destroy_pkt */ 1242 active->f_slot[tag] = NULL; 1243 } 1244 } 1245 kmem_free(active, active->f_size); 1246 fas->f_active[slot] = NULL; 1247 } 1248 ASSERT(fas->f_tcmds[slot] == 0); 1249 } 1250 1251 /* 1252 * disallow timeout thread rescheduling 1253 */ 1254 fas->f_flags |= FAS_FLG_NOTIMEOUTS; 1255 mutex_exit(&fas_global_mutex); 1256 1257 if (fas->f_quiesce_timeid) { 1258 (void) untimeout(fas->f_quiesce_timeid); 1259 } 1260 1261 /* 1262 * last fas? ... if active, CANCEL watch threads. 
1263 */ 1264 mutex_enter(&fas_global_mutex); 1265 if (fas_head == (struct fas *)NULL) { 1266 if (fas_timeout_initted) { 1267 timeout_id_t tid = fas_timeout_id; 1268 fas_timeout_initted = 0; 1269 fas_timeout_id = 0; /* don't resched */ 1270 mutex_exit(&fas_global_mutex); 1271 (void) untimeout(tid); 1272 mutex_enter(&fas_global_mutex); 1273 } 1274 1275 if (fas_reset_watch) { 1276 mutex_exit(&fas_global_mutex); 1277 (void) untimeout(fas_reset_watch); 1278 mutex_enter(&fas_global_mutex); 1279 fas_reset_watch = 0; 1280 } 1281 } 1282 mutex_exit(&fas_global_mutex); 1283 1284 if (fas->f_restart_cmd_timeid) { 1285 (void) untimeout(fas->f_restart_cmd_timeid); 1286 fas->f_restart_cmd_timeid = 0; 1287 } 1288 1289 /* 1290 * destroy outstanding ARQ pkts 1291 */ 1292 for (i = 0; i < NTARGETS_WIDE; i++) { 1293 for (j = 0; j < NLUNS_PER_TARGET; j++) { 1294 int slot = i * NLUNS_PER_TARGET | j; 1295 if (fas->f_arq_pkt[slot]) { 1296 struct scsi_address sa; 1297 sa.a_hba_tran = NULL; /* not used */ 1298 sa.a_target = (ushort_t)i; 1299 sa.a_lun = (uchar_t)j; 1300 (void) fas_delete_arq_pkt(fas, &sa); 1301 } 1302 } 1303 } 1304 1305 /* 1306 * Remove device MT locks and CV 1307 */ 1308 mutex_destroy(&fas->f_waitQ_mutex); 1309 mutex_destroy(&fas->f_mutex); 1310 cv_destroy(&fas->f_cv); 1311 1312 /* 1313 * Release miscellaneous device resources 1314 */ 1315 1316 if (fas->f_kmem_cache) { 1317 kmem_cache_destroy(fas->f_kmem_cache); 1318 } 1319 1320 if (fas->f_cmdarea != (uchar_t *)NULL) { 1321 (void) ddi_dma_unbind_handle(fas->f_dmahandle); 1322 ddi_dma_mem_free(&fas->f_cmdarea_acc_handle); 1323 } 1324 1325 if (fas->f_dmahandle != (ddi_dma_handle_t)NULL) { 1326 ddi_dma_free_handle(&fas->f_dmahandle); 1327 } 1328 1329 if (fas->f_regs_acc_handle) { 1330 ddi_regs_map_free(&fas->f_regs_acc_handle); 1331 } 1332 if (fas->f_dmar_acc_handle) { 1333 ddi_regs_map_free(&fas->f_dmar_acc_handle); 1334 } 1335 1336 /* 1337 * Remove properties created during attach() 1338 */ 1339 ddi_prop_remove_all(dip); 1340 
1341 /* 1342 * Delete the DMA limits, transport vectors and remove the device 1343 * links to the scsi_transport layer. 1344 * -- ddi_set_driver_private(dip, NULL) 1345 */ 1346 (void) scsi_hba_detach(dip); 1347 1348 /* 1349 * Free the scsi_transport structure for this device. 1350 */ 1351 scsi_hba_tran_free(tran); 1352 1353 ddi_soft_state_free(fas_state, ddi_get_instance(dip)); 1354 1355 return (DDI_SUCCESS); 1356 } 1357 1358 static int 1359 fas_quiesce_bus(struct fas *fas) 1360 { 1361 mutex_enter(FAS_MUTEX(fas)); 1362 IPRINTF("fas_quiesce: QUIESCEing\n"); 1363 IPRINTF3("fas_quiesce: ncmds (%d) ndisc (%d) state (%d)\n", 1364 fas->f_ncmds, fas->f_ndisc, fas->f_softstate); 1365 fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE); 1366 if (fas_check_outstanding(fas)) { 1367 fas->f_softstate |= FAS_SS_DRAINING; 1368 fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain, 1369 fas, (FAS_QUIESCE_TIMEOUT * drv_usectohz(1000000))); 1370 if (cv_wait_sig(FAS_CV(fas), FAS_MUTEX(fas)) == 0) { 1371 /* 1372 * quiesce has been interrupted. 
1373 */ 1374 IPRINTF("fas_quiesce: abort QUIESCE\n"); 1375 fas->f_softstate &= ~FAS_SS_DRAINING; 1376 fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE); 1377 (void) fas_istart(fas); 1378 if (fas->f_quiesce_timeid != 0) { 1379 mutex_exit(FAS_MUTEX(fas)); 1380 #ifndef __lock_lint /* warlock complains but there is a NOTE on this */ 1381 (void) untimeout(fas->f_quiesce_timeid); 1382 fas->f_quiesce_timeid = 0; 1383 #endif 1384 return (-1); 1385 } 1386 mutex_exit(FAS_MUTEX(fas)); 1387 return (-1); 1388 } else { 1389 IPRINTF("fas_quiesce: bus is QUIESCED\n"); 1390 ASSERT(fas->f_quiesce_timeid == 0); 1391 fas->f_softstate &= ~FAS_SS_DRAINING; 1392 fas->f_softstate |= FAS_SS_QUIESCED; 1393 mutex_exit(FAS_MUTEX(fas)); 1394 return (0); 1395 } 1396 } 1397 IPRINTF("fas_quiesce: bus was not busy QUIESCED\n"); 1398 mutex_exit(FAS_MUTEX(fas)); 1399 return (0); 1400 } 1401 1402 static int 1403 fas_unquiesce_bus(struct fas *fas) 1404 { 1405 mutex_enter(FAS_MUTEX(fas)); 1406 fas->f_softstate &= ~FAS_SS_QUIESCED; 1407 fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE); 1408 (void) fas_istart(fas); 1409 IPRINTF("fas_quiesce: bus has been UNQUIESCED\n"); 1410 mutex_exit(FAS_MUTEX(fas)); 1411 1412 return (0); 1413 } 1414 1415 /* 1416 * invoked from timeout() to check the number of outstanding commands 1417 */ 1418 static void 1419 fas_ncmds_checkdrain(void *arg) 1420 { 1421 struct fas *fas = arg; 1422 1423 mutex_enter(FAS_MUTEX(fas)); 1424 IPRINTF3("fas_checkdrain: ncmds (%d) ndisc (%d) state (%d)\n", 1425 fas->f_ncmds, fas->f_ndisc, fas->f_softstate); 1426 if (fas->f_softstate & FAS_SS_DRAINING) { 1427 fas->f_quiesce_timeid = 0; 1428 if (fas_check_outstanding(fas) == 0) { 1429 IPRINTF("fas_drain: bus has drained\n"); 1430 cv_signal(FAS_CV(fas)); 1431 } else { 1432 /* 1433 * throttle may have been reset by a bus reset 1434 * or fas_runpoll() 1435 * XXX shouldn't be necessary 1436 */ 1437 fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE); 1438 IPRINTF("fas_drain: rescheduling timeout\n"); 
1439 fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain, 1440 fas, (FAS_QUIESCE_TIMEOUT * drv_usectohz(1000000))); 1441 } 1442 } 1443 mutex_exit(FAS_MUTEX(fas)); 1444 } 1445 1446 static int 1447 fas_check_outstanding(struct fas *fas) 1448 { 1449 uint_t slot; 1450 uint_t d = ((fas->f_dslot == 0)? 1 : fas->f_dslot); 1451 int ncmds = 0; 1452 1453 ASSERT(mutex_owned(FAS_MUTEX(fas))); 1454 1455 for (slot = 0; slot < N_SLOTS; slot += d) 1456 ncmds += fas->f_tcmds[slot]; 1457 1458 return (ncmds); 1459 } 1460 1461 1462 #ifdef FASDEBUG 1463 /* 1464 * fas register read/write functions with tracing 1465 */ 1466 static void 1467 fas_reg_tracing(struct fas *fas, int type, int regno, uint32_t what) 1468 { 1469 fas->f_reg_trace[fas->f_reg_trace_index++] = type; 1470 fas->f_reg_trace[fas->f_reg_trace_index++] = regno; 1471 fas->f_reg_trace[fas->f_reg_trace_index++] = what; 1472 fas->f_reg_trace[fas->f_reg_trace_index++] = gethrtime(); 1473 fas->f_reg_trace[fas->f_reg_trace_index] = 0xff; 1474 if (fas->f_reg_trace_index >= REG_TRACE_BUF_SIZE) { 1475 fas->f_reg_trace_index = 0; 1476 } 1477 } 1478 1479 static void 1480 fas_reg_cmd_write(struct fas *fas, uint8_t cmd) 1481 { 1482 volatile struct fasreg *fasreg = fas->f_reg; 1483 int regno = (uintptr_t)&fasreg->fas_cmd - (uintptr_t)fasreg; 1484 1485 fasreg->fas_cmd = cmd; 1486 fas->f_last_cmd = cmd; 1487 1488 EPRINTF1("issuing cmd %x\n", (uchar_t)cmd); 1489 fas_reg_tracing(fas, 0, regno, cmd); 1490 1491 fas->f_reg_cmds++; 1492 } 1493 1494 static void 1495 fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what) 1496 { 1497 int regno = (uintptr_t)p - (uintptr_t)fas->f_reg; 1498 1499 *p = what; 1500 1501 EPRINTF2("writing reg%x = %x\n", regno, what); 1502 fas_reg_tracing(fas, 1, regno, what); 1503 1504 fas->f_reg_writes++; 1505 } 1506 1507 static uint8_t 1508 fas_reg_read(struct fas *fas, volatile uint8_t *p) 1509 { 1510 uint8_t what; 1511 int regno = (uintptr_t)p - (uintptr_t)fas->f_reg; 1512 1513 what = *p; 1514 1515 
EPRINTF2("reading reg%x => %x\n", regno, what); 1516 fas_reg_tracing(fas, 2, regno, what); 1517 1518 fas->f_reg_reads++; 1519 1520 return (what); 1521 } 1522 1523 /* 1524 * dma register access routines 1525 */ 1526 static void 1527 fas_dma_reg_write(struct fas *fas, volatile uint32_t *p, uint32_t what) 1528 { 1529 *p = what; 1530 fas->f_reg_dma_writes++; 1531 1532 #ifdef DMA_REG_TRACING 1533 { 1534 int regno = (uintptr_t)p - (uintptr_t)fas->f_dma; 1535 EPRINTF2("writing dma reg%x = %x\n", regno, what); 1536 fas_reg_tracing(fas, 3, regno, what); 1537 } 1538 #endif 1539 } 1540 1541 static uint32_t 1542 fas_dma_reg_read(struct fas *fas, volatile uint32_t *p) 1543 { 1544 uint32_t what = *p; 1545 fas->f_reg_dma_reads++; 1546 1547 #ifdef DMA_REG_TRACING 1548 { 1549 int regno = (uintptr_t)p - (uintptr_t)fas->f_dma; 1550 EPRINTF2("reading dma reg%x => %x\n", regno, what); 1551 fas_reg_tracing(fas, 4, regno, what); 1552 } 1553 #endif 1554 return (what); 1555 } 1556 #endif 1557 1558 #define FIFO_EMPTY(fas) (fas_reg_read(fas, &fas->f_reg->fas_stat2) & \ 1559 FAS_STAT2_EMPTY) 1560 #define FIFO_CNT(fas) \ 1561 (fas_reg_read(fas, &fas->f_reg->fas_fifo_flag) & FIFO_CNT_MASK) 1562 1563 #ifdef FASDEBUG 1564 static void 1565 fas_assert_atn(struct fas *fas) 1566 { 1567 fas_reg_cmd_write(fas, CMD_SET_ATN); 1568 #ifdef FAS_TEST 1569 if (fas_test_stop > 1) 1570 debug_enter("asserted atn"); 1571 #endif 1572 } 1573 #else 1574 #define fas_assert_atn(fas) fas_reg_cmd_write(fas, CMD_SET_ATN) 1575 #endif 1576 1577 /* 1578 * DMA macros; we use a shadow copy of the dma_csr to save unnecessary 1579 * reads 1580 */ 1581 #define FAS_DMA_WRITE(fas, count, base, cmd) { \ 1582 volatile struct fasreg *fasreg = fas->f_reg; \ 1583 volatile struct dma *dmar = fas->f_dma; \ 1584 ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \ 1585 SET_FAS_COUNT(fasreg, count); \ 1586 fas_reg_cmd_write(fas, cmd); \ 1587 fas_dma_reg_write(fas, &dmar->dma_count, count); \ 1588 fas->f_dma_csr |= \ 1589 
DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \ 1590 fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \ 1591 fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \ 1592 } 1593 1594 #define FAS_DMA_WRITE_SETUP(fas, count, base) { \ 1595 volatile struct fasreg *fasreg = fas->f_reg; \ 1596 volatile struct dma *dmar = fas->f_dma; \ 1597 ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \ 1598 SET_FAS_COUNT(fasreg, count); \ 1599 fas_dma_reg_write(fas, &dmar->dma_count, count); \ 1600 fas->f_dma_csr |= \ 1601 DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \ 1602 fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \ 1603 } 1604 1605 1606 #define FAS_DMA_READ(fas, count, base, dmacount, cmd) { \ 1607 volatile struct fasreg *fasreg = fas->f_reg; \ 1608 volatile struct dma *dmar = fas->f_dma; \ 1609 ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \ 1610 SET_FAS_COUNT(fasreg, count); \ 1611 fas_reg_cmd_write(fas, cmd); \ 1612 fas->f_dma_csr |= \ 1613 (fas->f_dma_csr & ~DMA_WRITE) | DMA_ENDVMA | DMA_DSBL_DRAIN; \ 1614 fas_dma_reg_write(fas, &dmar->dma_count, dmacount); \ 1615 fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \ 1616 fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \ 1617 } 1618 1619 static void 1620 FAS_FLUSH_DMA(struct fas *fas) 1621 { 1622 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET); 1623 fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY| 1624 DMA_DSBL_DRAIN); 1625 fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE); 1626 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0); 1627 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr); 1628 fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0); 1629 } 1630 1631 /* 1632 * FAS_FLUSH_DMA_HARD checks on REQPEND before taking away the reset 1633 */ 1634 static void 1635 FAS_FLUSH_DMA_HARD(struct fas *fas) 1636 { 1637 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET); 1638 fas->f_dma_csr |= 
(DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY| 1639 DMA_DSBL_DRAIN); 1640 fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE); 1641 while (fas_dma_reg_read(fas, &fas->f_dma->dma_csr) & DMA_REQPEND) 1642 ; 1643 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0); 1644 fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr); 1645 fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0); 1646 } 1647 1648 /* 1649 * update period, conf3, offset reg, if necessary 1650 */ 1651 #define FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target) \ 1652 { \ 1653 uchar_t period, offset, conf3; \ 1654 period = fas->f_sync_period[target] & SYNC_PERIOD_MASK; \ 1655 offset = fas->f_offset[target]; \ 1656 conf3 = fas->f_fasconf3[target]; \ 1657 if ((period != fas->f_period_reg_last) || \ 1658 (offset != fas->f_offset_reg_last) || \ 1659 (conf3 != fas->f_fasconf3_reg_last)) { \ 1660 fas->f_period_reg_last = period; \ 1661 fas->f_offset_reg_last = offset; \ 1662 fas->f_fasconf3_reg_last = conf3; \ 1663 fas_reg_write(fas, &fasreg->fas_sync_period, period); \ 1664 fas_reg_write(fas, &fasreg->fas_sync_offset, offset); \ 1665 fas_reg_write(fas, &fasreg->fas_conf3, conf3); \ 1666 } \ 1667 } 1668 1669 /* 1670 * fifo read/write routines 1671 * always read the fifo bytes before reading the interrupt register 1672 */ 1673 1674 static void 1675 fas_read_fifo(struct fas *fas) 1676 { 1677 int stat = fas->f_stat; 1678 volatile struct fasreg *fasreg = fas->f_reg; 1679 int i; 1680 1681 i = fas_reg_read(fas, &fasreg->fas_fifo_flag) & FIFO_CNT_MASK; 1682 EPRINTF2("fas_read_fifo: fifo cnt=%x, stat=%x\n", i, stat); 1683 ASSERT(i <= FIFOSIZE); 1684 1685 fas->f_fifolen = 0; 1686 while (i-- > 0) { 1687 fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas, 1688 &fasreg->fas_fifo_data); 1689 fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas, 1690 &fasreg->fas_fifo_data); 1691 } 1692 if (fas->f_stat2 & FAS_STAT2_ISHUTTLE) { 1693 1694 /* write pad byte */ 1695 fas_reg_write(fas, &fasreg->fas_fifo_data, 0); 1696 fas->f_fifo[fas->f_fifolen++] = 
fas_reg_read(fas, 1697 &fasreg->fas_fifo_data); 1698 /* flush pad byte */ 1699 fas_reg_cmd_write(fas, CMD_FLUSH); 1700 } 1701 EPRINTF2("fas_read_fifo: fifo len=%x, stat2=%x\n", 1702 fas->f_fifolen, stat); 1703 } /* fas_read_fifo */ 1704 1705 static void 1706 fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad) 1707 { 1708 int i; 1709 volatile struct fasreg *fasreg = fas->f_reg; 1710 1711 EPRINTF1("writing fifo %x bytes\n", length); 1712 ASSERT(length <= 15); 1713 fas_reg_cmd_write(fas, CMD_FLUSH); 1714 for (i = 0; i < length; i++) { 1715 fas_reg_write(fas, &fasreg->fas_fifo_data, buf[i]); 1716 if (pad) { 1717 fas_reg_write(fas, &fasreg->fas_fifo_data, 0); 1718 } 1719 } 1720 } 1721 1722 /* 1723 * Hardware and Software internal reset routines 1724 */ 1725 static int 1726 fas_init_chip(struct fas *fas, uchar_t initiator_id) 1727 { 1728 int i; 1729 uchar_t clock_conv; 1730 uchar_t initial_conf3; 1731 uint_t ticks; 1732 static char *prop_cfreq = "clock-frequency"; 1733 1734 /* 1735 * Determine clock frequency of attached FAS chip. 
1736 */ 1737 i = ddi_prop_get_int(DDI_DEV_T_ANY, 1738 fas->f_dev, DDI_PROP_DONTPASS, prop_cfreq, -1); 1739 clock_conv = (i + FIVE_MEG - 1) / FIVE_MEG; 1740 if (clock_conv != CLOCK_40MHZ) { 1741 fas_log(fas, CE_WARN, "Bad clock frequency"); 1742 return (-1); 1743 } 1744 1745 fas->f_clock_conv = clock_conv; 1746 fas->f_clock_cycle = CLOCK_PERIOD(i); 1747 ticks = FAS_CLOCK_TICK(fas); 1748 fas->f_stval = FAS_CLOCK_TIMEOUT(ticks, fas_selection_timeout); 1749 1750 DPRINTF5("%d mhz, clock_conv %d, clock_cycle %d, ticks %d, stval %d\n", 1751 i, fas->f_clock_conv, fas->f_clock_cycle, 1752 ticks, fas->f_stval); 1753 /* 1754 * set up conf registers 1755 */ 1756 fas->f_fasconf |= FAS_CONF_PAREN; 1757 fas->f_fasconf2 = (uchar_t)(FAS_CONF2_FENABLE | FAS_CONF2_XL32); 1758 1759 if (initiator_id < NTARGETS) { 1760 initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO; 1761 } else { 1762 initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO | 1763 FAS_CONF3_IDBIT3; 1764 } 1765 1766 for (i = 0; i < NTARGETS_WIDE; i++) { 1767 fas->f_fasconf3[i] = initial_conf3; 1768 } 1769 1770 /* 1771 * Avoid resetting the scsi bus since this causes a few seconds 1772 * delay per fas in boot and also causes busy conditions in some 1773 * tape devices. 
1774 */ 1775 fas_internal_reset(fas, FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA); 1776 1777 /* 1778 * initialize period and offset for each target 1779 */ 1780 for (i = 0; i < NTARGETS_WIDE; i++) { 1781 if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_SYNC) { 1782 fas->f_offset[i] = fas_default_offset | 1783 fas->f_req_ack_delay; 1784 } else { 1785 fas->f_offset[i] = 0; 1786 } 1787 if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_FAST) { 1788 fas->f_neg_period[i] = 1789 (uchar_t)MIN_SYNC_PERIOD(fas); 1790 } else { 1791 fas->f_neg_period[i] = 1792 (uchar_t)CONVERT_PERIOD(DEFAULT_SYNC_PERIOD); 1793 } 1794 } 1795 return (0); 1796 } 1797 1798 /* 1799 * reset bus, chip, dma, or soft state 1800 */ 1801 static void 1802 fas_internal_reset(struct fas *fas, int reset_action) 1803 { 1804 volatile struct fasreg *fasreg = fas->f_reg; 1805 volatile struct dma *dmar = fas->f_dma; 1806 1807 if (reset_action & FAS_RESET_SCSIBUS) { 1808 fas_reg_cmd_write(fas, CMD_RESET_SCSI); 1809 fas_setup_reset_delay(fas); 1810 } 1811 1812 FAS_FLUSH_DMA_HARD(fas); /* resets and reinits the dma */ 1813 1814 /* 1815 * NOTE: if dma is aborted while active, indefinite hangs 1816 * may occur; it is preferable to stop the target first before 1817 * flushing the dma 1818 */ 1819 if (reset_action & FAS_RESET_DMA) { 1820 int burstsizes = fas->f_dma_attr->dma_attr_burstsizes; 1821 if (burstsizes & BURST64) { 1822 IPRINTF("64 byte burstsize\n"); 1823 fas->f_dma_csr |= DMA_BURST64; 1824 } else if (burstsizes & BURST32) { 1825 IPRINTF("32 byte burstsize\n"); 1826 fas->f_dma_csr |= DMA_BURST32; 1827 } else { 1828 IPRINTF("16 byte burstsize\n"); 1829 } 1830 if ((fas->f_hm_rev > 0x20) && (fas_enable_sbus64) && 1831 (ddi_dma_set_sbus64(fas->f_dmahandle, burstsizes) == 1832 DDI_SUCCESS)) { 1833 IPRINTF("enabled 64 bit sbus\n"); 1834 fas->f_dma_csr |= DMA_WIDE_EN; 1835 } 1836 } 1837 1838 if (reset_action & FAS_RESET_FAS) { 1839 /* 1840 * 2 NOPs with DMA are required here 1841 * id_code is unreliable if we don't do 
this) 1842 */ 1843 uchar_t idcode, fcode; 1844 int dmarev; 1845 1846 fas_reg_cmd_write(fas, CMD_RESET_FAS); 1847 fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA); 1848 fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA); 1849 1850 /* 1851 * Re-load chip configurations 1852 * Only load registers which are not loaded in fas_startcmd() 1853 */ 1854 fas_reg_write(fas, &fasreg->fas_clock_conv, 1855 (fas->f_clock_conv & CLOCK_MASK)); 1856 1857 fas_reg_write(fas, &fasreg->fas_timeout, fas->f_stval); 1858 1859 /* 1860 * enable default configurations 1861 */ 1862 fas->f_idcode = idcode = 1863 fas_reg_read(fas, &fasreg->fas_id_code); 1864 fcode = (uchar_t)(idcode & FAS_FCODE_MASK) >> (uchar_t)3; 1865 fas->f_type = FAS366; 1866 IPRINTF2("Family code %d, revision %d\n", 1867 fcode, (idcode & FAS_REV_MASK)); 1868 dmarev = fas_dma_reg_read(fas, &dmar->dma_csr); 1869 dmarev = (dmarev >> 11) & 0xf; 1870 IPRINTF1("DMA channel revision %d\n", dmarev); 1871 1872 fas_reg_write(fas, &fasreg->fas_conf, fas->f_fasconf); 1873 fas_reg_write(fas, &fasreg->fas_conf2, fas->f_fasconf2); 1874 1875 fas->f_req_ack_delay = DEFAULT_REQ_ACK_DELAY; 1876 1877 /* 1878 * Just in case... 
clear interrupt 1879 */ 1880 (void) fas_reg_read(fas, &fasreg->fas_intr); 1881 } 1882 1883 if (reset_action & FAS_RESET_SOFTC) { 1884 fas->f_wdtr_sent = fas->f_sdtr_sent = 0; 1885 fas->f_wide_known = fas->f_sync_known = 0; 1886 fas->f_wide_enabled = fas->f_sync_enabled = 0; 1887 fas->f_omsglen = 0; 1888 fas->f_cur_msgout[0] = fas->f_last_msgout = 1889 fas->f_last_msgin = INVALID_MSG; 1890 fas->f_abort_msg_sent = fas->f_reset_msg_sent = 0; 1891 fas->f_next_slot = 0; 1892 fas->f_current_sp = NULL; 1893 fas->f_fifolen = 0; 1894 fas->f_fasconf3_reg_last = fas->f_offset_reg_last = 1895 fas->f_period_reg_last = 0xff; 1896 1897 New_state(fas, STATE_FREE); 1898 } 1899 } 1900 1901 1902 #ifdef FASDEBUG 1903 /* 1904 * check if ncmds still reflects the truth 1905 * count all cmds for this driver instance and compare with ncmds 1906 */ 1907 static void 1908 fas_check_ncmds(struct fas *fas) 1909 { 1910 int slot = 0; 1911 ushort_t tag, t; 1912 int n, total = 0; 1913 1914 do { 1915 if (fas->f_active[slot]) { 1916 struct fas_cmd *sp = fas->f_readyf[slot]; 1917 t = fas->f_active[slot]->f_n_slots; 1918 while (sp != 0) { 1919 sp = sp->cmd_forw; 1920 total++; 1921 } 1922 for (n = tag = 0; tag < t; tag++) { 1923 if (fas->f_active[slot]->f_slot[tag] != 0) { 1924 n++; 1925 total++; 1926 } 1927 } 1928 ASSERT(n == fas->f_tcmds[slot]); 1929 } 1930 slot = NEXTSLOT(slot, fas->f_dslot); 1931 } while (slot != 0); 1932 1933 if (total != fas->f_ncmds) { 1934 IPRINTF2("fas_check_ncmds: total=%x, ncmds=%x\n", 1935 total, fas->f_ncmds); 1936 } 1937 ASSERT(fas->f_ncmds >= fas->f_ndisc); 1938 } 1939 #else 1940 #define fas_check_ncmds(fas) 1941 #endif 1942 1943 /* 1944 * SCSA Interface functions 1945 * 1946 * Visible to the external world via the transport structure. 
1947 * 1948 * fas_scsi_abort: abort a current cmd or all cmds for a target 1949 */ 1950 /*ARGSUSED*/ 1951 static int 1952 fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 1953 { 1954 struct fas *fas = ADDR2FAS(ap); 1955 int rval; 1956 1957 IPRINTF2("fas_scsi_abort: target %d.%d\n", ap->a_target, ap->a_lun); 1958 1959 mutex_enter(FAS_MUTEX(fas)); 1960 rval = fas_do_scsi_abort(ap, pkt); 1961 fas_check_waitQ_and_mutex_exit(fas); 1962 return (rval); 1963 } 1964 1965 /* 1966 * reset handling: reset bus or target 1967 */ 1968 /*ARGSUSED*/ 1969 static int 1970 fas_scsi_reset(struct scsi_address *ap, int level) 1971 { 1972 struct fas *fas = ADDR2FAS(ap); 1973 int rval; 1974 1975 IPRINTF3("fas_scsi_reset: target %d.%d, level %d\n", 1976 ap->a_target, ap->a_lun, level); 1977 1978 mutex_enter(FAS_MUTEX(fas)); 1979 rval = fas_do_scsi_reset(ap, level); 1980 fas_check_waitQ_and_mutex_exit(fas); 1981 return (rval); 1982 } 1983 1984 /* 1985 * entry point for reset notification setup, to register or to cancel. 
1986 */ 1987 static int 1988 fas_scsi_reset_notify(struct scsi_address *ap, int flag, 1989 void (*callback)(caddr_t), caddr_t arg) 1990 { 1991 struct fas *fas = ADDR2FAS(ap); 1992 1993 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg, 1994 &fas->f_mutex, &fas->f_reset_notify_listf)); 1995 } 1996 1997 /* 1998 * capability interface 1999 */ 2000 /*ARGSUSED*/ 2001 static int 2002 fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom) 2003 { 2004 struct fas *fas = ADDR2FAS(ap); 2005 DPRINTF3("fas_scsi_getcap: tgt=%x, cap=%s, whom=%x\n", 2006 ap->a_target, cap, whom); 2007 return (fas_commoncap(ap, cap, 0, whom, 0)); 2008 } 2009 2010 /*ARGSUSED*/ 2011 static int 2012 fas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom) 2013 { 2014 struct fas *fas = ADDR2FAS(ap); 2015 IPRINTF4("fas_scsi_setcap: tgt=%x, cap=%s, value=%x, whom=%x\n", 2016 ap->a_target, cap, value, whom); 2017 return (fas_commoncap(ap, cap, value, whom, 1)); 2018 } 2019 2020 /* 2021 * pkt and dma allocation and deallocation 2022 */ 2023 /*ARGSUSED*/ 2024 static void 2025 fas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 2026 { 2027 struct fas_cmd *cmd = PKT2CMD(pkt); 2028 2029 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START, 2030 "fas_scsi_dmafree_start"); 2031 2032 if (cmd->cmd_flags & CFLAG_DMAVALID) { 2033 /* 2034 * Free the mapping. 2035 */ 2036 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle); 2037 cmd->cmd_flags ^= CFLAG_DMAVALID; 2038 } 2039 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END, 2040 "fas_scsi_dmafree_end"); 2041 } 2042 2043 /*ARGSUSED*/ 2044 static void 2045 fas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2046 { 2047 struct fas_cmd *sp = PKT2CMD(pkt); 2048 2049 if (sp->cmd_flags & CFLAG_DMAVALID) { 2050 if (ddi_dma_sync(sp->cmd_dmahandle, 0, 0, 2051 (sp->cmd_flags & CFLAG_DMASEND) ? 
2052 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) != 2053 DDI_SUCCESS) { 2054 fas_log(ADDR2FAS(ap), CE_WARN, 2055 "sync of pkt (%p) failed", (void *)pkt); 2056 } 2057 } 2058 } 2059 2060 /* 2061 * initialize pkt and allocate DVMA resources 2062 */ 2063 static struct scsi_pkt * 2064 fas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 2065 struct buf *bp, int cmdlen, int statuslen, int tgtlen, 2066 int flags, int (*callback)(), caddr_t arg) 2067 { 2068 int kf; 2069 int failure = 1; 2070 struct fas_cmd *cmd; 2071 struct fas *fas = ADDR2FAS(ap); 2072 struct fas_cmd *new_cmd; 2073 int rval; 2074 2075 /* #define FAS_TEST_EXTRN_ALLOC */ 2076 #ifdef FAS_TEST_EXTRN_ALLOC 2077 cmdlen *= 4; statuslen *= 4; tgtlen *= 4; 2078 #endif 2079 /* 2080 * if no pkt was passed then allocate a pkt first 2081 */ 2082 if (pkt == NULL) { 2083 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_START, 2084 "fas_scsi_impl_pktalloc_start"); 2085 2086 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP; 2087 2088 /* 2089 * only one size of pkt (with arq). 
2090 */ 2091 cmd = kmem_cache_alloc(fas->f_kmem_cache, kf); 2092 2093 if (cmd) { 2094 2095 ddi_dma_handle_t save_dma_handle; 2096 2097 save_dma_handle = cmd->cmd_dmahandle; 2098 bzero(cmd, EXTCMD_SIZE); 2099 cmd->cmd_dmahandle = save_dma_handle; 2100 2101 pkt = (struct scsi_pkt *)((uchar_t *)cmd + 2102 sizeof (struct fas_cmd)); 2103 cmd->cmd_pkt = pkt; 2104 pkt->pkt_ha_private = (opaque_t)cmd; 2105 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb; 2106 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb; 2107 pkt->pkt_address = *ap; 2108 2109 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb; 2110 pkt->pkt_private = cmd->cmd_pkt_private; 2111 2112 cmd->cmd_cdblen = cmdlen; 2113 cmd->cmd_scblen = statuslen; 2114 cmd->cmd_privlen = tgtlen; 2115 cmd->cmd_slot = 2116 (Tgt(cmd) * NLUNS_PER_TARGET) | Lun(cmd); 2117 failure = 0; 2118 } 2119 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) || 2120 (tgtlen > PKT_PRIV_LEN) || 2121 (statuslen > EXTCMDS_STATUS_SIZE)) { 2122 if (failure == 0) { 2123 /* 2124 * if extern alloc fails, all will be 2125 * deallocated, including cmd 2126 */ 2127 failure = fas_pkt_alloc_extern(fas, cmd, 2128 cmdlen, tgtlen, statuslen, kf); 2129 } 2130 if (failure) { 2131 /* 2132 * nothing to deallocate so just return 2133 */ 2134 TRACE_0(TR_FAC_SCSI_FAS, 2135 TR_FAS_SCSI_IMPL_PKTALLOC_END, 2136 "fas_scsi_impl_pktalloc_end"); 2137 return (NULL); 2138 } 2139 } 2140 2141 new_cmd = cmd; 2142 2143 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_END, 2144 "fas_scsi_impl_pktalloc_end"); 2145 } else { 2146 cmd = PKT2CMD(pkt); 2147 new_cmd = NULL; 2148 } 2149 2150 /* 2151 * Second step of fas_scsi_init_pkt: 2152 * bind the buf to the handle 2153 */ 2154 if (bp && bp->b_bcount != 0 && 2155 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) { 2156 2157 int cmd_flags, dma_flags; 2158 uint_t dmacookie_count; 2159 2160 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_START, 2161 "fas_scsi_impl_dmaget_start"); 2162 2163 cmd_flags = cmd->cmd_flags; 2164 2165 if (bp->b_flags & B_READ) { 2166 cmd_flags &= 
~CFLAG_DMASEND; 2167 dma_flags = DDI_DMA_READ | DDI_DMA_PARTIAL; 2168 } else { 2169 cmd_flags |= CFLAG_DMASEND; 2170 dma_flags = DDI_DMA_WRITE | DDI_DMA_PARTIAL; 2171 } 2172 if (flags & PKT_CONSISTENT) { 2173 cmd_flags |= CFLAG_CMDIOPB; 2174 dma_flags |= DDI_DMA_CONSISTENT; 2175 } 2176 2177 /* 2178 * bind the handle to the buf 2179 */ 2180 ASSERT(cmd->cmd_dmahandle != NULL); 2181 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp, 2182 dma_flags, callback, arg, &cmd->cmd_dmacookie, 2183 &dmacookie_count); 2184 2185 if (rval && rval != DDI_DMA_PARTIAL_MAP) { 2186 switch (rval) { 2187 case DDI_DMA_NORESOURCES: 2188 bioerror(bp, 0); 2189 break; 2190 case DDI_DMA_BADATTR: 2191 case DDI_DMA_NOMAPPING: 2192 bioerror(bp, EFAULT); 2193 break; 2194 case DDI_DMA_TOOBIG: 2195 default: 2196 bioerror(bp, EINVAL); 2197 break; 2198 } 2199 cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID; 2200 if (new_cmd) { 2201 fas_scsi_destroy_pkt(ap, pkt); 2202 } 2203 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END, 2204 "fas_scsi_impl_dmaget_end"); 2205 return ((struct scsi_pkt *)NULL); 2206 } 2207 ASSERT(dmacookie_count == 1); 2208 cmd->cmd_dmacount = bp->b_bcount; 2209 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID; 2210 2211 ASSERT(cmd->cmd_dmahandle != NULL); 2212 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END, 2213 "fas_scsi_impl_dmaget_end"); 2214 } 2215 2216 return (pkt); 2217 } 2218 2219 /* 2220 * unbind dma resources and deallocate the pkt 2221 */ 2222 static void 2223 fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2224 { 2225 struct fas_cmd *sp = PKT2CMD(pkt); 2226 struct fas *fas = ADDR2FAS(ap); 2227 2228 /* 2229 * fas_scsi_impl_dmafree inline to speed things up 2230 */ 2231 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START, 2232 "fas_scsi_impl_dmafree_start"); 2233 2234 if (sp->cmd_flags & CFLAG_DMAVALID) { 2235 /* 2236 * Free the mapping. 
2237 */ 2238 (void) ddi_dma_unbind_handle(sp->cmd_dmahandle); 2239 sp->cmd_flags ^= CFLAG_DMAVALID; 2240 } 2241 2242 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END, 2243 "fas_scsi_impl_dmafree_end"); 2244 2245 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_START, 2246 "fas_scsi_impl_pktfree_start"); 2247 2248 if ((sp->cmd_flags & 2249 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN | 2250 CFLAG_SCBEXTERN)) == 0) { 2251 sp->cmd_flags = CFLAG_FREE; 2252 kmem_cache_free(fas->f_kmem_cache, (void *)sp); 2253 } else { 2254 fas_pkt_destroy_extern(fas, sp); 2255 } 2256 2257 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_END, 2258 "fas_scsi_impl_pktfree_end"); 2259 } 2260 2261 /* 2262 * allocate and deallocate external pkt space (ie. not part of fas_cmd) for 2263 * non-standard length cdb, pkt_private, status areas 2264 * if allocation fails, then deallocate all external space and the pkt 2265 */ 2266 /* ARGSUSED */ 2267 static int 2268 fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp, 2269 int cmdlen, int tgtlen, int statuslen, int kf) 2270 { 2271 caddr_t cdbp, scbp, tgt; 2272 int failure = 0; 2273 2274 tgt = cdbp = scbp = NULL; 2275 if (cmdlen > sizeof (sp->cmd_cdb)) { 2276 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) { 2277 failure++; 2278 } else { 2279 sp->cmd_pkt->pkt_cdbp = (opaque_t)cdbp; 2280 sp->cmd_flags |= CFLAG_CDBEXTERN; 2281 } 2282 } 2283 if (tgtlen > PKT_PRIV_LEN) { 2284 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) { 2285 failure++; 2286 } else { 2287 sp->cmd_flags |= CFLAG_PRIVEXTERN; 2288 sp->cmd_pkt->pkt_private = tgt; 2289 } 2290 } 2291 if (statuslen > EXTCMDS_STATUS_SIZE) { 2292 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) { 2293 failure++; 2294 } else { 2295 sp->cmd_flags |= CFLAG_SCBEXTERN; 2296 sp->cmd_pkt->pkt_scbp = (opaque_t)scbp; 2297 } 2298 } 2299 if (failure) { 2300 fas_pkt_destroy_extern(fas, sp); 2301 } 2302 return (failure); 2303 } 2304 2305 /* 2306 * deallocate external pkt space and deallocate the 
 * pkt
 */
static void
fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp)
{
	if (sp->cmd_flags & CFLAG_FREE) {
		panic("fas_pkt_destroy_extern: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	/*
	 * free only the pieces that were externally allocated; the
	 * cmd_*len fields hold the sizes recorded at allocation time
	 */
	if (sp->cmd_flags & CFLAG_CDBEXTERN) {
		kmem_free((caddr_t)sp->cmd_pkt->pkt_cdbp,
		    (size_t)sp->cmd_cdblen);
	}
	if (sp->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free((caddr_t)sp->cmd_pkt->pkt_scbp,
		    (size_t)sp->cmd_scblen);
	}
	if (sp->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free((caddr_t)sp->cmd_pkt->pkt_private,
		    (size_t)sp->cmd_privlen);
	}
	sp->cmd_flags = CFLAG_FREE;
	kmem_cache_free(fas->f_kmem_cache, (void *)sp);
}

/*
 * kmem cache constructor and destructor:
 * When constructing, we bzero the cmd and allocate the dma handle
 * When destructing, just free the dma handle
 */
static int
fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct fas_cmd *cmd = buf;
	struct fas *fas = cdrarg;
	/* map kmem sleep semantics onto the DDI DMA wait callbacks */
	int (*callback)(caddr_t) = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP:
	    DDI_DMA_DONTWAIT;

	bzero(buf, EXTCMD_SIZE);

	/*
	 * allocate a dma handle
	 */
	if ((ddi_dma_alloc_handle(fas->f_dev, fas->f_dma_attr, callback,
	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
		return (-1);
	}
	return (0);
}

/*ARGSUSED*/
static void
fas_kmem_cache_destructor(void *buf, void *cdrarg)
{
	struct fas_cmd *cmd = buf;
	if (cmd->cmd_dmahandle) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
	}
}

/*
 * fas_scsi_start - Accept commands for transport
 *
 * Returns a TRAN_* code to the target driver.  Interrupt-driven pkts
 * are either accepted directly (if the fas mutex can be grabbed and the
 * bus is free) or placed on the waitQ; FLAG_NOINTR pkts are always run
 * synchronously via fas_accept_pkt()/fas_runpoll().
 */
static int
fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fas_cmd *sp = PKT2CMD(pkt);
	struct fas *fas = ADDR2FAS(ap);
	int rval;
	int intr = 0;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_START, "fas_scsi_start_start");

#ifdef FAS_TEST
	/* fault-injection knobs: force busy/reject returns for testing */
	if (fas_transport_busy > 0) {
		fas_transport_busy--;
		return (TRAN_BUSY);
	}
	if ((fas_transport_busy_rqs > 0) &&
	    (*(sp->cmd_pkt->pkt_cdbp) == SCMD_REQUEST_SENSE)) {
		fas_transport_busy_rqs--;
		return (TRAN_BUSY);
	}
	if (fas_transport_reject > 0) {
		fas_transport_reject--;
		return (TRAN_BADPKT);
	}
#endif
	/*
	 * prepare packet before taking the mutex
	 */
	rval = fas_prepare_pkt(fas, sp);
	if (rval != TRAN_ACCEPT) {
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_PREPARE_PKT_END,
		    "fas_scsi_start_end (prepare_pkt)");
		return (rval);
	}

	/*
	 * fas mutex can be held for a long time; therefore, if the mutex is
	 * held, we queue the packet in a waitQ; we now should check
	 * the waitQ on every mutex_exit(FAS_MUTEX(fas)) but we really only
	 * need to do this when the bus is free
	 * don't put NOINTR cmds including proxy cmds in waitQ! These
	 * cmds are handled by fas_runpoll()
	 * if the waitQ is non-empty, queue the pkt anyway to preserve
	 * order
	 * the goal is to queue in waitQ as much as possible so at
	 * interrupt time, we can move the packets to readyQ or start
	 * a packet immediately. It helps to do this at interrupt
	 * time because we can then field more interrupts
	 */
	if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {

		/*
		 * if the bus is not free, we will get an interrupt shortly
		 * so we don't want to take the fas mutex but queue up
		 * the packet in the waitQ
		 * also, if the waitQ is non-empty or there is an interrupt
		 * pending then queue up the packet in the waitQ and let the
		 * interrupt handler empty the waitQ
		 */
		mutex_enter(&fas->f_waitQ_mutex);

		if ((fas->f_state != STATE_FREE) ||
		    fas->f_waitf || (intr = INTPENDING(fas))) {
			goto queue_in_waitQ;
		}

		/*
		 * we didn't queue up in the waitQ, so now try to accept
		 * the packet. if we fail to get the fas mutex, go back to
		 * the waitQ again
		 * do not release the waitQ mutex yet because that
		 * leaves a window where the interrupt handler has
		 * emptied the waitQ but not released the fas mutex yet
		 *
		 * the interrupt handler gets the locks in opposite order
		 * but because we do a tryenter, there is no deadlock
		 *
		 * if another thread has the fas mutex then either this
		 * thread or the other may find the bus free and
		 * empty the waitQ
		 */
		if (mutex_tryenter(FAS_MUTEX(fas))) {
			mutex_exit(&fas->f_waitQ_mutex);
			rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
		} else {
			/*
			 * we didn't get the fas mutex so
			 * the packet has to go in the waitQ now
			 */
			goto queue_in_waitQ;
		}
	} else {
		/*
		 * for polled cmds, we have to take the mutex and
		 * start the packet using fas_runpoll()
		 */
		mutex_enter(FAS_MUTEX(fas));
		rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
	}

	/*
	 * if the bus is free then empty waitQ and release the mutex
	 * (this should be unlikely that the bus is still free after
	 * accepting the packet. it may be the relatively unusual case
	 * that we are throttling)
	 */
	if (fas->f_state == STATE_FREE) {
		FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
	} else {
		mutex_exit(FAS_MUTEX(fas));
	}

	/* NOTE(review): 'done' is only reached by fall-through here */
done:
	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
	    "fas_scsi_start_end: fas 0x%p", fas);
	return (rval);

queue_in_waitQ:
	/* append sp to the tail of the waitQ (f_waitQ_mutex is held) */
	if (fas->f_waitf == NULL) {
		fas->f_waitb = fas->f_waitf = sp;
		sp->cmd_forw = NULL;
	} else {
		struct fas_cmd *dp = fas->f_waitb;
		dp->cmd_forw = fas->f_waitb = sp;
		sp->cmd_forw = NULL;
	}

	/*
	 * check again the fas mutex
	 * if there was an interrupt then the interrupt
	 * handler will eventually empty the waitQ
	 */
	if ((intr == 0) && (fas->f_state == STATE_FREE) &&
	    mutex_tryenter(FAS_MUTEX(fas))) {
		/*
		 * double check if the bus is still free
		 * (this actually reduced mutex contention a bit)
		 */
		if (fas->f_state == STATE_FREE) {
			fas_empty_waitQ(fas);
		}
		mutex_exit(FAS_MUTEX(fas));
	}
	mutex_exit(&fas->f_waitQ_mutex);

	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
	    "fas_scsi_start_end: fas 0x%p", fas);
	return (rval);
}

/*
 * prepare the pkt:
 * the pkt may have been resubmitted or just reused so
 * initialize some fields, reset the dma window, and do some checks
 */
static int
fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp)
{
	struct scsi_pkt *pkt = CMD2PKT(sp);

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;
	sp->cmd_age = 0;
	sp->cmd_pkt_flags = pkt->pkt_flags;

	/*
	 * Copy the cdb pointer to the pkt wrapper area as we
	 * might modify this pointer.
 Zero status byte
	 */
	sp->cmd_cdbp = pkt->pkt_cdbp;
	*(pkt->pkt_scbp) = 0;

	if (sp->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_resid = sp->cmd_dmacount;

		/*
		 * if the pkt was resubmitted then the
		 * windows may be at the wrong number
		 */
		if (sp->cmd_cur_win) {
			sp->cmd_cur_win = 0;
			if (fas_set_new_window(fas, sp)) {
				IPRINTF("cannot reset window\n");
				return (TRAN_BADPKT);
			}
		}
		/* restart the transfer from the first cookie address */
		sp->cmd_saved_cur_addr =
		    sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;

		/*
		 * the common case is just one window, we worry
		 * about multiple windows when we run out of the
		 * current window
		 */
		sp->cmd_nwin = sp->cmd_saved_win = 0;
		sp->cmd_data_count = sp->cmd_saved_data_count = 0;

		/*
		 * consistent packets need to be sync'ed first
		 * (only for data going out)
		 */
		if ((sp->cmd_flags & (CFLAG_CMDIOPB | CFLAG_DMASEND)) ==
		    (CFLAG_CMDIOPB | CFLAG_DMASEND)) {
			(void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	sp->cmd_actual_cdblen = sp->cmd_cdblen;

#ifdef FAS_TEST
#ifndef __lock_lint
	/* debug aid: force one tagged cmd to go out untagged */
	if (fas_test_untagged > 0) {
		if (TAGGED(Tgt(sp))) {
			int slot = sp->cmd_slot;
			sp->cmd_pkt_flags &= ~FLAG_TAGMASK;
			sp->cmd_pkt_flags &= ~FLAG_NODISCON;
			/* presumably a marker bit for debugging — verify */
			sp->cmd_pkt_flags |= 0x80000000;
			fas_log(fas, CE_NOTE,
			    "starting untagged cmd, target=%d,"
			    " tcmds=%d, sp=0x%p, throttle=%d\n",
			    Tgt(sp), fas->f_tcmds[slot], (void *)sp,
			    fas->f_throttle[slot]);
			fas_test_untagged = -10;
		}
	}
#endif
#endif

#ifdef FASDEBUG
	if (NOTAG(Tgt(sp)) && (pkt->pkt_flags & FLAG_TAGMASK)) {
		IPRINTF2("tagged packet for non-tagged target %d.%d\n",
		    Tgt(sp), Lun(sp));
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
		    "fas_prepare_pkt_end (tran_badpkt)");
		return (TRAN_BADPKT);
	}

	/*
	 * the SCSA spec states that it is an error to have no
	 * completion function when FLAG_NOINTR is not set
	 */
	if ((pkt->pkt_comp == NULL) &&
	    ((pkt->pkt_flags & FLAG_NOINTR) == 0)) {
		IPRINTF("intr packet with pkt_comp == 0\n");
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
		    "fas_prepare_pkt_end (tran_badpkt)");
		return (TRAN_BADPKT);
	}
#endif /* FASDEBUG */

	if ((fas->f_target_scsi_options[Tgt(sp)] & SCSI_OPTIONS_DR) == 0) {
		/*
		 * no need to reset tag bits since tag queueing will
		 * not be enabled if disconnects are disabled
		 */
		sp->cmd_pkt_flags |= FLAG_NODISCON;
	}

	sp->cmd_flags = (sp->cmd_flags & ~CFLAG_TRANFLAG) |
	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_ACCEPT_END,
	    "fas_prepare_pkt_end (tran_accept)");
	return (TRAN_ACCEPT);
}

/*
 * emptying the waitQ just before releasing FAS_MUTEX is a bit
 * tricky; if we release the waitQ mutex and then the FAS_MUTEX,
 * another thread could queue a cmd in the waitQ, just before
 * the FAS_MUTEX is released. This cmd is then stuck in the waitQ unless
 * another cmd comes in or fas_intr() or fas_watch() checks the waitQ.
 * Therefore, by releasing the FAS_MUTEX before releasing the waitQ mutex,
 * we prevent fas_scsi_start() filling the waitQ
 *
 * By setting NO_TRAN_BUSY, we force fas_accept_pkt() to queue up
 * the waitQ pkts in the readyQ.
 * If a QFull condition occurs, the target driver may set its throttle
 * too high because of the requests queued up in the readyQ but this
 * is not a big problem. The throttle should be periodically reset anyway.
 */
static void
fas_empty_waitQ(struct fas *fas)
{
	struct fas_cmd *sp;
	int rval;
	struct fas_cmd *waitf, *waitb;

	/*
	 * called with f_waitQ_mutex held (and, per the ASSERT in
	 * fas_move_waitQ_to_readyQ, with FAS_MUTEX held as well when
	 * called from there); the waitQ mutex is dropped around each
	 * batch so fas_accept_pkt() can run
	 */
	ASSERT(mutex_owned(&fas->f_waitQ_mutex));
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_START,
	    "fas_empty_waitQ_start");

	while (fas->f_waitf) {

		/* copy waitQ, zero the waitQ and release the mutex */
		waitf = fas->f_waitf;
		waitb = fas->f_waitb;
		fas->f_waitf = fas->f_waitb = NULL;
		mutex_exit(&fas->f_waitQ_mutex);

		do {
			sp = waitf;
			waitf = sp->cmd_forw;
			if (waitb == sp) {
				waitb = NULL;
			}

			rval = fas_accept_pkt(fas, sp, NO_TRAN_BUSY);

			/*
			 * If the packet was rejected for other reasons then
			 * complete it here
			 */
			if (rval != TRAN_ACCEPT) {
				ASSERT(rval != TRAN_BUSY);
				fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
				if (sp->cmd_pkt->pkt_comp) {
					sp->cmd_flags |= CFLAG_FINISHED;
					fas_call_pkt_comp(fas, sp);
				}
			}

			if (INTPENDING(fas)) {
				/*
				 * stop processing the waitQ and put back
				 * the remaining packets on the waitQ
				 */
				mutex_enter(&fas->f_waitQ_mutex);
				if (waitf) {
					ASSERT(waitb != NULL);
					/* prepend remainder ahead of new arrivals */
					waitb->cmd_forw = fas->f_waitf;
					fas->f_waitf = waitf;
					if (fas->f_waitb == NULL) {
						fas->f_waitb = waitb;
					}
				}
				return;
			}
		} while (waitf);

		mutex_enter(&fas->f_waitQ_mutex);
	}
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_END,
	    "fas_empty_waitQ_end");
}

static void
fas_move_waitQ_to_readyQ(struct fas *fas)
{
	/*
	 * this may actually start cmds but it is most likely
	 * that if waitQ is not empty that the bus is not free
	 */
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	mutex_enter(&fas->f_waitQ_mutex);
	fas_empty_waitQ(fas);
	mutex_exit(&fas->f_waitQ_mutex);
}


/*
 * function wrapper for two frequently used macros.
 for the non-critical
 * path we use the function
 */
static void
fas_check_waitQ_and_mutex_exit(struct fas *fas)
{
	_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(fas->f_mutex))
	FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
	FAS_EMPTY_CALLBACKQ(fas);
}

/*
 * fas_accept_pkt():
 * the flag argument is to force fas_accept_pkt to accept the pkt;
 * the caller cannot take the pkt back and it has to be queued up in
 * the readyQ
 *
 * called with FAS_MUTEX held; returns TRAN_ACCEPT, TRAN_BUSY (only
 * when flag == TRAN_BUSY_OK), TRAN_FATAL_ERROR, or the failure code
 * from fas_prepare_pkt()
 */
static int
fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag)
{
	short slot = sp->cmd_slot;
	int rval = TRAN_ACCEPT;

	TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_START, "fas_accept_pkt_start");
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	ASSERT(fas->f_ncmds >= 0 && fas->f_ndisc >= 0);
	ASSERT(fas->f_ncmds >= fas->f_ndisc);
	ASSERT(fas->f_tcmds[slot] >= 0);

	/*
	 * prepare packet for transport if this hasn't been done yet and
	 * do some checks
	 */
	if ((sp->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = fas_prepare_pkt(fas, sp);
		if (rval != TRAN_ACCEPT) {
			IPRINTF1("prepare pkt failed, slot=%x\n", slot);
			sp->cmd_flags &= ~CFLAG_TRANFLAG;
			goto done;
		}
	}

	if (Lun(sp)) {
		EPRINTF("fas_accept_pkt: switching target and lun slot scan\n");
		fas->f_dslot = 1;

		if ((fas->f_active[slot] == NULL) ||
		    ((fas->f_active[slot]->f_n_slots != NTAGS) &&
		    TAGGED(Tgt(sp)))) {
			(void) fas_alloc_active_slots(fas, slot, KM_NOSLEEP);
		}
		if ((fas->f_active[slot] == NULL) ||
		    (NOTAG(Tgt(sp)) && (sp->cmd_pkt_flags & FLAG_TAGMASK))) {
			IPRINTF("fatal error on non-zero lun pkt\n");
			return (TRAN_FATAL_ERROR);
		}
	}

	/*
	 * we accepted the command; increment the count
	 * (we may still reject later if TRAN_BUSY_OK)
	 */
	fas_check_ncmds(fas);
	fas->f_ncmds++;

	/*
	 * if it is a nointr packet, start it now
	 * (NO_INTR pkts are not queued in the waitQ)
	 */
	if (sp->cmd_pkt_flags & FLAG_NOINTR) {
		EPRINTF("starting a nointr cmd\n");
		fas_runpoll(fas, slot, sp);
		sp->cmd_flags &= ~CFLAG_TRANFLAG;
		goto done;
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((fas->f_tcmds[slot] == 0) &&
	    (fas->f_throttle[slot] == DRAIN_THROTTLE)) {
		DPRINTF("reset throttle\n");
		ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
		fas_full_throttle(fas, slot);
	}

	/*
	 * accept the command:
	 * If no readyQ and no bus free, and throttle is OK,
	 * run cmd immediately.
	 */
#ifdef FASDEBUG
	fas->f_total_cmds++;
#endif

	if ((fas->f_readyf[slot] == NULL) && (fas->f_state == STATE_FREE) &&
	    (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
		ASSERT(fas->f_current_sp == 0);
		(void) fas_startcmd(fas, sp);
		/* started directly: skip the fas_istart() retry in 'done' */
		goto exit;
	} else {
		/*
		 * If FLAG_HEAD is set, run cmd if target and bus are
		 * available. if first cmd in ready Q is request sense
		 * then insert after this command, there shouldn't be more
		 * than one request sense.
		 */
		if (sp->cmd_pkt_flags & FLAG_HEAD) {
			struct fas_cmd *ssp = fas->f_readyf[slot];
			EPRINTF("que head\n");
			if (ssp &&
			    *(ssp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
				fas_head_of_readyQ(fas, sp);
			} else if (ssp) {
				/* insert directly after the request sense */
				struct fas_cmd *dp = ssp->cmd_forw;
				ssp->cmd_forw = sp;
				sp->cmd_forw = dp;
				if (fas->f_readyb[slot] == ssp) {
					fas->f_readyb[slot] = sp;
				}
			} else {
				fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
				sp->cmd_forw = NULL;
			}

		/*
		 * for tagged targets, check for qfull condition and
		 * return TRAN_BUSY (if permitted), if throttle has been
		 * exceeded
		 */
		} else if (TAGGED(Tgt(sp)) &&
		    (fas->f_tcmds[slot] >= fas->f_throttle[slot]) &&
		    (fas->f_throttle[slot] > HOLD_THROTTLE) &&
		    (flag == TRAN_BUSY_OK)) {
			IPRINTF2(
			    "transport busy, slot=%x, ncmds=%x\n",
			    slot, fas->f_ncmds);
			rval = TRAN_BUSY;
			/* undo the acceptance bookkeeping done above */
			fas->f_ncmds--;
			sp->cmd_flags &=
			    ~(CFLAG_PREPARED | CFLAG_IN_TRANSPORT);
			goto done;
		/*
		 * append to readyQ or start a new readyQ
		 */
		} else if (fas->f_readyf[slot]) {
			struct fas_cmd *dp = fas->f_readyb[slot];
			ASSERT(dp != 0);
			fas->f_readyb[slot] = sp;
			sp->cmd_forw = NULL;
			dp->cmd_forw = sp;
		} else {
			fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
			sp->cmd_forw = NULL;
		}

	}

done:
	/*
	 * just in case that the bus is free and we haven't
	 * been able to restart for some reason
	 */
	if (fas->f_state == STATE_FREE) {
		(void) fas_istart(fas);
	}

exit:
	fas_check_ncmds(fas);
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_END, "fas_accept_pkt_end");
	return (rval);
}

/*
 * allocate a tag byte and check for tag aging
 *
 * lookup table indexed by (pkt_flags & FLAG_TAGMASK) >> 12 to pick the
 * SCSI tag message type; index 0 and 3 are unused combinations
 */
static char fas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};

static
int
fas_alloc_tag(struct fas *fas, struct fas_cmd *sp)
{
	struct f_slots *tag_slots;
	int tag;
	short slot = sp->cmd_slot;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_START, "fas_alloc_tag_start");
	ASSERT(mutex_owned(FAS_MUTEX(fas)));

	tag_slots = fas->f_active[slot];
	ASSERT(tag_slots->f_n_slots == NTAGS);

alloc_tag:
	/* f_tags is a per-slot round-robin pointer into the tag array */
	tag = (fas->f_active[slot]->f_tags)++;
	if (fas->f_active[slot]->f_tags >= NTAGS) {
		/*
		 * we reserve tag 0 for non-tagged cmds
		 */
		fas->f_active[slot]->f_tags = 1;
	}
	EPRINTF1("tagged cmd, tag = %d\n", tag);

	/* Validate tag, should never fail. */
	if (tag_slots->f_slot[tag] == 0) {
		/*
		 * Store assigned tag and tag queue type.
		 * Note, in case of multiple choice, default to simple queue.
		 */
		ASSERT(tag < NTAGS);
		sp->cmd_tag[1] = (uchar_t)tag;
		sp->cmd_tag[0] = fas_tag_lookup[((sp->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)];
		EPRINTF1("tag= %d\n", tag);
		tag_slots->f_slot[tag] = sp;
		(fas->f_tcmds[slot])++;
		ASSERT(mutex_owned(FAS_MUTEX(fas)));
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
		    "fas_alloc_tag_end");
		return (0);

	} else {
		int age, i;

		/*
		 * Check tag age. If timeouts enabled and
		 * tag age greater than 1, print warning msg.
		 * If timeouts enabled and tag age greater than
		 * age limit, begin draining tag que to check for
		 * lost tag cmd.
		 */
		age = tag_slots->f_slot[tag]->cmd_age++;
		if (age >= fas->f_scsi_tag_age_limit &&
		    tag_slots->f_slot[tag]->cmd_pkt->pkt_time) {
			IPRINTF2("tag %d in use, age= %d\n", tag, age);
			DPRINTF("draining tag queue\n");
			if (fas->f_reset_delay[Tgt(sp)] == 0) {
				fas->f_throttle[slot] = DRAIN_THROTTLE;
			}
		}

		/* If tag in use, scan until a free one is found. */
		for (i = 1; i < NTAGS; i++) {
			tag = fas->f_active[slot]->f_tags;
			if (!tag_slots->f_slot[tag]) {
				EPRINTF1("found free tag %d\n", tag);
				break;
			}
			if (++(fas->f_active[slot]->f_tags) >= NTAGS) {
				/*
				 * we reserve tag 0 for non-tagged cmds
				 */
				fas->f_active[slot]->f_tags = 1;
			}
			EPRINTF1("found in use tag %d\n", tag);
		}

		/*
		 * If no free tags, we're in serious trouble.
		 * the target driver submitted more than 255
		 * requests
		 */
		if (tag_slots->f_slot[tag]) {
			IPRINTF1("slot %x: All tags in use!!!\n", slot);
			goto fail;
		}
		/* free tag found; retry the allocation from the top */
		goto alloc_tag;
	}

fail:
	/* no tag available: requeue at the head so it is retried first */
	fas_head_of_readyQ(fas, sp);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
	    "fas_alloc_tag_end");
	return (-1);
}

/*
 * Internal Search Routine.
 *
 * Search for a command to start.
 */
static int
fas_istart(struct fas *fas)
{
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_START,
	    "fas_istart_start");
	EPRINTF("fas_istart:\n");

	/* only worth scanning when the bus is free and work is pending */
	if (fas->f_state == STATE_FREE && fas->f_ncmds > fas->f_ndisc) {
		(void) fas_ustart(fas);
	}
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_END,
	    "fas_istart_end");
	return (ACTION_RETURN);
}

static int
fas_ustart(struct fas *fas)
{
	struct fas_cmd *sp;
	short slot = fas->f_next_slot;
	short start_slot = slot;
	short dslot = fas->f_dslot;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_START, "fas_ustart_start");
	EPRINTF1("fas_ustart: start_slot=%x\n", fas->f_next_slot);
	ASSERT(fas->f_current_sp == NULL);
	ASSERT(dslot != 0);
	if (dslot == NLUNS_PER_TARGET) {
		ASSERT((slot % NLUNS_PER_TARGET) == 0);
	}

	/*
	 * if readyQ not empty and we are not draining, then we
	 * can start another cmd
	 */
	do {
		/*
		 * If all cmds drained from tag Q, back to full throttle and
		 * start queueing
		 * up new cmds again.
		 */
		if (fas->f_throttle[slot] == DRAIN_THROTTLE &&
		    fas->f_tcmds[slot] == 0) {
			fas_full_throttle(fas, slot);
		}

		if (fas->f_readyf[slot] &&
		    (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
			/* dequeue the head of this slot's readyQ and run it */
			sp = fas->f_readyf[slot];
			fas->f_readyf[slot] = sp->cmd_forw;
			if (sp->cmd_forw == NULL) {
				fas->f_readyb[slot] = NULL;
			}
			fas->f_next_slot = NEXTSLOT(slot, dslot);
			ASSERT((sp->cmd_pkt_flags & FLAG_NOINTR) == 0);
			TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_END,
			    "fas_ustart_end");
			return (fas_startcmd(fas, sp));
		} else {
			slot = NEXTSLOT(slot, dslot);
		}
	} while (slot != start_slot);

	EPRINTF("fas_ustart: no cmds to start\n");
	fas->f_next_slot = NEXTSLOT(slot, dslot);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_NOT_FOUND_END,
	    "fas_ustart_end (not_found)");
	return (FALSE);
}

/*
 * Start a command off
 *
 * Builds the selection byte stream (identify msg, optional tag bytes,
 * cdb) in the f_cmdarea DMA buffer, programs the chip registers and
 * kicks off the (DMA) selection.  Returns TRUE if the cmd was started,
 * FALSE if it had to be deferred (draining for an untagged cmd, or no
 * tag available).
 */
static int
fas_startcmd(struct fas *fas, struct fas_cmd *sp)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	ushort_t  nstate;
	uchar_t cmd, target, lun;
	ushort_t  tshift;
	volatile uchar_t *tp = fas->f_cmdarea;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int slot = sp->cmd_slot;
	struct f_slots *slots = fas->f_active[slot];
	int i, cdb_len;

/* append one byte to the command area and advance the cursor */
#define	LOAD_CMDP	*(tp++)

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_START, "fas_startcmd_start");

	EPRINTF2("fas_startcmd: sp=0x%p flags=%x\n",
	    (void *)sp, sp->cmd_pkt_flags);
	ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
	ASSERT((sp->cmd_flags & CFLAG_COMPLETED) == 0);
	ASSERT(fas->f_current_sp == NULL && fas->f_state == STATE_FREE);
	if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(fas->f_throttle[slot] > 0);
		ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
	}

	target		= Tgt(sp);
	lun		= Lun(sp);

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    TAGGED(target) && fas->f_tcmds[slot] &&
	    ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) &&
	    (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			struct fas_cmd *dp;

			IPRINTF("untagged cmd, start draining\n");

			if (fas->f_reset_delay[Tgt(sp)] == 0) {
				fas->f_throttle[slot] = DRAIN_THROTTLE;
			}
			/* push back on head of readyQ until drained */
			dp = fas->f_readyf[slot];
			fas->f_readyf[slot] = sp;
			sp->cmd_forw = dp;
			if (fas->f_readyb[slot] == NULL) {
				fas->f_readyb[slot] = sp;
			}
		}
		return (FALSE);
	}

	/*
	 * allocate a tag; if no tag available then put request back
	 * on the ready queue and return; eventually a cmd returns and we
	 * get going again or we timeout
	 */
	if (TAGGED(target) && (sp->cmd_pkt_flags & FLAG_TAGMASK)) {
		if (fas_alloc_tag(fas, sp)) {
			return (FALSE);
		}
	} else {
		/*
		 * tag slot 0 is reserved for non-tagged cmds
		 * and should be empty because we have drained
		 */
		if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
			ASSERT(fas->f_active[slot]->f_slot[0] == NULL);
			fas->f_active[slot]->f_slot[0] = sp;
			sp->cmd_tag[1] = 0;
			if (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
				ASSERT(fas->f_tcmds[slot] == 0);
				/*
				 * don't start any other cmd until this
				 * one is finished. The throttle is reset
				 * later in fas_watch()
				 */
				fas->f_throttle[slot] = 1;
			}
			(fas->f_tcmds[slot])++;

		}
	}

	fas->f_current_sp = sp;
	fas->f_omsglen	= 0;
	tshift = 1<<target;
	fas->f_sdtr_sent = fas->f_wdtr_sent =	0;
	cdb_len = sp->cmd_actual_cdblen;

	if (sp->cmd_pkt_flags & FLAG_RENEGOTIATE_WIDE_SYNC) {
		fas_force_renegotiation(fas, Tgt(sp));
	}

	/*
	 * first send identify message, with or without disconnect priv.
	 */
	if (sp->cmd_pkt_flags & FLAG_NODISCON) {
		LOAD_CMDP = fas->f_last_msgout = MSG_IDENTIFY | lun;
		ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
	} else {
		LOAD_CMDP = fas->f_last_msgout = MSG_DR_IDENTIFY | lun;
	}

	/*
	 * normal case, tagQ and we have negotiated wide and sync
	 * or we don't need to renegotiate because wide and sync
	 * have been disabled
	 * (proxy msg's don't have tag flag set)
	 */
	if ((sp->cmd_pkt_flags & FLAG_TAGMASK) &&
	    ((fas->f_wide_known | fas->f_nowide) &
	    (fas->f_sync_known | fas->f_nosync) & tshift)) {

		EPRINTF("tag cmd\n");
		ASSERT((sp->cmd_pkt_flags & FLAG_NODISCON) == 0);

		fas->f_last_msgout = LOAD_CMDP = sp->cmd_tag[0];
		LOAD_CMDP = sp->cmd_tag[1];

		nstate = STATE_SELECT_NORMAL;
		cmd = CMD_SEL_ATN3 | CMD_DMA;

	/*
	 * is this a proxy message
	 */
	} else if (sp->cmd_flags & CFLAG_CMDPROXY) {

		IPRINTF2("proxy cmd, len=%x, msg=%x\n",
		    sp->cmd_cdb[FAS_PROXY_DATA],
		    sp->cmd_cdb[FAS_PROXY_DATA+1]);
		/*
		 * This is a proxy command. It will have
		 * a message to send as part of post-selection
		 * (e.g, MSG_ABORT or MSG_DEVICE_RESET)
		 */
		fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
		for (i = 0; i < (uint_t)fas->f_omsglen; i++) {
			fas->f_cur_msgout[i] =
			    sp->cmd_cdb[FAS_PROXY_DATA+1+i];
		}
		sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
		cdb_len = 0;
		cmd = CMD_SEL_STOP | CMD_DMA;
		nstate = STATE_SELECT_N_SENDMSG;

	/*
	 * always negotiate wide first and sync after wide
	 */
	} else if (((fas->f_wide_known | fas->f_nowide) & tshift) == 0) {
		int i = 0;

		/* First the tag message bytes */
		if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
			fas->f_cur_msgout[i++] = sp->cmd_tag[0];
			fas->f_cur_msgout[i++] = sp->cmd_tag[1];
		}

		/*
		 * Set up to send wide negotiating message.  This is getting
		 * a bit tricky as we dma out the identify message and
		 * send the other messages via the fifo buffer.
		 */
		EPRINTF1("cmd with wdtr msg, tag=%x\n", sp->cmd_tag[1]);

		fas_make_wdtr(fas, i, target, FAS_XFER_WIDTH);

		cdb_len = 0;
		nstate = STATE_SELECT_N_SENDMSG;
		cmd = CMD_SEL_STOP | CMD_DMA;

	/*
	 * negotiate sync xfer rate
	 */
	} else if (((fas->f_sync_known | fas->f_nosync) & tshift) == 0) {
		int i = 0;
		/*
		 * Set up to send sync negotiating message.  This is getting
		 * a bit tricky as we dma out the identify message and
		 * send the other messages via the fifo buffer.
		 */
		if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
			fas->f_cur_msgout[i++] = sp->cmd_tag[0];
			fas->f_cur_msgout[i++] = sp->cmd_tag[1];
		}

		fas_make_sdtr(fas, i, target);

		cdb_len = 0;
		cmd = CMD_SEL_STOP | CMD_DMA;
		nstate = STATE_SELECT_N_SENDMSG;

	/*
	 * normal cmds, no negotiations and not a proxy and no TQ
	 */
	} else {

		ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
		EPRINTF("std. cmd\n");

		nstate = STATE_SELECT_NORMAL;
		cmd = CMD_SEL_ATN | CMD_DMA;
	}

	/*
	 * Now load cdb (if any)
	 */
	for (i = 0; i < cdb_len; i++) {
		LOAD_CMDP = sp->cmd_cdbp[i];
	}

	/*
	 * calculate total dma amount:
	 */
	fas->f_lastcount = (uintptr_t)tp - (uintptr_t)fas->f_cmdarea;

	/*
	 * load target id and enable bus id encoding and 32 bit counter
	 */
	fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
	    (target & 0xf) | FAS_BUSID_ENCODID | FAS_BUSID_32BIT_COUNTER);

	FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);

	fas_reg_cmd_write(fas, CMD_FLUSH);

	FAS_DMA_READ(fas, fas->f_lastcount,
	    fas->f_dmacookie.dmac_address, 16, cmd);

	New_state(fas, (int)nstate);

#ifdef FASDEBUG
	if (DDEBUGGING) {
		fas_dump_cmd(fas, sp);
	}
#endif /* FASDEBUG */

	/*
	 * if timeout == 0, then it has no effect on the timeout
	 * handling; we deal with this when an actual timeout occurs.
	 */
	if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
		ASSERT(fas->f_tcmds[slot] >= 1);
	}
	i = pkt->pkt_time - slots->f_timebase;

	if (i == 0) {
		EPRINTF("dup timeout\n");
		(slots->f_dups)++;
		slots->f_timeout = slots->f_timebase;
	} else if (i > 0) {
		EPRINTF("new timeout\n");
		slots->f_timeout = slots->f_timebase = pkt->pkt_time;
		slots->f_dups = 1;
	}

	fas_check_ncmds(fas);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_END, "fas_startcmd_end");

	return (TRUE);
}

/*
 * Interrupt Entry Point.
 * Poll interrupts until they go away
 */
static uint_t
fas_intr(caddr_t arg)
{
	struct fas *fas = (struct fas *)arg;
	int rval = DDI_INTR_UNCLAIMED;
	int kstat_updated = 0;	/* bump intr kstat at most once per call */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_START, "fas_intr_start");

	do {
		mutex_enter(FAS_MUTEX(fas));

		do {
			if (fas_intr_svc(fas)) {
				/*
				 * do not return immediately here because
				 * we have to guarantee to always empty
				 * the waitQ and callbackQ in the interrupt
				 * handler
				 */
				if (fas->f_polled_intr) {
					rval = DDI_INTR_CLAIMED;
					fas->f_polled_intr = 0;
				}
			} else {
				/* fas_intr_svc() serviced the interrupt */
				rval = DDI_INTR_CLAIMED;
			}
		} while (INTPENDING(fas));

		if (!kstat_updated && fas->f_intr_kstat &&
		    rval == DDI_INTR_CLAIMED) {
			FAS_KSTAT_INTR(fas);
			kstat_updated++;
		}

		/*
		 * check and empty the waitQ and the callbackQ
		 * (FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT also drops the mutex)
		 */
		FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
		FAS_EMPTY_CALLBACKQ(fas);

	} while (INTPENDING(fas));

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_END, "fas_intr_end");

	return (rval);
}

/*
 * General interrupt service routine.
 */
static char *dma_bits = DMA_BITS;

static int
fas_intr_svc(struct fas *fas)
{
	/*
	 * Dispatch table indexed by the ACTION_* code returned from the
	 * individual handlers; see the ASSERT in the start_action loop
	 * below which bounds the index by ACTION_SELECT.
	 */
	static int (*evec[])(struct fas *fas) = {
		fas_finish_select,
		fas_reconnect,
		fas_phasemanage,
		fas_finish,
		fas_reset_recovery,
		fas_istart,
		fas_abort_curcmd,
		fas_reset_bus,
		fas_reset_bus,
		fas_handle_selection
	};
	int action;
	uchar_t intr, stat;
	volatile struct fasreg *fasreg = fas->f_reg;
	int i = 0;	/* iteration count, for tracing only */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_START, "fas_intr_svc_start");

	/*
	 * A read of FAS interrupt register clears interrupt,
	 * so any other volatile information needs to be latched
	 * up prior to reading the interrupt register.
	 */
	fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);

	EPRINTF2("fas_intr_svc: state=%x stat=%x\n", fas->f_state,
	    fas->f_stat);

	/*
	 * this wasn't our interrupt?
	 * (a non-zero return tells fas_intr() this call serviced nothing)
	 */
	if ((fas->f_stat & FAS_STAT_IPEND) == 0) {
		if (fas_check_dma_error(fas)) {
			action = ACTION_RESET;
			goto start_action;
		}
		return (-1);
	}

	/*
	 * if we are reset state, handle this first
	 */
	if (fas->f_state == ACTS_RESET) {
		action = ACTION_FINRST;
		goto start_action;
	}

	/*
	 * check for gross error. fas366 hardware seems to register
	 * the gross error bit when a parity error is found. Make sure
	 * to ignore the gross error bit when a parity error is detected.
	 */
	if ((fas->f_stat & FAS_STAT_GERR) &&
	    (fas->f_stat & FAS_STAT_PERR) == 0) {
		action = fas_handle_gross_err(fas);
		goto start_action;
	}

	/*
	 * now it is finally safe to read the interrupt register
	 * if we haven't done so yet
	 * Note: we don't read step register here but only in
	 * fas_finish_select(). It is not entirely safe but saves
	 * redundant PIOs or extra code in this critical path
	 */
	fas->f_intr =
	    intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);

	/*
	 * read the fifo if there is something there or still in the
	 * input shuttle
	 */
	stat = fas->f_stat & FAS_PHASE_MASK;

	if ((intr & FAS_INT_RESEL) ||
	    ((stat != FAS_PHASE_DATA_IN) && (stat != FAS_PHASE_DATA_OUT) &&
	    ((fas->f_state & STATE_SELECTING) == 0) &&
	    (fas->f_state != ACTS_DATA_DONE) &&
	    (fas->f_state != ACTS_C_CMPLT))) {

		fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);

		if (((fas->f_stat2 & FAS_STAT2_EMPTY) == 0) ||
		    (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
			fas_read_fifo(fas);
		}
	}

	EPRINTF2("fas_intr_svc: intr=%x, stat=%x\n", fas->f_intr, fas->f_stat);
	EPRINTF2("dmacsr=%b\n", fas->f_dma->dma_csr, dma_bits);

	/*
	 * Based upon the current state of the host adapter driver
	 * we should be able to figure out what to do with an interrupt.
	 *
	 * The FAS asserts an interrupt with one or more of 8 possible
	 * bits set in its interrupt register. These conditions are
	 * SCSI bus reset detected, an illegal command fed to the FAS,
	 * one of DISCONNECT, BUS SERVICE, FUNCTION COMPLETE conditions
	 * for the FAS, a Reselection interrupt, or one of Selection
	 * or Selection with Attention.
	 *
	 * Of these possible interrupts, we can deal with some right
	 * here and now, irrespective of the current state of the driver.
	 *
	 * take care of the most likely interrupts first and call the action
	 * immediately
	 */
	if ((intr & (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN|
	    FAS_INT_RESEL)) == 0) {
		/*
		 * The rest of the reasons for an interrupt can
		 * be handled based purely on the state that the driver
		 * is currently in now.
		 */
		if (fas->f_state & STATE_SELECTING) {
			action = fas_finish_select(fas);

		} else if (fas->f_state & STATE_ITPHASES) {
			action = fas_phasemanage(fas);

		} else {
			fas_log(fas, CE_WARN, "spurious interrupt");
			action = ACTION_RETURN;
		}

	} else if ((intr & FAS_INT_RESEL) && ((intr &
	    (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN)) == 0)) {

		/* reselection, possibly preempting our own selection */
		if ((fas->f_state & STATE_SELECTING) == 0) {
			ASSERT(fas->f_state == STATE_FREE);
			action = fas_reconnect(fas);
		} else {
			action = fas_reselect_preempt(fas);
		}

	} else if (intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
		action = fas_illegal_cmd_or_bus_reset(fas);

	} else if (intr & (FAS_INT_SEL|FAS_INT_SELATN)) {
		action = ACTION_SELECT;
	}

start_action:
	/*
	 * keep calling the handler for the returned action until one
	 * of them asks us to return to the caller
	 */
	while (action != ACTION_RETURN) {
		ASSERT((action >= 0) && (action <= ACTION_SELECT));
		TRACE_3(TR_FAC_SCSI_FAS, TR_FASSVC_ACTION_CALL,
		    "fas_intr_svc call: fas 0x%p, action %d (%d)",
		    fas, action, i);
		i++;
		action = (*evec[action])(fas);
	}
exit:
	/* NOTE(review): no "goto exit" is visible in this function */
	TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_END, "fas_intr_svc_end");

	return (0);
}

/*
 * Manage phase transitions.
 */
static int
fas_phasemanage(struct fas *fas)
{
	ushort_t state;
	int action;
	/*
	 * Phase handler table, indexed by (f_state - 1); the bounds
	 * check below rejects STATE_FREE and anything past ACTS_ENDVEC.
	 */
	static int (*pvecs[])(struct fas *fas) = {
		fas_handle_cmd_start,
		fas_handle_cmd_done,
		fas_handle_msg_out_start,
		fas_handle_msg_out_done,
		fas_handle_msg_in_start,
		fas_handle_more_msgin,
		fas_handle_msg_in_done,
		fas_handle_clearing,
		fas_handle_data_start,
		fas_handle_data_done,
		fas_handle_c_cmplt,
		fas_reconnect,
		fas_handle_unknown,
		fas_reset_recovery
	};
	int i = 0;	/* iteration count, for tracing only */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_START,
	    "fas_phasemanage_start");

	/*
	 * keep dispatching on the current state for as long as the
	 * handlers ask for another phasemanage round
	 */
	do {
		EPRINTF1("fas_phasemanage: %s\n",
		    fas_state_name(fas->f_state & STATE_ITPHASES));

		TRACE_2(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_CALL,
		    "fas_phasemanage_call: fas 0x%p (%d)", fas, i++);

		state = fas->f_state;

		if (!(state == STATE_FREE || state > ACTS_ENDVEC)) {
			ASSERT(pvecs[state-1] != NULL);
			action = (*pvecs[state-1]) (fas);
		} else {
			fas_log(fas, CE_WARN, "lost state in phasemanage");
			action = ACTION_ABORT_ALLCMDS;
		}

	} while (action == ACTION_PHASEMANAGE);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_END,
	    "fas_phasemanage_end");
	return (action);
}

/*
 * remove a cmd from active list and if timeout flag is set, then
 * adjust timeouts; if the same cmd will be resubmitted soon, don't
 * bother to adjust timeouts (ie. don't set this flag)
 */
static void
fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int new_timeout_flag)
{
	int tag = sp->cmd_tag[1];
	int slot = sp->cmd_slot;
	struct f_slots *tag_slots = fas->f_active[slot];

	ASSERT(sp != NULL);
	EPRINTF4("remove tag %d slot %d for target %d.%d\n",
	    tag, slot, Tgt(sp), Lun(sp));

	/* unlink from the active slot and drop the per-slot cmd count */
	if (sp == tag_slots->f_slot[tag]) {
		tag_slots->f_slot[tag] = NULL;
		fas->f_tcmds[slot]--;
	}
	if (fas->f_current_sp == sp) {
		fas->f_current_sp = NULL;
	}

	ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);

	if (new_timeout_flag != NEW_TIMEOUT) {
		return;
	}

	/*
	 * Figure out what to set tag Q timeout for...
	 *
	 * Optimize: If we have duplicate's of same timeout
	 * we're using, then we'll use it again until we run
	 * out of duplicates. This should be the normal case
	 * for block and raw I/O.
	 * If no duplicates, we have to scan through tag que and
	 * find the longest timeout value and use it. This is
	 * going to take a while...
	 */
	if (sp->cmd_pkt->pkt_time == tag_slots->f_timebase) {
		if (--(tag_slots->f_dups) <= 0) {
			if (fas->f_tcmds[slot]) {
				struct fas_cmd *ssp;
				uint_t n = 0;
				ushort_t t = tag_slots->f_n_slots;
				ushort_t i;
				/*
				 * This crude check assumes we don't do
				 * this too often which seems reasonable
				 * for block and raw I/O.
				 */
				for (i = 0; i < t; i++) {
					ssp = tag_slots->f_slot[i];
					if (ssp &&
					    (ssp->cmd_pkt->pkt_time > n)) {
						n = ssp->cmd_pkt->pkt_time;
						tag_slots->f_dups = 1;
					} else if (ssp &&
					    (ssp->cmd_pkt->pkt_time == n)) {
						tag_slots->f_dups++;
					}
				}
				tag_slots->f_timebase = n;
				EPRINTF1("searching, new_timeout= %d\n", n);
			} else {
				/* no commands left in this slot */
				tag_slots->f_dups = 0;
				tag_slots->f_timebase = 0;
			}
		}
	}
	tag_slots->f_timeout = tag_slots->f_timebase;

	ASSERT(fas->f_ncmds >= fas->f_ndisc);
}

/*
 * decrement f_ncmds and f_ndisc for this cmd before completing
 * (idempotent: CFLAG_FINISHED guards against double decrement)
 */
static void
fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp)
{
	ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
	if ((sp->cmd_flags & CFLAG_FINISHED) == 0) {
		fas->f_ncmds--;
		if (sp->cmd_flags & CFLAG_CMDDISC) {
			fas->f_ndisc--;
		}
		sp->cmd_flags |= CFLAG_FINISHED;
		sp->cmd_flags &= ~CFLAG_CMDDISC;
	}
	ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
	ASSERT(fas->f_ncmds >= fas->f_ndisc);
}

/*
 * Most commonly called phase handlers:
 *
 * Finish routines
 */
static int
fas_finish(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int action = ACTION_SEARCH;
	struct scsi_status *status =
	    (struct scsi_status *)sp->cmd_pkt->pkt_scbp;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_START,
	    "fas_finish_start");
	EPRINTF("fas_finish\n");

#ifdef FAS_TEST
	if (fas_test_stop && (sp->cmd_pkt_flags & 0x80000000)) {
		debug_enter("untagged cmd completed");
	}
#endif

	/*
	 * immediately enable reselects
	 */
	fas_reg_cmd_write(fas, CMD_EN_RESEL);
	if (status->sts_chk) {
		/*
		 * In the case that we are getting a check condition
		 * clear our knowledge of synchronous capabilities.
		 * This will unambiguously force a renegotiation
		 * prior to any possible data transfer (we hope),
		 * including the data transfer for a UNIT ATTENTION
		 * condition generated by somebody powering on and
		 * off a target.
		 */
		fas_force_renegotiation(fas, Tgt(sp));
	}

	/*
	 * backoff sync/wide if there were parity errors
	 */
	if (sp->cmd_pkt->pkt_statistics & STAT_PERR) {
		fas_sync_wide_backoff(fas, sp, sp->cmd_slot);
#ifdef FAS_TEST
		if (fas_test_stop) {
			debug_enter("parity error");
		}
#endif
	}

	/*
	 * Free from active list and update counts
	 * We need to clean up this cmd now, just in case fas_ustart()
	 * hits a reset or other fatal transport error
	 */
	fas_check_ncmds(fas);
	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_check_ncmds(fas);

	/*
	 * go to state free and try to start a new cmd now
	 */
	New_state(fas, STATE_FREE);

	if ((fas->f_ncmds > fas->f_ndisc) && (*((char *)status) == 0) &&
	    (INTPENDING(fas) == 0)) {
		if (fas_ustart(fas)) {
			action = ACTION_RETURN;
		}
	}

	/*
	 * if there was a data xfer then calculate residue and
	 * sync data for consistent memory xfers
	 */
	if (pkt->pkt_state & STATE_XFERRED_DATA) {
		pkt->pkt_resid = sp->cmd_dmacount - sp->cmd_data_count;
		if (sp->cmd_flags & CFLAG_CMDIOPB) {
			(void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
			    DDI_DMA_SYNC_FORCPU);
		}
		if (pkt->pkt_resid) {
			IPRINTF3("%d.%d finishes with %ld resid\n",
			    Tgt(sp), Lun(sp), pkt->pkt_resid);
		}
	}

	if (sp->cmd_pkt_flags & FLAG_NOINTR) {
		fas_call_pkt_comp(fas, sp);
		action = ACTION_RETURN;
	} else {
		/*
		 * start an autorequest sense if there was a check condition.
		 * if arq has not been enabled, fas_handle_sts_chk will
		 * do the callback
		 */
		if (status->sts_chk) {
			if (fas_handle_sts_chk(fas, sp)) {
				/*
				 * we can't start an arq because one is
				 * already in progress. the target is
				 * probably confused
				 */
				action = ACTION_ABORT_CURCMD;
			}
		} else if ((*((char *)status) & STATUS_MASK) ==
		    STATUS_QFULL) {
			fas_handle_qfull(fas, sp);
		} else {
#ifdef FAS_TEST
			if (fas_arqs_failure && (status->sts_chk == 0)) {
				struct scsi_arq_status *arqstat;
				status->sts_chk = 1;
				arqstat = (struct scsi_arq_status *)
				    (sp->cmd_pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_TRAN_ERR;
				sp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
				fas_arqs_failure = 0;
			}
			if (fas_tran_err) {
				sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
				fas_tran_err = 0;
			}
#endif
			fas_call_pkt_comp(fas, sp);
		}
	}

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_END, "fas_finish_end");
	return (action);
}

/*
 * Complete the process of selecting a target
 */
static int
fas_finish_select(struct fas *fas)
{
	volatile struct dma *dmar = fas->f_dma;
	struct fas_cmd *sp = fas->f_current_sp;
	uchar_t intr = fas->f_intr;
	uchar_t step;

	step = fas_reg_read(fas, &fas->f_reg->fas_step) & FAS_STEP_MASK;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_START,
	    "fas_finish_select_start");
	EPRINTF("fas_finish_select:\n");
	ASSERT(sp != 0);

	/*
	 * Check for DMA gate array errors
	 */
	if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr))
	    & DMA_ERRPEND) {
		/*
		 * It would be desirable to set the ATN* line and attempt to
		 * do the whole schmear of INITIATOR DETECTED ERROR here,
		 * but that is too hard to do at present.
		 */
		fas_log(fas, CE_WARN,
		    "Unrecoverable DMA error during selection");
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET1_END,
		    "fas_finish_select_end (ACTION_RESET1)");
		return (ACTION_RESET);
	}

	/*
	 * Shut off DMA gate array
	 */
	FAS_FLUSH_DMA(fas);

	/*
	 * Did something respond to selection?
	 */
	if (intr == (FAS_INT_BUS|FAS_INT_FCMP)) {
		/*
		 * We succesfully selected a target (we think).
		 * Now we figure out how botched things are
		 * based upon the kind of selection we were
		 * doing and the state of the step register.
		 */
		switch (step) {
		case FAS_STEP_ARBSEL:
			/*
			 * In this case, we selected the target, but went
			 * neither into MESSAGE OUT nor COMMAND phase.
			 * However, this isn't a fatal error, so we just
			 * drive on.
			 *
			 * This might be a good point to note that we have
			 * a target that appears to not accomodate
			 * disconnecting,
			 * but it really isn't worth the effort to distinguish
			 * such targets fasecially from others.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_SENTID:
			/*
			 * In this case, we selected the target and sent
			 * message byte and have stopped with ATN* still on.
			 * This case should only occur if we use the SELECT
			 * AND STOP command.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_NOTCMD:
			/*
			 * In this case, we either didn't transition to command
			 * phase, or,
			 * if we were using the SELECT WITH ATN3 command,
			 * we possibly didn't send all message bytes.
			 */
			break;

		case FAS_STEP_PCMD:
			/*
			 * In this case, not all command bytes transferred.
			 */
			/* FALLTHROUGH */

		case FAS_STEP_DONE:
			/*
			 * This is the usual 'good' completion point.
			 * If we sent message byte(s), we subtract
			 * off the number of message bytes that were
			 * ahead of the command.
			 */
			sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
			break;

		default:
			fas_log(fas, CE_WARN,
			    "bad sequence step (0x%x) in selection", step);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_FINISH_SELECT_RESET3_END,
			    "fas_finish_select_end (ACTION_RESET3)");
			return (ACTION_RESET);
		}

		/*
		 * OR in common state...
		 */
		sp->cmd_pkt->pkt_state |= (STATE_GOT_BUS|STATE_GOT_TARGET);

		/*
		 * data pointer initialization has already been done
		 */
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_ACTION3_END,
		    "fas_finish_select_end (action3)");
		return (fas_handle_unknown(fas));

	} else if (intr == FAS_INT_DISCON) {
		/*
		 * make sure we negotiate when this target comes
		 * on line later on
		 */
		fas_force_renegotiation(fas, Tgt(sp));

		fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
		sp->cmd_pkt->pkt_state |= STATE_GOT_BUS;

		/*
		 * Set the throttle to DRAIN_THROTTLE to make
		 * sure any disconnected commands will get timed out
		 * incase the drive dies
		 */

		if (fas->f_reset_delay[Tgt(sp)] == 0) {
			fas->f_throttle[sp->cmd_slot] = DRAIN_THROTTLE;
		}

		fas_set_pkt_reason(fas, sp, CMD_INCOMPLETE, 0);

		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_FINISH_END,
		    "fas_finish_select_end (ACTION_FINISH)");
		return (ACTION_FINISH);
	} else {
		fas_printstate(fas, "undetermined selection failure");
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET2_END,
		    "fas_finish_select_end (ACTION_RESET2)");
		return (ACTION_RESET);
	}
	_NOTE(NOT_REACHED)
	/* NOTREACHED */
}

/*
 * a selection got preempted by a reselection; shut down dma
 * and put back cmd in the ready queue unless NOINTR
 */
static int
fas_reselect_preempt(struct fas *fas)
{
	int rval;

	/*
	 * A reselection attempt glotzed our selection attempt.
	 * we put request back in the ready queue
	 */
	struct fas_cmd *sp = fas->f_current_sp;

	/*
	 * Shut off DMA gate array
	 */
	FAS_FLUSH_DMA(fas);

	/*
	 * service the reconnect now and clean up later
	 */
	New_state(fas, STATE_FREE);
	rval = fas_reconnect(fas);

	/*
	 * If selection for a non-tagged command is preempted, the
	 * command could be stuck because throttle was set to DRAIN,
	 * and a disconnected command timeout follows.
	 */
	if ((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0)
		fas->f_throttle[sp->cmd_slot] = 1;

	if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
		fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	}

	/*
	 * if we attempted to renegotiate on this cmd, undo this now
	 */
	if (fas->f_wdtr_sent) {
		fas->f_wide_known &= ~(1<<Tgt(sp));
		fas->f_wdtr_sent = 0;
	}
	if (fas->f_sdtr_sent) {
		fas->f_sync_known &= ~(1<<Tgt(sp));
		fas->f_sdtr_sent = 0;
	}

	/* requeue at the head so it goes out again first */
	fas_head_of_readyQ(fas, sp);

	return (rval);
}

/*
 * Handle the reconnection of a target
 */
static int
fas_reconnect(struct fas *fas)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	struct fas_cmd *sp = NULL;
	uchar_t target, lun;
	uchar_t tmp;
	uchar_t slot;
	char *bad_reselect = NULL;	/* non-NULL => describes the failure */

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_START,
	    "fas_reconnect_start");
	EPRINTF("fas_reconnect:\n");

	fas_check_ncmds(fas);

	switch (fas->f_state) {
	default:
		/*
		 * Pick up target id from fifo
		 *
		 * There should only be the reselecting target's id
		 * and an identify message in the fifo.
		 */
		target = fas->f_fifo[0];

		/*
		 * we know the target so update period, conf3,
		 * offset reg, if necessary, and accept the msg
		 */
		FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);

		/*
		 * now we can accept the message. an untagged
		 * target will go immediately into data phase so
		 * the period/offset/conf3 registers need to be
		 * updated before accepting the message
		 */
		fas_reg_cmd_write(fas, CMD_MSG_ACPT);

		if (fas->f_fifolen != 2) {
			bad_reselect = "bad reselect bytes";
			break;
		}

		/*
		 * normal initial reconnect; we get another interrupt later
		 * for the tag
		 */
		New_state(fas, ACTS_RESEL);

		if (fas->f_stat & FAS_STAT_PERR) {
			break;
		}

		/*
		 * Check sanity of message.
		 */
		tmp = fas->f_fifo[1];
		fas->f_last_msgin = tmp;

		if (!(IS_IDENTIFY_MSG(tmp)) || (tmp & INI_CAN_DISCON)) {
			bad_reselect = "bad identify msg";
			break;
		}

		lun = tmp & (NLUNS_PER_TARGET-1);

		EPRINTF2("fas_reconnect: target=%x, idmsg=%x\n",
		    target, tmp);

		fas->f_resel_slot = slot = (target * NLUNS_PER_TARGET) | lun;

		fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
		    (target & 0xf) | FAS_BUSID_ENCODID |
		    FAS_BUSID_32BIT_COUNTER);

		/*
		 * If tag queueing in use, DMA in tag.
		 * Otherwise, we're ready to go.
		 * if tag 0 slot is non-empty, a non-tagged cmd is
		 * reconnecting
		 */
		if (TAGGED(target) && fas->f_tcmds[slot] &&
		    (fas->f_active[slot]->f_slot[0] == NULL)) {
			volatile uchar_t *c =
			    (uchar_t *)fas->f_cmdarea;

			/*
			 * If we've been doing tagged queueing and this
			 * request doesn't do it,
			 * maybe it was disabled for this one. This is rather
			 * dangerous as it blows all pending tagged cmds away.
			 * But if target is confused, then we'll blow up
			 * shortly.
			 */
			*c++ = INVALID_MSG;
			*c = INVALID_MSG;

			FAS_DMA_WRITE_SETUP(fas, 2,
			    fas->f_dmacookie.dmac_address);

			/*
			 * For tagged queuing, we should still be in msgin
			 * phase.
			 * If not, then either we aren't running tagged
			 * queueing like we thought or the target died.
			 */
			if (INTPENDING(fas) == 0) {
				EPRINTF1("slow reconnect, slot=%x\n", slot);
				TRACE_0(TR_FAC_SCSI_FAS,
				    TR_FAS_RECONNECT_RETURN1_END,
				    "fas_reconnect_end (_RETURN1)");
				return (ACTION_RETURN);
			}

			fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
			fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
			if (fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET)) {
				return (fas_illegal_cmd_or_bus_reset(fas));
			}

			if ((fas->f_stat & FAS_PHASE_MASK) !=
			    FAS_PHASE_MSG_IN) {
				bad_reselect = "not in msgin phase";
				break;
			}

			if (fas->f_intr & FAS_INT_DISCON) {
				bad_reselect = "unexpected bus free";
				break;
			}
		} else {
			/* untagged: the cmd lives in tag slot 0 */
			fas->f_current_sp = sp = fas->f_active[slot]->f_slot[0];
			break;
		}
		/*FALLTHROUGH*/

	case ACTS_RESEL:
	{
		volatile uchar_t *c =
		    (uchar_t *)fas->f_cmdarea;
		struct f_slots *tag_slots;
		int id, tag;
		uint_t i;

		slot = fas->f_resel_slot;
		target = slot/NLUNS_PER_TARGET;

		if ((fas->f_stat & FAS_PHASE_MASK) !=
		    FAS_PHASE_MSG_IN) {
			IPRINTF1("no tag for slot %x\n", slot);
			if (fas->f_intr & ~(FAS_INT_BUS |
			    FAS_INT_FCMP)) {
				New_state(fas, ACTS_UNKNOWN);
				TRACE_0(TR_FAC_SCSI_FAS,
				    TR_FAS_RECONNECT_PHASEMANAGE_END,
				    "fas_reconnect_end (_PHASEMANAGE)");
				return (ACTION_PHASEMANAGE);
			} else {
				bad_reselect = "not in msgin phase";
				break;
			}
		}
		fas_reg_cmd_write(fas, CMD_TRAN_INFO|CMD_DMA);
		fas_dma_reg_write(fas, &fas->f_dma->dma_csr,
		    fas->f_dma_csr);

		fas_reg_cmd_write(fas, CMD_MSG_ACPT);

		for (i = 0; i < (uint_t)RECONNECT_TAG_RCV_TIMEOUT;
		    i++) {
			/*
			 * timeout is not very accurate but this
			 * should take no time at all
			 */
			if (INTPENDING(fas)) {
				fas->f_stat = fas_reg_read(fas,
				    (uchar_t *)&fas->f_reg->fas_stat);
				fas->f_intr = fas_reg_read(fas,
				    (uchar_t *)&fas->f_reg->fas_intr);
				if (fas->f_intr & (FAS_INT_RESET |
				    FAS_INT_ILLEGAL)) {
					return (
					    fas_illegal_cmd_or_bus_reset(fas));
				}
				if (fas->f_intr & FAS_INT_FCMP) {
					break;
				}
			}
		}

		if (i == (uint_t)RECONNECT_TAG_RCV_TIMEOUT) {
			bad_reselect = "timeout on receiving tag msg";
			break;
		}

		FAS_FLUSH_DMA(fas);

		/*
		 * we should really do a sync here but that
		 * hurts performance too much; we'll just hang
		 * around till the tag byte flips
		 * This is necessary on any system with an
		 * XBox
		 */
		if (*c == INVALID_MSG) {
			EPRINTF(
			    "fas_reconnect: invalid msg, polling\n");
			for (i = 0; i < 1000000; i++) {
				if (*c != INVALID_MSG)
					break;
			}
		}

		if (fas->f_stat & FAS_STAT_PERR) {
			break;
		}

		if ((fas->f_stat & FAS_STAT_XZERO) == 0 ||
		    (id = *c++) < MSG_SIMPLE_QTAG ||
		    id > MSG_ORDERED_QTAG) {
			/*
			 * Target agreed to do tagged queueing
			 * and lied!
			 * This problem implies the drive firmware is
			 * broken.
			 */
			bad_reselect = "botched tag";
			break;
		}
		tag = *c;

		/* Set ptr to reconnecting scsi pkt */
		tag_slots = fas->f_active[slot];
		if (tag_slots != NULL) {
			sp = tag_slots->f_slot[tag];
		} else {
			bad_reselect = "Invalid tag";
			break;
		}

		fas->f_current_sp = sp;
	}
	}

	if (fas->f_stat & FAS_STAT_PERR) {
		sp = NULL;
		bad_reselect = "Parity error in reconnect msg's";
	}

	if ((sp == NULL ||
#ifdef FAS_TEST
	    (fas_atest_reconn & (1<<Tgt(sp))) ||
#endif
	    (sp->cmd_flags & (CFLAG_CMDDISC|CFLAG_CMDPROXY)) == 0)) {
		/*
		 * this shouldn't really happen, so it is better
		 * to reset the bus; some disks accept the abort
		 * and then still reconnect
		 */
		if (bad_reselect == NULL) {
			bad_reselect = "no command";
		}
#ifdef FAS_TEST
		if (sp && !(fas_atest_reconn & (1<<Tgt(sp))) &&
		    fas_test_stop) {
			debug_enter("bad reconnect");
		} else {
			fas_atest_reconn = 0;
		}
#endif
		goto bad;

	/*
	 * XXX remove this case or make it an ASSERT
	 */
	} else if (sp->cmd_flags & CFLAG_CMDPROXY) {
		/*
		 * If we got here, we were already attempting to
		 * run a polled proxy command for this target.
		 * Set ATN and, copy in the message, and drive
		 * on (ignoring any parity error on the identify).
		 */
		IPRINTF1("fas_reconnect: fielding proxy cmd for %d\n",
		    target);
		fas_assert_atn(fas);
		fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
		tmp = 0;
		while (tmp < fas->f_omsglen) {
			fas->f_cur_msgout[tmp] =
			    sp->cmd_cdb[FAS_PROXY_DATA+1+tmp];
			tmp++;
		}
		sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;

		/*
		 * pretend that the disconnected cmd is still disconnected
		 * (this prevents ndisc from going negative)
		 */
		fas->f_ndisc++;
		ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
		ASSERT(fas->f_ncmds >= fas->f_ndisc);
	}

	ASSERT(fas->f_resel_slot == slot);
	ASSERT(fas->f_ndisc > 0);
	fas->f_ndisc--;
	sp->cmd_flags &= ~CFLAG_CMDDISC;
	New_state(fas, ACTS_UNKNOWN);

	/*
	 * A reconnect may imply a restore pointers operation
	 * Note that some older disks (Micropolis in Pbox) do not
	 * send a save data ptr on disconnect if all data has been
	 * xferred. So, we cannot restore ptrs yet here.
	 */
	if ((sp->cmd_flags & CFLAG_DMAVALID) &&
	    (sp->cmd_data_count != sp->cmd_saved_data_count)) {
		sp->cmd_flags |= CFLAG_RESTORE_PTRS;
	}

	/*
	 * Return to await the FUNCTION COMPLETE interrupt we
	 * should get out of accepting the IDENTIFY message.
	 */
	EPRINTF2("Reconnecting %d.%d\n", target, slot % NLUNS_PER_TARGET);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RETURN2_END,
	    "fas_reconnect_end (_RETURN2)");
	return (ACTION_RETURN);

bad:
	if (sp && (fas->f_stat & FAS_STAT_PERR)) {
		sp->cmd_pkt->pkt_statistics |= STAT_PERR;
	}
	fas_log(fas, CE_WARN, "target %x: failed reselection (%s)",
	    target, bad_reselect);

#ifdef FASDEBUG
	fas_printstate(fas, "failed reselection");
#endif
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RESET5_END,
	    "fas_reconnect_end (_RESET5)");
	return (ACTION_RESET);
}

/*
 * handle unknown bus phase
 * we don't know what to expect so check status register for current
 * phase
 */
int
fas_handle_unknown(struct fas *fas)
{
	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_START,
	    "fas_handle_unknown_start: fas 0x%p", fas);
	EPRINTF("fas_handle_unknown:\n");

	if ((fas->f_intr & FAS_INT_DISCON) == 0) {
		/*
		 * we call actions here rather than returning to phasemanage
		 * (this is the most frequently called action)
		 */
		switch (fas->f_stat & FAS_PHASE_MASK) {
		case FAS_PHASE_DATA_IN:
		case FAS_PHASE_DATA_OUT:
			New_state(fas, ACTS_DATA);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_UNKNOWN_PHASE_DATA_END,
			    "fas_handle_unknown_end (phase_data)");
			return (fas_handle_data_start(fas));

		case FAS_PHASE_MSG_OUT:
			New_state(fas, ACTS_MSG_OUT);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_OUT_END,
			    "fas_handle_unknown_end (phase_msg_out)");
			return (fas_handle_msg_out_start(fas));

		case FAS_PHASE_MSG_IN:
			New_state(fas, ACTS_MSG_IN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_IN_END,
			    "fas_handle_unknown_end (phase_msg_in)");
			return (fas_handle_msg_in_start(fas));

		case FAS_PHASE_STATUS:
			fas_reg_cmd_write(fas,
CMD_FLUSH); 4469 #ifdef FAS_TEST 4470 if (fas_ptest_status & (1<<Tgt(fas->f_current_sp))) { 4471 fas_assert_atn(fas); 4472 } 4473 #endif /* FAS_TEST */ 4474 4475 fas_reg_cmd_write(fas, CMD_COMP_SEQ); 4476 New_state(fas, ACTS_C_CMPLT); 4477 4478 TRACE_0(TR_FAC_SCSI_FAS, 4479 TR_FAS_HANDLE_UNKNOWN_PHASE_STATUS_END, 4480 "fas_handle_unknown_end (phase_status)"); 4481 return (fas_handle_c_cmplt(fas)); 4482 4483 case FAS_PHASE_COMMAND: 4484 New_state(fas, ACTS_CMD_START); 4485 TRACE_0(TR_FAC_SCSI_FAS, 4486 TR_FAS_HANDLE_UNKNOWN_PHASE_CMD_END, 4487 "fas_handle_unknown_end (phase_cmd)"); 4488 return (fas_handle_cmd_start(fas)); 4489 } 4490 4491 fas_printstate(fas, "Unknown bus phase"); 4492 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_RESET_END, 4493 "fas_handle_unknown_end (reset)"); 4494 return (ACTION_RESET); 4495 4496 } else { 4497 /* 4498 * Okay. What to do now? Let's try (for the time being) 4499 * assuming that the target went south and dropped busy, 4500 * as a disconnect implies that either we received 4501 * a completion or a disconnect message, or that we 4502 * had sent an ABORT OPERATION or BUS DEVICE RESET 4503 * message. In either case, we expected the disconnect 4504 * and should have fielded it elsewhere. 4505 * 4506 * If we see a chip disconnect here, this is an unexpected 4507 * loss of BSY*. Clean up the state of the chip and return. 
4508 * 4509 */ 4510 int msgout = fas->f_cur_msgout[0]; 4511 struct fas_cmd *sp = fas->f_current_sp; 4512 int target = Tgt(sp); 4513 4514 if (msgout == MSG_HEAD_QTAG || msgout == MSG_SIMPLE_QTAG) { 4515 msgout = fas->f_cur_msgout[2]; 4516 } 4517 EPRINTF4("msgout: %x %x %x, last_msgout=%x\n", 4518 fas->f_cur_msgout[0], fas->f_cur_msgout[1], 4519 fas->f_cur_msgout[2], fas->f_last_msgout); 4520 4521 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG || 4522 msgout == MSG_DEVICE_RESET) { 4523 IPRINTF2("Successful %s message to target %d\n", 4524 scsi_mname(msgout), Tgt(sp)); 4525 if (sp->cmd_flags & CFLAG_CMDPROXY) { 4526 sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE; 4527 } 4528 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) { 4529 fas->f_abort_msg_sent++; 4530 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) { 4531 fas_set_pkt_reason(fas, sp, 4532 CMD_ABORTED, STAT_ABORTED); 4533 } 4534 } else if (msgout == MSG_DEVICE_RESET) { 4535 fas->f_reset_msg_sent++; 4536 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) { 4537 fas_set_pkt_reason(fas, sp, 4538 CMD_RESET, STAT_DEV_RESET); 4539 } 4540 fas_force_renegotiation(fas, target); 4541 } 4542 } else { 4543 if ((fas->f_last_msgout == MSG_EXTENDED) && 4544 (fas->f_last_msgin == MSG_REJECT)) { 4545 /* 4546 * the target rejected the negotiations, 4547 * so resubmit again (no_sync/no_wide 4548 * is now set) 4549 */ 4550 New_state(fas, STATE_FREE); 4551 fas_reg_cmd_write(fas, CMD_EN_RESEL); 4552 fas_remove_cmd(fas, sp, NEW_TIMEOUT); 4553 fas_decrement_ncmds(fas, sp); 4554 fas_check_ncmds(fas); 4555 sp->cmd_flags &= ~CFLAG_TRANFLAG; 4556 (void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY); 4557 fas_check_ncmds(fas); 4558 TRACE_0(TR_FAC_SCSI_FAS, 4559 TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END, 4560 "fas_handle_unknown_end (int_discon)"); 4561 return (ACTION_SEARCH); 4562 4563 } else if (fas->f_last_msgout == MSG_EXTENDED) { 4564 /* 4565 * target dropped off the bus during 4566 * negotiations 4567 */ 4568 fas_reset_sync_wide(fas); 4569 fas->f_sdtr_sent = 
fas->f_wdtr_sent = 0; 4570 } 4571 4572 fas_set_pkt_reason(fas, sp, CMD_UNX_BUS_FREE, 0); 4573 #ifdef FASDEBUG 4574 fas_printstate(fas, "unexpected bus free"); 4575 #endif 4576 } 4577 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END, 4578 "fas_handle_unknown_end (int_discon)"); 4579 return (ACTION_FINISH); 4580 } 4581 _NOTE(NOT_REACHED) 4582 /* NOTREACHED */ 4583 } 4584 4585 /* 4586 * handle target disconnecting 4587 */ 4588 static int 4589 fas_handle_clearing(struct fas *fas) 4590 { 4591 struct fas_cmd *sp = fas->f_current_sp; 4592 4593 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_START, 4594 "fas_handle_clearing_start"); 4595 EPRINTF("fas_handle_clearing:\n"); 4596 4597 if (fas->f_laststate == ACTS_C_CMPLT || 4598 fas->f_laststate == ACTS_MSG_IN_DONE) { 4599 if (INTPENDING(fas)) { 4600 volatile struct fasreg *fasreg = fas->f_reg; 4601 4602 fas->f_stat = fas_reg_read(fas, 4603 (uchar_t *)&fasreg->fas_stat); 4604 fas->f_intr = fas_reg_read(fas, 4605 (uchar_t *)&fasreg->fas_intr); 4606 if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) { 4607 return (fas_illegal_cmd_or_bus_reset(fas)); 4608 } 4609 } else { 4610 /* 4611 * change e_laststate for the next time around 4612 */ 4613 fas->f_laststate = ACTS_CLEARING; 4614 TRACE_0(TR_FAC_SCSI_FAS, 4615 TR_FAS_HANDLE_CLEARING_RETURN1_END, 4616 "fas_handle_clearing_end (ACTION_RETURN1)"); 4617 return (ACTION_RETURN); 4618 } 4619 } 4620 4621 if (fas->f_intr == FAS_INT_DISCON) { 4622 /* 4623 * At this point the FAS chip has disconnected. The bus should 4624 * be either quiet or someone may be attempting a reselection 4625 * of us (or somebody else). Call the routine that sets the 4626 * chip back to a correct and known state. 
		 * If the last message in was a disconnect, search
		 * for new work to do, else return to call fas_finish()
		 */
		fas->f_last_msgout = 0xff;
		fas->f_omsglen = 0;
		if (fas->f_last_msgin == MSG_DISCONNECT) {

			fas_reg_cmd_write(fas, CMD_EN_RESEL);

			New_state(fas, STATE_FREE);

			ASSERT(fas->f_current_sp != NULL);
			EPRINTF2("disconnecting %d.%d\n", Tgt(sp), Lun(sp));

			sp->cmd_pkt->pkt_statistics |= STAT_DISCON;
			sp->cmd_flags |= CFLAG_CMDDISC;
			/* proxy cmds are not counted as disconnected */
			if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
				fas->f_ndisc++;
			}
			ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
			ASSERT(fas->f_ncmds >= fas->f_ndisc);

			fas->f_current_sp = NULL;

			/*
			 * start a cmd here to save time
			 */
			if ((fas->f_ncmds > fas->f_ndisc) && fas_ustart(fas)) {
				TRACE_0(TR_FAC_SCSI_FAS,
				    TR_FAS_HANDLE_CLEARING_RETURN2_END,
				    "fas_handle_clearing_end (ACTION_RETURN2)");
				return (ACTION_RETURN);
			}

			/*
			 * NOTE(review): both paths return ACTION_RETURN;
			 * only the trace labels differ
			 */
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_CLEARING_RETURN3_END,
			    "fas_handle_clearing_end (ACTION_RETURN3)");
			return (ACTION_RETURN);
		} else {
			TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_END,
			    "fas_handle_clearing_end");
			return (fas_finish(fas));
		}
	} else {
		/*
		 * If the target didn't disconnect from the
		 * bus, that is a gross fatal error.
		 * XXX this can be caused by asserting ATN
		 * XXX check bus phase and if msgout, send a message
		 */
		fas_log(fas, CE_WARN,
		    "Target %d didn't disconnect after sending %s",
		    Tgt(sp), scsi_mname(fas->f_last_msgin));

		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

#ifdef FASDEBUG
		IPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
		    fas->f_cur_msgout[0], fas->f_cur_msgout[1],
		    fas->f_cur_msgout[2], fas->f_last_msgout);
		IPRINTF1("last msgin=%x\n", fas->f_last_msgin);
#endif
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_ABORT_END,
		    "fas_handle_clearing_end (ACTION_ABORT_CURCMD)");
		return (ACTION_ABORT_ALLCMDS);
	}
}

/*
 * handle data phase start
 */
static int
fas_handle_data_start(struct fas *fas)
{
	uint64_t end;
	uint32_t amt;
	struct fas_cmd *sp = fas->f_current_sp;
	int sending, phase;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_START,
	    "fas_handle_data_start");
	EPRINTF("fas_handle_data_start:\n");

	if ((sp->cmd_flags & CFLAG_DMAVALID) == 0) {
		fas_printstate(fas, "unexpected data phase");
	bad:
		/* shared error exit; also reached by goto from below */
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT1_END,
		    "fas_handle_data_end (ACTION_ABORT_CURCMD1)");
		return (ACTION_ABORT_CURCMD);
	} else {
		sending = (sp->cmd_flags & CFLAG_DMASEND)? 1 : 0;
	}

	if (sp->cmd_flags & CFLAG_RESTORE_PTRS) {
		if (fas_restore_pointers(fas, sp)) {
			return (ACTION_ABORT_CURCMD);
		}
		sp->cmd_flags &= ~CFLAG_RESTORE_PTRS;
	}

	/*
	 * And make sure our DMA pointers are in good shape.
	 *
	 * Because SCSI is SCSI, the current DMA pointer has got to be
	 * greater than or equal to our DMA base address. All other cases
	 * that might have affected this always set curaddr to be >=
	 * to the DMA base address.
	 */
	ASSERT(sp->cmd_cur_addr >= sp->cmd_dmacookie.dmac_address);
	end = (uint64_t)sp->cmd_dmacookie.dmac_address +
	    (uint64_t)sp->cmd_dmacookie.dmac_size;

	DPRINTF5(
	    "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%"
	    PRIx64 ", nwin=%x\n",
	    sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
	    sp->cmd_nwin);
	DPRINTF2("dmac_address = %x, dmac_size=%lx\n",
	    sp->cmd_dmacookie.dmac_address, sp->cmd_dmacookie.dmac_size);

	/* current cookie exhausted: move to the next DMA window */
	if (sp->cmd_cur_addr >= end) {
		if (fas_next_window(fas, sp, end)) {
			goto bad;
		}
		end = (uint64_t)sp->cmd_dmacookie.dmac_address +
		    (uint64_t)sp->cmd_dmacookie.dmac_size;
		DPRINTF2("dmac_address=%x, dmac_size=%lx\n",
		    sp->cmd_dmacookie.dmac_address,
		    sp->cmd_dmacookie.dmac_size);
	}

	/* clip the transfer to the engine's per-transfer maximum */
	amt = end - sp->cmd_cur_addr;
	if (fas->f_dma_attr->dma_attr_count_max < amt) {
		amt = fas->f_dma_attr->dma_attr_count_max;
	}
	DPRINTF3("amt=%x, end=%lx, cur_addr=%x\n", amt, end, sp->cmd_cur_addr);

#ifdef FASDEBUG
	/*
	 * Make sure that we don't cross a boundary we can't handle
	 */
	end = (uint64_t)sp->cmd_cur_addr + (uint64_t)amt - 1;
	if ((end & ~fas->f_dma_attr->dma_attr_seg) !=
	    (sp->cmd_cur_addr & ~fas->f_dma_attr->dma_attr_seg)) {
		EPRINTF3("curaddr %x curaddr+amt %" PRIx64
		    " cntr_max %" PRIx64 "\n",
		    sp->cmd_cur_addr, end, fas->f_dma_attr->dma_attr_seg);
		amt = (end & ~fas->f_dma_attr->dma_attr_seg) - sp->cmd_cur_addr;
		if (amt == 0 || amt > fas->f_dma_attr->dma_attr_count_max) {
			fas_log(fas, CE_WARN, "illegal dma boundary? %x", amt);
			goto bad;
		}
	}
#endif

	/* never transfer past the end of the current cookie */
	end = (uint64_t)sp->cmd_dmacookie.dmac_address +
	    (uint64_t)sp->cmd_dmacookie.dmac_size -
	    (uint64_t)sp->cmd_cur_addr;
	if (amt > end) {
		EPRINTF4("ovflow amt %x s.b. %" PRIx64 " curaddr %x count %x\n",
		    amt, end, sp->cmd_cur_addr, sp->cmd_dmacount);
		amt = (uint32_t)end;
	}

	fas->f_lastcount = amt;

	EPRINTF4("%d.%d cmd 0x%x to xfer %x\n", Tgt(sp), Lun(sp),
	    sp->cmd_pkt->pkt_cdbp[0], amt);

	phase = fas->f_stat & FAS_PHASE_MASK;

	/* bus phase must agree with the cmd's DMA direction */
	if ((phase == FAS_PHASE_DATA_IN) && !sending) {
		FAS_DMA_WRITE(fas, amt, sp->cmd_cur_addr,
		    CMD_TRAN_INFO|CMD_DMA);
	} else if ((phase == FAS_PHASE_DATA_OUT) && sending) {
		FAS_DMA_READ(fas, amt, sp->cmd_cur_addr, amt,
		    CMD_TRAN_INFO|CMD_DMA);
	} else {
		fas_log(fas, CE_WARN,
		    "unwanted data xfer direction for Target %d", Tgt(sp));
		fas_set_pkt_reason(fas, sp, CMD_DMA_DERR, 0);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT2_END,
		    "fas_handle_data_end (ACTION_ABORT_CURCMD2)");
		return (ACTION_ABORT_CURCMD);
	}

#ifdef FAS_TEST
	if (!sending && (fas_ptest_data_in & (1<<Tgt(sp)))) {
		fas_assert_atn(fas);
	}
#endif /* FAS_TEST */

	New_state(fas, ACTS_DATA_DONE);

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_END,
	    "fas_handle_data_end (ACTION_RETURN)");
	return (ACTION_RETURN);
}

/*
 * data phase has ended; figure out how much was transferred,
 * adjust the DMA pointers and decide on the next state
 */
static int
fas_handle_data_done(struct fas *fas)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	volatile struct dma *dmar = fas->f_dma;
	struct fas_cmd *sp = fas->f_current_sp;
	uint32_t xfer_amt;
	char was_sending;
	uchar_t stat, fifoamt, tgt;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_START,
	    "fas_handle_data_done_start");
	EPRINTF("fas_handle_data_done\n");

	tgt = Tgt(sp);
	stat = fas->f_stat;
	was_sending = (sp->cmd_flags & CFLAG_DMASEND) ?
	    1 : 0;

	/*
	 * Check for DMA errors (parity or memory fault)
	 */
	if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr)) &
	    DMA_ERRPEND) {
		/*
		 * It would be desirable to set the ATN* line and attempt to
		 * do the whole schmear of INITIATOR DETECTED ERROR here,
		 * but that is too hard to do at present.
		 */
		fas_log(fas, CE_WARN, "Unrecoverable DMA error on dma %s",
		    (was_sending) ? "send" : "receive");
		fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
		    "fas_handle_data_done_end (ACTION_RESET)");
		return (ACTION_RESET);
	}

	/*
	 * Data Receive conditions:
	 *
	 * Check for parity errors. If we have a parity error upon
	 * receive, the FAS chip has asserted ATN* for us already.
	 */
	if (!was_sending) {
#ifdef FAS_TEST
		if (fas_ptest_data_in & (1<<tgt)) {
			fas_ptest_data_in = 0;
			stat |= FAS_STAT_PERR;
			if (fas_test_stop > 1) {
				debug_enter("ptest_data_in");
			}
		}
#endif /* FAS_TEST */
		if (stat & FAS_STAT_PERR) {
			fas_log(fas, CE_WARN,
			    "SCSI bus DATA IN phase parity error");
			/* queue an INITIATOR DETECTED ERROR message */
			fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
			fas->f_omsglen = 1;
			sp->cmd_pkt->pkt_statistics |= STAT_PERR;
			sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
		}
	}

	FAS_FLUSH_DMA(fas);

	/*
	 * Check to make sure we're still connected to the target.
	 * If the target dropped the bus, that is a fatal error.
	 * We don't even attempt to count what we were transferring
	 * here. Let fas_handle_unknown clean up for us.
	 */
	if (fas->f_intr != FAS_INT_BUS) {
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_DATA_DONE_PHASEMANAGE_END,
		    "fas_handle_data_done_end (ACTION_PHASEMANAGE)");
		return (ACTION_PHASEMANAGE);
	}

	/*
	 * Figure out how far we got.
	 * Latch up fifo amount first and double if wide has been enabled
	 */
	fifoamt = FIFO_CNT(fas);
	if (fas->f_wide_enabled & (1<<tgt)) {
		/* wide: each fifo entry represents two bytes on the bus */
		fifoamt = fifoamt << 1;
	}

	if (stat & FAS_STAT_XZERO) {
		/* counter reached zero: the whole programmed amount moved */
		xfer_amt = fas->f_lastcount;
	} else {
		GET_FAS_COUNT(fasreg, xfer_amt);
		xfer_amt = fas->f_lastcount - xfer_amt;
	}
	DPRINTF4("fifoamt=%x, xfer_amt=%x, lastcount=%x, stat=%x\n",
	    fifoamt, xfer_amt, fas->f_lastcount, stat);


	/*
	 * Unconditionally knock off by the amount left
	 * in the fifo if we were sending out the SCSI bus.
	 *
	 * If we were receiving from the SCSI bus, believe
	 * what the chip told us (either XZERO or by the
	 * value calculated from the counter register).
	 * The reason we don't look at the fifo for
	 * incoming data is that in synchronous mode
	 * the fifo may have further data bytes, and
	 * for async mode we assume that all data in
	 * the fifo will have been transferred before
	 * the fas asserts an interrupt.
	 */
	if (was_sending) {
		xfer_amt -= fifoamt;
	}

#ifdef FASDEBUG
	{
		int phase = stat & FAS_PHASE_MASK;
		fas->f_stat2 = fas_reg_read(fas,
		    (uchar_t *)&fasreg->fas_stat2);

		if (((fas->f_stat & FAS_STAT_XZERO) == 0) &&
		    (phase != FAS_PHASE_DATA_IN) &&
		    (phase != FAS_PHASE_DATA_OUT) &&
		    (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
			fas_log(fas, CE_WARN,
			    "input shuttle not empty at end of data phase");
			fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_DATA_DONE_RESET_END,
			    "fas_handle_data_done_end (ACTION_RESET)");
			return (ACTION_RESET);
		}
	}
#endif /* FASDEBUG */

	/*
	 * If this was a synchronous transfer, flag it.
	 * Also check for the errata condition of long
	 * last REQ/ pulse for some synchronous targets
	 */
	if (fas->f_offset[tgt]) {
		/*
		 * flag that a synchronous data xfer took place
		 */
		sp->cmd_pkt->pkt_statistics |= STAT_SYNC;

		if (was_sending)
			fas_reg_cmd_write(fas, CMD_FLUSH);
	} else {
		/*
		 * If we aren't doing Synchronous Data Transfers,
		 * definitely offload the fifo.
		 */
		fas_reg_cmd_write(fas, CMD_FLUSH);
	}

	/*
	 * adjust pointers...
	 */
	DPRINTF3("before:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
	    sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
	sp->cmd_data_count += xfer_amt;
	sp->cmd_cur_addr += xfer_amt;
	sp->cmd_pkt->pkt_state |= STATE_XFERRED_DATA;
	New_state(fas, ACTS_UNKNOWN);
	DPRINTF3("after:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
	    sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);

	/* still in a data phase? go straight back to data start */
	stat &= FAS_PHASE_MASK;
	if (stat == FAS_PHASE_DATA_IN || stat == FAS_PHASE_DATA_OUT) {
		fas->f_state = ACTS_DATA;
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_DATA_DONE_ACTION1_END,
		    "fas_handle_data_done_end (action1)");
		return (fas_handle_data_start(fas));
	}

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_ACTION2_END,
	    "fas_handle_data_done_end (action2)");
	return (fas_handle_unknown(fas));
}

static char msginperr[] = "SCSI bus MESSAGE IN phase parity error";

/*
 * handle the status/completion sequence (status byte plus
 * completion message) after a CMD_COMP_SEQ was issued
 */
static int
fas_handle_c_cmplt(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	volatile struct fasreg *fasreg = fas->f_reg;
	uchar_t sts, msg, intr, perr;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_START,
	    "fas_handle_c_cmplt_start");
	EPRINTF("fas_handle_c_cmplt:\n");


	/*
	 * if target is fast, we can get cmd. completion by the time we get
	 * here. Otherwise, we'll have to taken an interrupt.
	 */
	if (fas->f_laststate == ACTS_UNKNOWN) {
		if (INTPENDING(fas)) {
			fas->f_stat = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_stat);
			intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
			fas->f_intr = intr;
			if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
				return (fas_illegal_cmd_or_bus_reset(fas));
			}
		} else {
			/*
			 * change f_laststate for the next time around
			 */
			fas->f_laststate = ACTS_C_CMPLT;
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_C_CMPLT_RETURN1_END,
			    "fas_handle_c_cmplt_end (ACTION_RETURN1)");
			return (ACTION_RETURN);
		}
	} else {
		intr = fas->f_intr;
	}

#ifdef FAS_TEST
	if (fas_ptest_status & (1<<Tgt(sp))) {
		fas_ptest_status = 0;
		fas->f_stat |= FAS_STAT_PERR;
		if (fas_test_stop > 1) {
			debug_enter("ptest_status");
		}
	} else if ((fas_ptest_msgin & (1<<Tgt(sp))) && fas_ptest_msg == 0) {
		fas_ptest_msgin = 0;
		fas_ptest_msg = -1;
		fas->f_stat |= FAS_STAT_PERR;
		if (fas_test_stop > 1) {
			debug_enter("ptest_completion");
		}
	}
#endif /* FAS_TEST */

	if (intr == FAS_INT_DISCON) {
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION1_END,
		    "fas_handle_c_cmplt_end (action1)");
		return (fas_handle_unknown(fas));
	}

	/* latch any parity error before consuming the fifo bytes */
	if ((perr = (fas->f_stat & FAS_STAT_PERR)) != 0) {
		fas_assert_atn(fas);
		sp->cmd_pkt->pkt_statistics |= STAT_PERR;
	}

	/*
	 * do a msg accept now and read the fifo data
	 */
	if (intr & FAS_INT_FCMP) {
		/*
		 * The FAS manuals state that this sequence completes
		 * with a BUS SERVICE interrupt if just the status
		 * byte was received, else a FUNCTION COMPLETE interrupt
		 * if both status and a message was received.
		 *
		 * if we give the MSG_ACT before reading the msg byte
		 * we get the status byte again and if the status is zero
		 * then we won't detect a failure
		 */
		*(sp->cmd_pkt->pkt_scbp) =
		    sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
		fas->f_last_msgin = fas->f_imsgarea[0] =
		    msg = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);

		fas_reg_cmd_write(fas, CMD_MSG_ACPT);
		sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;

		/*
		 * The manuals also state that ATN* is asserted if
		 * bad parity is detected.
		 *
		 * The one case that we cannot handle is where we detect
		 * bad parity for the status byte, but the target refuses
		 * to go to MESSAGE OUT phase right away. This means that
		 * if that happens, we will misconstrue the parity error
		 * to be for the completion message, not the status byte.
		 */
		if (perr) {
			fas_log(fas, CE_WARN, msginperr);
			sp->cmd_pkt->pkt_statistics |= STAT_PERR;

			fas->f_cur_msgout[0] = MSG_MSG_PARITY;
			fas->f_omsglen = 1;
			New_state(fas, ACTS_UNKNOWN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
			    "fas_handle_c_cmplt_end (action5)");
			return (ACTION_RETURN);
		}

	} else if (intr == FAS_INT_BUS) {
		/*
		 * We only got the status byte.
		 */
		sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
		sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
		*(sp->cmd_pkt->pkt_scbp) = sts;
		msg = INVALID_MSG;

		IPRINTF1("fas_handle_cmd_cmplt: sts=%x, no msg byte\n", sts);

		if (perr) {
			/*
			 * If we get a parity error on a status byte
			 * assume that it was a CHECK CONDITION
			 */
			sts = STATUS_CHECK;
			fas_log(fas, CE_WARN,
			    "SCSI bus STATUS phase parity error");
			fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
			fas->f_omsglen = 1;
			New_state(fas, ACTS_UNKNOWN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
			    "fas_handle_c_cmplt_end (action5)");
			return (fas_handle_unknown(fas));
		}

	} else {
		msg = sts = INVALID_MSG;
		IPRINTF("fas_handle_cmd_cmplt: unexpected intr\n");
		New_state(fas, ACTS_UNKNOWN);
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION2_END,
		    "fas_handle_c_cmplt_end (action2)");
		return (fas_handle_unknown(fas));
	}

	EPRINTF2("fas_handle_c_cmplt: status=%x, msg=%x\n", sts, msg);

	EPRINTF1("Completion Message=%s\n", scsi_mname(msg));
	if (msg == MSG_COMMAND_COMPLETE) {
		/*
		 * Actually, if the message was a 'linked command
		 * complete' message, the target isn't going to be
		 * clearing the bus.
		 */
		New_state(fas, ACTS_CLEARING);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_C_CMPLT_ACTION4_END,
		    "fas_handle_c_cmplt_end (action4)");
		return (fas_handle_clearing(fas));
	} else {
		/* non-completion message: hand it to the msg-in machinery */
		fas->f_imsglen = 1;
		fas->f_imsgindex = 1;
		New_state(fas, ACTS_MSG_IN_DONE);
		TRACE_0(TR_FAC_SCSI_FAS,
		    TR_FAS_HANDLE_C_CMPLT_ACTION3_END,
		    "fas_handle_c_cmplt_end (action3)");
		return (fas_handle_msg_in_done(fas));
	}
}

/*
 * prepare for accepting a message byte from the fifo
 */
static int
fas_handle_msg_in_start(struct fas *fas)
{
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_START,
	    "fas_handle_msg_in_start");
	EPRINTF("fas_handle_msg_in_start\n");

	/*
	 * Pick up a message byte.
	 * Clear the FIFO so we
	 * don't get confused.
	 */
	if (!FIFO_EMPTY(fas)) {
		fas_reg_cmd_write(fas, CMD_FLUSH);
	}
	fas_reg_cmd_write(fas, CMD_TRAN_INFO);
	fas->f_imsglen = 1;
	fas->f_imsgindex = 0;
	New_state(fas, ACTS_MSG_IN_DONE);

	/*
	 * give a little extra time by returning to phasemanage
	 */
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_END,
	    "fas_handle_msg_in_end (ACTION_PHASEMANAGE)");
	return (ACTION_PHASEMANAGE);
}

/*
 * We come here after issuing a MSG_ACCEPT
 * command and are expecting more message bytes.
 * The FAS should be asserting a BUS SERVICE
 * interrupt status, but may have asserted
 * a different interrupt in the case that
 * the target disconnected and dropped BSY*.
 *
 * In the case that we are eating up message
 * bytes (and throwing them away unread) because
 * we have ATN* asserted (we are trying to send
 * a message), we do not consider it an error
 * if the phase has changed out of MESSAGE IN.
 */
static int
fas_handle_more_msgin(struct fas *fas)
{
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_START,
	    "fas_handle_more_msgin_start");
	EPRINTF("fas_handle_more_msgin\n");

	if (fas->f_intr & FAS_INT_BUS) {
		if ((fas->f_stat & FAS_PHASE_MASK) == FAS_PHASE_MSG_IN) {
			/*
			 * Fetch another byte of a message in.
			 */
			fas_reg_cmd_write(fas, CMD_TRAN_INFO);
			New_state(fas, ACTS_MSG_IN_DONE);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MORE_MSGIN_RETURN1_END,
			    "fas_handle_more_msgin_end (ACTION_RETURN)");
			return (ACTION_RETURN);
		}

		/*
		 * If we were gobbling up a message and we have
		 * changed phases, handle this silently, else
		 * complain. In either case, we return to let
		 * fas_phasemanage() handle things.
		 *
		 * If it wasn't a BUS SERVICE interrupt,
		 * let fas_phasemanage() find out if the
		 * chip disconnected.
		 */
		if (fas->f_imsglen != 0) {
			fas_log(fas, CE_WARN,
			    "Premature end of extended message");
		}
	}
	New_state(fas, ACTS_UNKNOWN);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_RETURN2_END,
	    "fas_handle_more_msgin_end (action)");
	return (fas_handle_unknown(fas));
}

/*
 * a message byte has been received; accumulate it and, when the
 * message is complete, dispatch to the one/two/multi-byte handlers
 */
static int
fas_handle_msg_in_done(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	volatile struct fasreg *fasreg = fas->f_reg;
	int sndmsg = 0;
	uchar_t msgin;

	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_START,
	    "fas_handle_msg_in_done_start");
	EPRINTF("fas_handle_msg_in_done:\n");
	if (fas->f_laststate == ACTS_MSG_IN) {
		if (INTPENDING(fas)) {
			fas->f_stat = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_stat);
			fas->f_stat2 = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_stat2);

			fas_read_fifo(fas);

			fas->f_intr = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_intr);
			if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
				return (fas_illegal_cmd_or_bus_reset(fas));
			}
		} else {
			/*
			 * change f_laststate for the next time around
			 */
			fas->f_laststate = ACTS_MSG_IN_DONE;
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_RETURN1_END,
			    "fas_handle_msg_in_done_end (ACTION_RETURN1)");
			return (ACTION_RETURN);
		}
	}

	/*
	 * the most common case is a disconnect message. we do
	 * a fast path for this condition and if it fails then
	 * we go for the detailed error handling
	 */
#ifndef FAS_TEST
	if (((fas->f_laststate == ACTS_MSG_IN) ||
	    (fas->f_laststate == ACTS_MSG_IN_DONE)) &&
	    ((fas->f_intr & FAS_INT_DISCON) == 0) &&
	    ((fas->f_stat & FAS_STAT_PERR) == 0) &&
	    ((sp->cmd_pkt_flags & FLAG_NODISCON) == 0)) {

		if ((fas->f_fifolen == 1) &&
		    (fas->f_imsglen == 1) &&
		    (fas->f_fifo[0] == MSG_DISCONNECT)) {

			fas_reg_cmd_write(fas, CMD_MSG_ACPT);
			fas->f_imsgarea[fas->f_imsgindex++] = fas->f_fifo[0];
			fas->f_last_msgin = MSG_DISCONNECT;
			New_state(fas, ACTS_CLEARING);

			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
			    "fas_handle_msg_in_done_end (action)");

			return (fas_handle_clearing(fas));
		}
	}
#endif /* not FAS_TEST */

	/*
	 * We can be called here for both the case where
	 * we had requested the FAS chip to fetch a message
	 * byte from the target (at the target's request).
	 * We can also be called in the case where we had
	 * been using the CMD_COMP_SEQ command to pick up
	 * both a status byte and a completion message from
	 * a target, but where the message wasn't one of
	 * COMMAND COMPLETE, LINKED COMMAND COMPLETE, or
	 * LINKED COMMAND COMPLETE (with flag). This is a
	 * legal (albeit extremely unusual) SCSI bus trans-
	 * -ition, so we have to handle it.
	 */
	if (fas->f_laststate != ACTS_C_CMPLT) {
#ifdef FAS_TEST
	reloop:
#endif /* FAS_TEST */

		if (fas->f_intr & FAS_INT_DISCON) {
			fas_log(fas, CE_WARN,
			    "premature end of input message");
			New_state(fas, ACTS_UNKNOWN);
			TRACE_0(TR_FAC_SCSI_FAS,
			    TR_FAS_HANDLE_MSG_IN_DONE_PHASEMANAGE_END,
			    "fas_handle_msg_in_done_end (ACTION_PHASEMANAGE)");
			return (ACTION_PHASEMANAGE);
		}

		/*
		 * Note that if f_imsglen is zero, then we are skipping
		 * input message bytes, so there is no reason to look for
		 * parity errors.
		 */
		if (fas->f_imsglen != 0 && (fas->f_stat & FAS_STAT_PERR)) {
			fas_log(fas, CE_WARN, msginperr);
			sndmsg = MSG_MSG_PARITY;
			sp->cmd_pkt->pkt_statistics |= STAT_PERR;
			fas_reg_cmd_write(fas, CMD_FLUSH);

		} else if ((msgin = fas->f_fifolen) != 1) {

			/*
			 * If we have got more than one or 0 bytes in the fifo,
			 * that is a gross screwup, and we should let the
			 * target know that we have completely fouled up.
			 */
			fas_printf(fas, "fifocount=%x", msgin);
			fas_printstate(fas, "input message botch");
			sndmsg = MSG_INITIATOR_ERROR;
			fas_reg_cmd_write(fas, CMD_FLUSH);
			fas_log(fas, CE_WARN, "input message botch");

		} else if (fas->f_imsglen == 0) {
			/*
			 * If we are in the middle of gobbling up and throwing
			 * away a message (due to a previous message input
			 * error), drive on.
			 */
			msgin = fas_reg_read(fas,
			    (uchar_t *)&fasreg->fas_fifo_data);
			New_state(fas, ACTS_MSG_IN_MORE);

		} else {
			msgin = fas->f_fifo[0];
			fas->f_imsgarea[fas->f_imsgindex++] = msgin;
		}

	} else {
		/*
		 * In this case, we have been called (from
		 * fas_handle_c_cmplt()) with the message
		 * already stored in the message array.
		 */
		msgin = fas->f_imsgarea[0];
	}

	/*
	 * Process this message byte (but not if we are
	 * going to be trying to send back some error
	 * anyway)
	 */
	if (sndmsg == 0 && fas->f_imsglen != 0) {

		if (fas->f_imsgindex < fas->f_imsglen) {

			EPRINTF2("message byte %d: 0x%x\n",
			    fas->f_imsgindex-1,
			    fas->f_imsgarea[fas->f_imsgindex-1]);

			New_state(fas, ACTS_MSG_IN_MORE);

		} else if (fas->f_imsglen == 1) {

#ifdef FAS_TEST
			if ((fas_ptest_msgin & (1<<Tgt(sp))) &&
			    fas_ptest_msg == msgin) {
				fas_ptest_msgin = 0;
				fas_ptest_msg = -1;
				fas_assert_atn(fas);
				fas->f_stat |= FAS_STAT_PERR;
				fas->f_imsgindex -= 1;
				if (fas_test_stop > 1) {
					debug_enter("ptest msgin");
				}
				goto reloop;
			}
#endif /* FAS_TEST */

			sndmsg = fas_onebyte_msg(fas);

		} else if (fas->f_imsglen == 2) {
#ifdef FAS_TEST
			if (fas_ptest_emsgin & (1<<Tgt(sp))) {
				fas_ptest_emsgin = 0;
				fas_assert_atn(fas);
				fas->f_stat |= FAS_STAT_PERR;
				fas->f_imsgindex -= 1;
				if (fas_test_stop > 1) {
					debug_enter("ptest emsgin");
				}
				goto reloop;
			}
#endif /* FAS_TEST */

			if (fas->f_imsgarea[0] == MSG_EXTENDED) {
				static char *tool =
				    "Extended message 0x%x is too long";

				/*
				 * Is the incoming message too long
				 * to be stored in our local array?
				 */
				if ((int)(msgin+2) > IMSGSIZE) {
					fas_log(fas, CE_WARN,
					    tool, fas->f_imsgarea[0]);
					sndmsg = MSG_REJECT;
				} else {
					/* second byte is the ext msg length */
					fas->f_imsglen = msgin + 2;
					New_state(fas, ACTS_MSG_IN_MORE);
				}
			} else {
				sndmsg = fas_twobyte_msg(fas);
			}

		} else {
			sndmsg = fas_multibyte_msg(fas);
		}
	}

	if (sndmsg < 0) {
		/*
		 * If sndmsg is less than zero, one of the subsidiary
		 * routines needs to return some other state than
		 * ACTION_RETURN.
		 */
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_SNDMSG_END,
		    "fas_handle_msg_in_done_end (-sndmsg)");
		return (-sndmsg);

	} else if (sndmsg > 0) {
		if (IS_1BYTE_MSG(sndmsg)) {
			fas->f_omsglen = 1;
		}
		fas->f_cur_msgout[0] = (uchar_t)sndmsg;

		/*
		 * The target is not guaranteed to go to message out
		 * phase, period. Moreover, until the entire incoming
		 * message is transferred, the target may (and likely
		 * will) continue to transfer message bytes (which
		 * we will have to ignore).
		 *
		 * In order to do this, we'll go to 'infinite'
		 * message in handling by setting the current input
		 * message length to a sentinel of zero.
		 *
		 * This works regardless of the message we are trying
		 * to send out. At the point in time which we want
		 * to send a message in response to an incoming message
		 * we do not care any more about the incoming message.
		 *
		 * If we are sending a message in response to detecting
		 * a parity error on input, the FAS chip has already
		 * set ATN* for us, but it doesn't hurt to set it here
		 * again anyhow.
		 */
		fas_assert_atn(fas);
		New_state(fas, ACTS_MSG_IN_MORE);
		fas->f_imsglen = 0;
	}

	fas_reg_cmd_write(fas, CMD_FLUSH);

	fas_reg_cmd_write(fas, CMD_MSG_ACPT);

	if ((fas->f_laststate == ACTS_MSG_IN_DONE) &&
	    (fas->f_state == ACTS_CLEARING)) {
		TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
		    "fas_handle_msg_in_done_end (action)");
		return (fas_handle_clearing(fas));
	}
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_RETURN2_END,
	    "fas_handle_msg_in_done_end (ACTION_RETURN2)");
	return (ACTION_RETURN);
}

static int
fas_onebyte_msg(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	int msgout = 0;
	uchar_t msgin = fas->f_last_msgin = fas->f_imsgarea[0];
	int tgt = Tgt(sp);

	EPRINTF("fas_onebyte_msg\n");

	if (msgin & MSG_IDENTIFY) {
		/*
		 * How did we get here? We should only see identify
		 * messages on a reconnection, but we'll handle this
		 * fine here (just in case we get this) as long as
		 * we believe that this is a valid identify message.
		 *
		 * For this to be a valid incoming message,
		 * bits 6-4 must must be zero. Also, the
		 * bit that says that I'm an initiator and
		 * can support disconnection cannot possibly
		 * be set here.
		 */

		char garbled = ((msgin & (BAD_IDENTIFY|INI_CAN_DISCON)) != 0);

		fas_log(fas, CE_WARN, "%s message 0x%x from Target %d",
		    garbled ? "Garbled" : "Identify", msgin, tgt);

		if (garbled) {
			/*
			 * If it's a garbled message,
			 * try and tell the target...
5580 */ 5581 msgout = MSG_INITIATOR_ERROR; 5582 } else { 5583 New_state(fas, ACTS_UNKNOWN); 5584 } 5585 return (msgout); 5586 5587 } else if (IS_2BYTE_MSG(msgin) || IS_EXTENDED_MSG(msgin)) { 5588 fas->f_imsglen = 2; 5589 New_state(fas, ACTS_MSG_IN_MORE); 5590 return (0); 5591 } 5592 5593 New_state(fas, ACTS_UNKNOWN); 5594 5595 switch (msgin) { 5596 case MSG_DISCONNECT: 5597 /* 5598 * If we 'cannot' disconnect- reject this message. 5599 * Note that we only key off of the pkt_flags here- 5600 * the FLAG_NODISCON was set in fas_accept_pkt() if 5601 * no disconnect was enabled in scsi_options 5602 */ 5603 if (sp->cmd_pkt_flags & FLAG_NODISCON) { 5604 msgout = MSG_REJECT; 5605 break; 5606 } 5607 /* FALLTHROUGH */ 5608 case MSG_COMMAND_COMPLETE: 5609 fas->f_state = ACTS_CLEARING; 5610 break; 5611 5612 case MSG_NOP: 5613 break; 5614 5615 /* XXX Make it a MSG_REJECT handler */ 5616 case MSG_REJECT: 5617 { 5618 uchar_t reason = 0; 5619 uchar_t lastmsg = fas->f_last_msgout; 5620 /* 5621 * The target is rejecting the last message we sent. 5622 * 5623 * If the last message we attempted to send out was an 5624 * extended message, we were trying to negotiate sync 5625 * xfers- and we're okay. 5626 * 5627 * Otherwise, a target has rejected a message that 5628 * it should have handled. We will abort the operation 5629 * in progress and set the pkt_reason value here to 5630 * show why we have completed. The process of aborting 5631 * may be via a message or may be via a bus reset (as 5632 * a last resort). 5633 */ 5634 msgout = (TAGGED(tgt)? 
MSG_ABORT_TAG : MSG_ABORT); 5635 5636 switch (lastmsg) { 5637 case MSG_EXTENDED: 5638 if (fas->f_wdtr_sent) { 5639 /* 5640 * Disable wide, Target rejected 5641 * out WDTR message 5642 */ 5643 fas_set_wide_conf3(fas, tgt, 0); 5644 fas->f_nowide |= (1<<tgt); 5645 fas->f_wdtr_sent = 0; 5646 /* 5647 * we still want to negotiate sync 5648 */ 5649 if ((fas->f_nosync & (1<<tgt)) == 0) { 5650 fas_assert_atn(fas); 5651 fas_make_sdtr(fas, 0, tgt); 5652 } 5653 } else if (fas->f_sdtr_sent) { 5654 fas_reg_cmd_write(fas, CMD_CLR_ATN); 5655 fas_revert_to_async(fas, tgt); 5656 fas->f_nosync |= (1<<tgt); 5657 fas->f_sdtr_sent = 0; 5658 } 5659 msgout = 0; 5660 break; 5661 case MSG_NOP: 5662 reason = CMD_NOP_FAIL; 5663 break; 5664 case MSG_INITIATOR_ERROR: 5665 reason = CMD_IDE_FAIL; 5666 break; 5667 case MSG_MSG_PARITY: 5668 reason = CMD_PER_FAIL; 5669 break; 5670 case MSG_REJECT: 5671 reason = CMD_REJECT_FAIL; 5672 break; 5673 /* XXX - abort not good, queue full handling or drain (?) */ 5674 case MSG_SIMPLE_QTAG: 5675 case MSG_ORDERED_QTAG: 5676 case MSG_HEAD_QTAG: 5677 msgout = MSG_ABORT; 5678 reason = CMD_TAG_REJECT; 5679 break; 5680 case MSG_DEVICE_RESET: 5681 reason = CMD_BDR_FAIL; 5682 msgout = -ACTION_ABORT_CURCMD; 5683 break; 5684 case MSG_ABORT: 5685 case MSG_ABORT_TAG: 5686 /* 5687 * If an RESET/ABORT OPERATION message is rejected 5688 * it is time to yank the chain on the bus... 
5689 */ 5690 reason = CMD_ABORT_FAIL; 5691 msgout = -ACTION_ABORT_CURCMD; 5692 break; 5693 default: 5694 if (IS_IDENTIFY_MSG(lastmsg)) { 5695 if (TAGGED(tgt)) { 5696 /* 5697 * this often happens when the 5698 * target rejected our tag 5699 */ 5700 reason = CMD_TAG_REJECT; 5701 } else { 5702 reason = CMD_ID_FAIL; 5703 } 5704 } else { 5705 reason = CMD_TRAN_ERR; 5706 msgout = -ACTION_ABORT_CURCMD; 5707 } 5708 5709 break; 5710 } 5711 5712 if (msgout) { 5713 fas_log(fas, CE_WARN, 5714 "Target %d rejects our message '%s'", 5715 tgt, scsi_mname(lastmsg)); 5716 fas_set_pkt_reason(fas, sp, reason, 0); 5717 } 5718 5719 break; 5720 } 5721 case MSG_RESTORE_PTRS: 5722 sp->cmd_cdbp = sp->cmd_pkt->pkt_cdbp; 5723 if (sp->cmd_data_count != sp->cmd_saved_data_count) { 5724 if (fas_restore_pointers(fas, sp)) { 5725 msgout = -ACTION_ABORT_CURCMD; 5726 } else if ((sp->cmd_pkt->pkt_reason & CMD_TRAN_ERR) && 5727 (sp->cmd_pkt->pkt_statistics & STAT_PERR) && 5728 (sp->cmd_cur_win == 0) && 5729 (sp->cmd_data_count == 0)) { 5730 sp->cmd_pkt->pkt_reason &= ~CMD_TRAN_ERR; 5731 } 5732 } 5733 break; 5734 5735 case MSG_SAVE_DATA_PTR: 5736 sp->cmd_saved_data_count = sp->cmd_data_count; 5737 sp->cmd_saved_win = sp->cmd_cur_win; 5738 sp->cmd_saved_cur_addr = sp->cmd_cur_addr; 5739 break; 5740 5741 /* These don't make sense for us, and */ 5742 /* will be rejected */ 5743 /* case MSG_INITIATOR_ERROR */ 5744 /* case MSG_ABORT */ 5745 /* case MSG_MSG_PARITY */ 5746 /* case MSG_DEVICE_RESET */ 5747 default: 5748 msgout = MSG_REJECT; 5749 fas_log(fas, CE_WARN, 5750 "Rejecting message '%s' from Target %d", 5751 scsi_mname(msgin), tgt); 5752 break; 5753 } 5754 5755 EPRINTF1("Message in: %s\n", scsi_mname(msgin)); 5756 5757 return (msgout); 5758 } 5759 5760 /* 5761 * phase handlers that are rarely used 5762 */ 5763 static int 5764 fas_handle_cmd_start(struct fas *fas) 5765 { 5766 struct fas_cmd *sp = fas->f_current_sp; 5767 volatile uchar_t *tp = fas->f_cmdarea; 5768 int i; 5769 int amt = sp->cmd_cdblen; 
5770 5771 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_START, 5772 "fas_handle_cmd_start_start"); 5773 EPRINTF("fas_handle_cmd: send cmd\n"); 5774 5775 for (i = 0; i < amt; i++) { 5776 *tp++ = sp->cmd_cdbp[i]; 5777 } 5778 fas_reg_cmd_write(fas, CMD_FLUSH); 5779 5780 FAS_DMA_READ(fas, amt, fas->f_dmacookie.dmac_address, amt, 5781 CMD_TRAN_INFO|CMD_DMA); 5782 fas->f_lastcount = amt; 5783 5784 New_state(fas, ACTS_CMD_DONE); 5785 5786 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_END, 5787 "fas_handle_cmd_start_end"); 5788 return (ACTION_RETURN); 5789 } 5790 5791 static int 5792 fas_handle_cmd_done(struct fas *fas) 5793 { 5794 struct fas_cmd *sp = fas->f_current_sp; 5795 uchar_t intr = fas->f_intr; 5796 volatile struct dma *dmar = fas->f_dma; 5797 5798 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_START, 5799 "fas_handle_cmd_done_start"); 5800 EPRINTF("fas_handle_cmd_done\n"); 5801 5802 /* 5803 * We should have gotten a BUS SERVICE interrupt. 5804 * If it isn't that, and it isn't a DISCONNECT 5805 * interrupt, we have a "cannot happen" situation. 
5806 */ 5807 if ((intr & FAS_INT_BUS) == 0) { 5808 if ((intr & FAS_INT_DISCON) == 0) { 5809 fas_printstate(fas, "cmd transmission error"); 5810 TRACE_0(TR_FAC_SCSI_FAS, 5811 TR_FAS_HANDLE_CMD_DONE_ABORT1_END, 5812 "fas_handle_cmd_done_end (abort1)"); 5813 return (ACTION_ABORT_CURCMD); 5814 } 5815 } else { 5816 sp->cmd_pkt->pkt_state |= STATE_SENT_CMD; 5817 } 5818 5819 fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr); 5820 FAS_FLUSH_DMA(fas); 5821 5822 New_state(fas, ACTS_UNKNOWN); 5823 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_END, 5824 "fas_handle_cmd_done_end"); 5825 return (fas_handle_unknown(fas)); 5826 } 5827 5828 /* 5829 * Begin to send a message out 5830 */ 5831 static int 5832 fas_handle_msg_out_start(struct fas *fas) 5833 { 5834 struct fas_cmd *sp = fas->f_current_sp; 5835 uchar_t *msgout = fas->f_cur_msgout; 5836 uchar_t amt = fas->f_omsglen; 5837 5838 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_START, 5839 "fas_handle_msg_out_start"); 5840 EPRINTF("fas_handle_msg_out_start\n"); 5841 5842 /* 5843 * Check to make *sure* that we are really 5844 * in MESSAGE OUT phase. If the last state 5845 * was ACTS_MSG_OUT_DONE, then we are trying 5846 * to resend a message that the target stated 5847 * had a parity error in it. 5848 * 5849 * If this is the case, and mark completion reason as CMD_NOMSGOUT. 5850 * XXX: Right now, we just *drive* on. Should we abort the command? 5851 */ 5852 if ((fas->f_stat & FAS_PHASE_MASK) != FAS_PHASE_MSG_OUT && 5853 fas->f_laststate == ACTS_MSG_OUT_DONE) { 5854 fas_log(fas, CE_WARN, 5855 "Target %d refused message resend", Tgt(sp)); 5856 fas_set_pkt_reason(fas, sp, CMD_NOMSGOUT, 0); 5857 New_state(fas, ACTS_UNKNOWN); 5858 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_PHASEMANAGE_END, 5859 "fas_handle_msg_out_end (ACTION_PHASEMANAGE)"); 5860 return (ACTION_PHASEMANAGE); 5861 } 5862 5863 /* 5864 * Clean the fifo. 
5865 */ 5866 fas_reg_cmd_write(fas, CMD_FLUSH); 5867 5868 if (amt == 0) { 5869 /* 5870 * no msg to send 5871 */ 5872 *msgout = MSG_NOP; 5873 amt = fas->f_omsglen = 1; 5874 } 5875 5876 /* 5877 * If msg only 1 byte, just dump it in the fifo and go. For 5878 * multi-byte msgs, dma them to save time. If we have no 5879 * msg to send and we're in msg out phase, send a NOP. 5880 */ 5881 fas->f_last_msgout = *msgout; 5882 5883 /* 5884 * There is a bug in the fas366 that occasionaly 5885 * deasserts the ATN signal prematurely when we send 5886 * the sync/wide negotiation bytes out using DMA. The 5887 * workaround here is to send the negotiation bytes out 5888 * using PIO 5889 */ 5890 fas_write_fifo(fas, msgout, fas->f_omsglen, 1); 5891 fas_reg_cmd_write(fas, CMD_TRAN_INFO); 5892 5893 EPRINTF2("amt=%x, last_msgout=%x\n", amt, fas->f_last_msgout); 5894 5895 New_state(fas, ACTS_MSG_OUT_DONE); 5896 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_END, 5897 "fas_handle_msg_out_end"); 5898 return (ACTION_RETURN); 5899 } 5900 5901 static int 5902 fas_handle_msg_out_done(struct fas *fas) 5903 { 5904 struct fas_cmd *sp = fas->f_current_sp; 5905 uchar_t msgout, phase; 5906 int target = Tgt(sp); 5907 int amt = fas->f_omsglen; 5908 int action; 5909 5910 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_START, 5911 "fas_handle_msg_out_done_start"); 5912 msgout = fas->f_cur_msgout[0]; 5913 if ((msgout == MSG_HEAD_QTAG) || (msgout == MSG_SIMPLE_QTAG)) { 5914 msgout = fas->f_cur_msgout[2]; 5915 } 5916 EPRINTF4("msgout: %x %x %x, last_msgout=%x\n", 5917 fas->f_cur_msgout[0], fas->f_cur_msgout[1], 5918 fas->f_cur_msgout[2], fas->f_last_msgout); 5919 5920 EPRINTF1("fas_handle_msgout_done: msgout=%x\n", msgout); 5921 5922 /* 5923 * flush fifo, just in case some bytes were not sent 5924 */ 5925 fas_reg_cmd_write(fas, CMD_FLUSH); 5926 5927 /* 5928 * If the FAS disconnected, then the message we sent caused 5929 * the target to decide to drop BSY* and clear the bus. 
5930 */ 5931 if (fas->f_intr == FAS_INT_DISCON) { 5932 if (msgout == MSG_DEVICE_RESET || msgout == MSG_ABORT || 5933 msgout == MSG_ABORT_TAG) { 5934 /* 5935 * If we sent a device reset msg, then we need to do 5936 * a synch negotiate again unless we have already 5937 * inhibited synch. 5938 */ 5939 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) { 5940 fas->f_abort_msg_sent++; 5941 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) { 5942 fas_set_pkt_reason(fas, sp, 5943 CMD_ABORTED, STAT_ABORTED); 5944 } 5945 } else if (msgout == MSG_DEVICE_RESET) { 5946 fas->f_reset_msg_sent++; 5947 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) { 5948 fas_set_pkt_reason(fas, sp, 5949 CMD_RESET, STAT_DEV_RESET); 5950 } 5951 fas_force_renegotiation(fas, Tgt(sp)); 5952 } 5953 EPRINTF2("Successful %s message to target %d\n", 5954 scsi_mname(msgout), target); 5955 5956 if (sp->cmd_flags & CFLAG_CMDPROXY) { 5957 sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE; 5958 } 5959 TRACE_0(TR_FAC_SCSI_FAS, 5960 TR_FAS_HANDLE_MSG_OUT_DONE_FINISH_END, 5961 "fas_handle_msg_out_done_end (ACTION_FINISH)"); 5962 return (ACTION_FINISH); 5963 } 5964 /* 5965 * If the target dropped busy on any other message, it 5966 * wasn't expected. We will let the code in fas_phasemanage() 5967 * handle this unexpected bus free event. 5968 */ 5969 goto out; 5970 } 5971 5972 /* 5973 * What phase have we transitioned to? 5974 */ 5975 phase = fas->f_stat & FAS_PHASE_MASK; 5976 5977 /* 5978 * If we finish sending a message out, and we are 5979 * still in message out phase, then the target has 5980 * detected one or more parity errors in the message 5981 * we just sent and it is asking us to resend the 5982 * previous message. 5983 */ 5984 if ((fas->f_intr & FAS_INT_BUS) && phase == FAS_PHASE_MSG_OUT) { 5985 /* 5986 * As per SCSI-2 specification, if the message to 5987 * be re-sent is greater than one byte, then we 5988 * have to set ATN*. 
5989 */ 5990 if (amt > 1) { 5991 fas_assert_atn(fas); 5992 } 5993 fas_log(fas, CE_WARN, 5994 "SCSI bus MESSAGE OUT phase parity error"); 5995 sp->cmd_pkt->pkt_statistics |= STAT_PERR; 5996 New_state(fas, ACTS_MSG_OUT); 5997 TRACE_0(TR_FAC_SCSI_FAS, 5998 TR_FAS_HANDLE_MSG_OUT_DONE_PHASEMANAGE_END, 5999 "fas_handle_msg_out_done_end (ACTION_PHASEMANAGE)"); 6000 return (ACTION_PHASEMANAGE); 6001 } 6002 6003 6004 out: 6005 fas->f_last_msgout = msgout; 6006 fas->f_omsglen = 0; 6007 New_state(fas, ACTS_UNKNOWN); 6008 action = fas_handle_unknown(fas); 6009 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_END, 6010 "fas_handle_msg_out_done_end"); 6011 return (action); 6012 } 6013 6014 static int 6015 fas_twobyte_msg(struct fas *fas) 6016 { 6017 struct fas_cmd *sp = fas->f_current_sp; 6018 6019 if ((fas->f_imsgarea[0] == MSG_IGNORE_WIDE_RESID) && 6020 (fas->f_imsgarea[1] == 1)) { 6021 int xfer_amt; 6022 6023 /* 6024 * Knock off one byte if there 6025 * is a last transfer and is even number of bytes 6026 */ 6027 xfer_amt = sp->cmd_data_count - sp->cmd_saved_data_count; 6028 if (xfer_amt && (!(xfer_amt & 1))) { 6029 ASSERT(sp->cmd_data_count > 0); 6030 sp->cmd_data_count--; 6031 sp->cmd_cur_addr--; 6032 } 6033 IPRINTF1("ignore wide resid %d\n", fas->f_imsgarea[1]); 6034 New_state(fas, ACTS_UNKNOWN); 6035 return (0); 6036 } 6037 6038 fas_log(fas, CE_WARN, 6039 "Two byte message '%s' 0x%x rejected", 6040 scsi_mname(fas->f_imsgarea[0]), fas->f_imsgarea[1]); 6041 return (MSG_REJECT); 6042 } 6043 6044 /* 6045 * handle receiving extended messages 6046 */ 6047 static int 6048 fas_multibyte_msg(struct fas *fas) 6049 { 6050 #ifdef FASDEBUG 6051 static char *mbs = 6052 "Target %d now Synchronous at %d.%d MB/s max transmit rate\n"; 6053 static char *mbs1 = 6054 "Target %d now Synchronous at %d.0%d MB/s max transmit rate\n"; 6055 static char *mbs2 = 6056 "Target %d now Synchronous at %d.00%d MB/s max transmit rate\n"; 6057 #endif 6058 struct fas_cmd *sp = fas->f_current_sp; 6059 
volatile struct fasreg *fasreg = fas->f_reg; 6060 uchar_t emsg = fas->f_imsgarea[2]; 6061 int tgt = Tgt(sp); 6062 int msgout = 0; 6063 6064 EPRINTF("fas_multibyte_msg:\n"); 6065 6066 if (emsg == MSG_SYNCHRONOUS) { 6067 uint_t period, offset, regval; 6068 uint_t minsync, maxsync, clockval; 6069 uint_t xfer_freq, xfer_div, xfer_mod, xfer_rate; 6070 6071 period = fas->f_imsgarea[3] & 0xff; 6072 offset = fas->f_imsgarea[4] & 0xff; 6073 minsync = MIN_SYNC_PERIOD(fas); 6074 maxsync = MAX_SYNC_PERIOD(fas); 6075 DPRINTF5("sync msg received: %x %x %x %x %x\n", 6076 fas->f_imsgarea[0], fas->f_imsgarea[1], 6077 fas->f_imsgarea[2], fas->f_imsgarea[3], 6078 fas->f_imsgarea[4]); 6079 DPRINTF3("received period %d offset %d from tgt %d\n", 6080 period, offset, tgt); 6081 DPRINTF3("calculated minsync %d, maxsync %d for tgt %d\n", 6082 minsync, maxsync, tgt); 6083 DPRINTF2("sync period %d, neg period %d\n", 6084 fas->f_sync_period[tgt], fas->f_neg_period[tgt]); 6085 6086 if ((++(fas->f_sdtr_sent)) & 1) { 6087 /* 6088 * In cases where the target negotiates synchronous 6089 * mode before we do, and we either have sync mode 6090 * disabled, or this target is known to be a weak 6091 * signal target, we send back a message indicating 6092 * a desire to stay in asynchronous mode (the SCSI-2 6093 * spec states that if we have synchronous capability 6094 * then we cannot reject a SYNCHRONOUS DATA TRANSFER 6095 * REQUEST message). 6096 */ 6097 IPRINTF1("SYNC negotiation initiated by target %d\n", 6098 tgt); 6099 6100 msgout = MSG_EXTENDED; 6101 6102 period = 6103 period ? max(period, MIN_SYNC_PERIOD(fas)) : 0; 6104 6105 if (fas->f_backoff & (1<<tgt)) { 6106 period = period ? 6107 max(period, fas->f_neg_period[tgt]) : 0; 6108 } 6109 offset = min(offset, fas_default_offset); 6110 } 6111 xfer_freq = regval = 0; 6112 6113 /* 6114 * If the target's offset is bigger than ours, 6115 * the target has violated the scsi protocol. 
6116 */ 6117 if (offset > fas_default_offset) { 6118 period = offset = 0; 6119 msgout = MSG_REJECT; 6120 } 6121 6122 if (offset && (period > maxsync)) { 6123 /* 6124 * We cannot transmit data in synchronous 6125 * mode this slow, so convert to asynchronous 6126 * mode. 6127 */ 6128 msgout = MSG_EXTENDED; 6129 period = offset = 0; 6130 6131 } else if (offset && (period < minsync)) { 6132 /* 6133 * If the target's period is less than ours, 6134 * the target has violated the scsi protocol. 6135 */ 6136 period = offset = 0; 6137 msgout = MSG_REJECT; 6138 6139 } else if (offset) { 6140 /* 6141 * Conversion method for received PERIOD value 6142 * to the number of input clock ticks to the FAS. 6143 * 6144 * We adjust the input period value such that 6145 * we always will transmit data *not* faster 6146 * than the period value received. 6147 */ 6148 6149 clockval = fas->f_clock_cycle / 1000; 6150 regval = (((period << 2) + clockval - 1) / clockval); 6151 6152 /* 6153 * correction if xfer rate <= 5MB/sec 6154 * XXX do we need this? 6155 */ 6156 if (regval && (period >= FASTSCSI_THRESHOLD)) { 6157 regval--; 6158 } 6159 } 6160 6161 fas->f_offset[tgt] = offset; 6162 fas->f_neg_period[tgt] = period; 6163 6164 /* 6165 * Is is now safe to produce a responce to a target 6166 * initiated sdtr. period and offset have been checked. 
6167 */ 6168 if (msgout == MSG_EXTENDED) { 6169 fas_make_sdtr(fas, 0, tgt); 6170 period = fas->f_neg_period[tgt]; 6171 offset = (fas->f_offset[tgt] & 0xf); 6172 } 6173 6174 if (offset) { 6175 fas->f_sync_period[tgt] = regval & SYNC_PERIOD_MASK; 6176 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period, 6177 fas->f_sync_period[tgt]); 6178 6179 fas->f_offset[tgt] = offset | fas->f_req_ack_delay; 6180 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset, 6181 fas->f_offset[tgt]); 6182 6183 /* 6184 * if transferring > 5 MB/sec then enable 6185 * fastscsi in conf3 6186 */ 6187 if (period < FASTSCSI_THRESHOLD) { 6188 fas->f_fasconf3[tgt] |= FAS_CONF3_FASTSCSI; 6189 } else { 6190 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI; 6191 } 6192 6193 fas_reg_write(fas, (uchar_t *)&fasreg->fas_conf3, 6194 fas->f_fasconf3[tgt]); 6195 6196 DPRINTF4("period %d (%d), offset %d to tgt %d\n", 6197 period, 6198 fas->f_sync_period[tgt] & SYNC_PERIOD_MASK, 6199 fas->f_offset[tgt] & 0xf, tgt); 6200 DPRINTF1("req/ack delay = %x\n", fas->f_req_ack_delay); 6201 DPRINTF1("conf3 = %x\n", fas->f_fasconf3[tgt]); 6202 #ifdef FASDEBUG 6203 /* 6204 * Convert input clock cycle per 6205 * byte to nanoseconds per byte. 6206 * (ns/b), and convert that to 6207 * k-bytes/second. 6208 */ 6209 xfer_freq = FAS_SYNC_KBPS((regval * 6210 fas->f_clock_cycle) / 1000); 6211 xfer_rate = ((fas->f_nowide & (1<<tgt))? 1 : 2) * 6212 xfer_freq; 6213 xfer_div = xfer_rate / 1000; 6214 xfer_mod = xfer_rate % 1000; 6215 6216 6217 if (xfer_mod > 99) { 6218 IPRINTF3(mbs, tgt, xfer_div, xfer_mod); 6219 } else if (xfer_mod > 9) { 6220 IPRINTF3(mbs1, tgt, xfer_div, xfer_mod); 6221 } else { 6222 IPRINTF3(mbs2, tgt, xfer_div, xfer_mod); 6223 } 6224 #endif 6225 fas->f_sync_enabled |= (1<<tgt); 6226 6227 } else { 6228 /* 6229 * We are converting back to async mode. 6230 */ 6231 fas_revert_to_async(fas, tgt); 6232 } 6233 6234 /* 6235 * If this target violated the scsi spec, reject the 6236 * sdtr msg and don't negotiate sdtr again. 
6237 */ 6238 if (msgout == MSG_REJECT) { 6239 fas->f_nosync |= (1<<tgt); 6240 } 6241 6242 fas->f_props_update |= (1<<tgt); 6243 6244 } else if (emsg == MSG_WIDE_DATA_XFER) { 6245 uchar_t width = fas->f_imsgarea[3] & 0xff; 6246 6247 DPRINTF4("wide msg received: %x %x %x %x\n", 6248 fas->f_imsgarea[0], fas->f_imsgarea[1], 6249 fas->f_imsgarea[2], fas->f_imsgarea[3]); 6250 6251 /* always renegotiate sync after wide */ 6252 msgout = MSG_EXTENDED; 6253 6254 if ((++(fas->f_wdtr_sent)) & 1) { 6255 IPRINTF1("Wide negotiation initiated by target %d\n", 6256 tgt); 6257 /* 6258 * allow wide neg even if the target driver hasn't 6259 * enabled wide yet. 6260 */ 6261 fas->f_nowide &= ~(1<<tgt); 6262 fas_make_wdtr(fas, 0, tgt, width); 6263 IPRINTF1("sending wide sync %d back\n", width); 6264 /* 6265 * Let us go back to async mode(SCSI spec) 6266 * and depend on target to do sync 6267 * after wide negotiations. 6268 * If target does not do a sync neg and enters 6269 * async mode we will negotiate sync on next command 6270 */ 6271 fas_revert_to_async(fas, tgt); 6272 fas->f_sync_known &= ~(1<<tgt); 6273 } else { 6274 /* 6275 * renegotiate sync after wide 6276 */ 6277 fas_set_wide_conf3(fas, tgt, width); 6278 ASSERT(width <= 1); 6279 fas->f_wdtr_sent = 0; 6280 if ((fas->f_nosync & (1<<tgt)) == 0) { 6281 fas_make_sdtr(fas, 0, tgt); 6282 } else { 6283 msgout = 0; 6284 } 6285 } 6286 6287 fas->f_props_update |= (1<<tgt); 6288 6289 } else if (emsg == MSG_MODIFY_DATA_PTR) { 6290 msgout = MSG_REJECT; 6291 } else { 6292 fas_log(fas, CE_WARN, 6293 "Rejecting message %s 0x%x from Target %d", 6294 scsi_mname(MSG_EXTENDED), emsg, tgt); 6295 msgout = MSG_REJECT; 6296 } 6297 out: 6298 New_state(fas, ACTS_UNKNOWN); 6299 return (msgout); 6300 } 6301 6302 /* 6303 * Back off sync negotiation 6304 * and got to async mode 6305 */ 6306 static void 6307 fas_revert_to_async(struct fas *fas, int tgt) 6308 { 6309 volatile struct fasreg *fasreg = fas->f_reg; 6310 6311 fas->f_sync_period[tgt] = 0; 6312 
fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period, 0); 6313 fas->f_offset[tgt] = 0; 6314 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset, 0); 6315 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI; 6316 fas_reg_write(fas, &fasreg->fas_conf3, fas->f_fasconf3[tgt]); 6317 fas->f_sync_enabled &= ~(1<<tgt); 6318 } 6319 6320 /* 6321 * handle an unexpected selection attempt 6322 * XXX look for better way: msg reject, drop off the bus 6323 */ 6324 static int 6325 fas_handle_selection(struct fas *fas) 6326 { 6327 fas_reg_cmd_write(fas, CMD_DISCONNECT); 6328 fas_reg_cmd_write(fas, CMD_FLUSH); 6329 fas_reg_cmd_write(fas, CMD_EN_RESEL); 6330 return (ACTION_RETURN); 6331 } 6332 6333 /* 6334 * dma window handling 6335 */ 6336 static int 6337 fas_restore_pointers(struct fas *fas, struct fas_cmd *sp) 6338 { 6339 if (sp->cmd_data_count != sp->cmd_saved_data_count) { 6340 sp->cmd_data_count = sp->cmd_saved_data_count; 6341 sp->cmd_cur_addr = sp->cmd_saved_cur_addr; 6342 6343 if (sp->cmd_cur_win != sp->cmd_saved_win) { 6344 sp->cmd_cur_win = sp->cmd_saved_win; 6345 if (fas_set_new_window(fas, sp)) { 6346 return (-1); 6347 } 6348 } 6349 DPRINTF1("curaddr=%x\n", sp->cmd_cur_addr); 6350 } 6351 return (0); 6352 } 6353 6354 static int 6355 fas_set_new_window(struct fas *fas, struct fas_cmd *sp) 6356 { 6357 off_t offset; 6358 size_t len; 6359 uint_t count; 6360 6361 if (ddi_dma_getwin(sp->cmd_dmahandle, sp->cmd_cur_win, 6362 &offset, &len, &sp->cmd_dmacookie, &count) != DDI_SUCCESS) { 6363 return (-1); 6364 } 6365 6366 DPRINTF4("new window %x: off=%lx, len=%lx, count=%x\n", 6367 sp->cmd_cur_win, offset, len, count); 6368 6369 ASSERT(count == 1); 6370 return (0); 6371 } 6372 6373 static int 6374 fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end) 6375 { 6376 6377 /* are there more windows? 
*/ 6378 if (sp->cmd_nwin == 0) { 6379 uint_t nwin = 0; 6380 (void) ddi_dma_numwin(sp->cmd_dmahandle, &nwin); 6381 sp->cmd_nwin = (uchar_t)nwin; 6382 } 6383 6384 DPRINTF5( 6385 "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%lx, nwin=%x\n", 6386 sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end, 6387 sp->cmd_nwin); 6388 6389 if (sp->cmd_cur_win < sp->cmd_nwin) { 6390 sp->cmd_cur_win++; 6391 if (fas_set_new_window(fas, sp)) { 6392 fas_printstate(fas, "cannot set new window"); 6393 sp->cmd_cur_win--; 6394 return (-1); 6395 } 6396 /* 6397 * if there are no more windows, we have a data overrun condition 6398 */ 6399 } else { 6400 int slot = sp->cmd_slot; 6401 6402 fas_printstate(fas, "data transfer overrun"); 6403 fas_set_pkt_reason(fas, sp, CMD_DATA_OVR, 0); 6404 6405 /* 6406 * if we get data transfer overruns, assume we have 6407 * a weak scsi bus. Note that this won't catch consistent 6408 * underruns or other noise related syndromes. 6409 */ 6410 fas_sync_wide_backoff(fas, sp, slot); 6411 return (-1); 6412 } 6413 sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address; 6414 DPRINTF1("cur_addr=%x\n", sp->cmd_cur_addr); 6415 return (0); 6416 } 6417 6418 /* 6419 * dma error handler 6420 */ 6421 static int 6422 fas_check_dma_error(struct fas *fas) 6423 { 6424 /* 6425 * was there a dma error that caused fas_intr_svc() to be called? 6426 */ 6427 if (fas->f_dma->dma_csr & DMA_ERRPEND) { 6428 /* 6429 * It would be desirable to set the ATN* line and attempt to 6430 * do the whole schmear of INITIATOR DETECTED ERROR here, 6431 * but that is too hard to do at present. 
6432 */ 6433 fas_log(fas, CE_WARN, "Unrecoverable DMA error"); 6434 fas_printstate(fas, "dma error"); 6435 fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0); 6436 return (-1); 6437 } 6438 return (0); 6439 } 6440 6441 /* 6442 * check for gross error or spurious interrupt 6443 */ 6444 static int 6445 fas_handle_gross_err(struct fas *fas) 6446 { 6447 volatile struct fasreg *fasreg = fas->f_reg; 6448 6449 fas_log(fas, CE_WARN, 6450 "gross error in fas status (%x)", fas->f_stat); 6451 6452 IPRINTF5("fas_cmd=%x, stat=%x, intr=%x, step=%x, fifoflag=%x\n", 6453 fasreg->fas_cmd, fas->f_stat, fas->f_intr, fasreg->fas_step, 6454 fasreg->fas_fifo_flag); 6455 6456 fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0); 6457 6458 fas_internal_reset(fas, FAS_RESET_FAS); 6459 return (ACTION_RESET); 6460 } 6461 6462 6463 /* 6464 * handle illegal cmd interrupt or (external) bus reset cleanup 6465 */ 6466 static int 6467 fas_illegal_cmd_or_bus_reset(struct fas *fas) 6468 { 6469 /* 6470 * If we detect a SCSI reset, we blow away the current 6471 * command (if there is one) and all disconnected commands 6472 * because we now don't know the state of them at all. 6473 */ 6474 ASSERT(fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET)); 6475 6476 if (fas->f_intr & FAS_INT_RESET) { 6477 return (ACTION_FINRST); 6478 } 6479 6480 /* 6481 * Illegal cmd to fas: 6482 * This should not happen. The one situation where 6483 * we can get an ILLEGAL COMMAND interrupt is due to 6484 * a bug in the FAS366 during reselection which we 6485 * should be handling in fas_reconnect(). 
6486 */ 6487 if (fas->f_intr & FAS_INT_ILLEGAL) { 6488 IPRINTF1("lastcmd=%x\n", fas->f_reg->fas_cmd); 6489 fas_printstate(fas, "ILLEGAL bit set"); 6490 return (ACTION_RESET); 6491 } 6492 /*NOTREACHED*/ 6493 return (ACTION_RETURN); 6494 } 6495 6496 /* 6497 * set throttles for all luns of this target 6498 */ 6499 static void 6500 fas_set_throttles(struct fas *fas, int slot, int n, int what) 6501 { 6502 int i; 6503 6504 /* 6505 * if the bus is draining/quiesced, no changes to the throttles 6506 * are allowed. Not allowing change of throttles during draining 6507 * limits error recovery but will reduce draining time 6508 * 6509 * all throttles should have been set to HOLD_THROTTLE 6510 */ 6511 if (fas->f_softstate & (FAS_SS_QUIESCED | FAS_SS_DRAINING)) { 6512 return; 6513 } 6514 6515 ASSERT((n == 1) || (n == N_SLOTS) || (n == NLUNS_PER_TARGET)); 6516 ASSERT((slot + n) <= N_SLOTS); 6517 if (n == NLUNS_PER_TARGET) { 6518 slot &= ~(NLUNS_PER_TARGET - 1); 6519 } 6520 6521 for (i = slot; i < (slot + n); i++) { 6522 if (what == HOLD_THROTTLE) { 6523 fas->f_throttle[i] = HOLD_THROTTLE; 6524 } else if ((fas->f_reset_delay[i/NLUNS_PER_TARGET]) == 0) { 6525 if (what == MAX_THROTTLE) { 6526 int tshift = 1 << (i/NLUNS_PER_TARGET); 6527 fas->f_throttle[i] = (short) 6528 ((fas->f_notag & tshift)? 
1 : what); 6529 } else { 6530 fas->f_throttle[i] = what; 6531 } 6532 } 6533 } 6534 } 6535 6536 static void 6537 fas_set_all_lun_throttles(struct fas *fas, int slot, int what) 6538 { 6539 /* 6540 * fas_set_throttle will adjust slot to starting at LUN 0 6541 */ 6542 fas_set_throttles(fas, slot, NLUNS_PER_TARGET, what); 6543 } 6544 6545 static void 6546 fas_full_throttle(struct fas *fas, int slot) 6547 { 6548 fas_set_throttles(fas, slot, 1, MAX_THROTTLE); 6549 } 6550 6551 /* 6552 * run a polled cmd 6553 */ 6554 static void 6555 fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp) 6556 { 6557 int limit, i, n; 6558 int timeout = 0; 6559 6560 DPRINTF4("runpoll: slot=%x, cmd=%x, current_sp=0x%p, tcmds=%x\n", 6561 slot, *((uchar_t *)sp->cmd_pkt->pkt_cdbp), 6562 (void *)fas->f_current_sp, fas->f_tcmds[slot]); 6563 6564 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_START, "fas_runpoll_start"); 6565 6566 /* 6567 * wait for cmd to complete 6568 * don't start new cmds so set throttles to HOLD_THROTTLE 6569 */ 6570 while ((sp->cmd_flags & CFLAG_COMPLETED) == 0) { 6571 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) { 6572 fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE); 6573 } 6574 if ((fas->f_state != STATE_FREE) || INTPENDING(fas)) { 6575 if (fas_dopoll(fas, POLL_TIMEOUT) <= 0) { 6576 IPRINTF("runpoll: timeout on draining\n"); 6577 goto bad; 6578 } 6579 } 6580 6581 ASSERT(fas->f_state == STATE_FREE); 6582 ASSERT(fas->f_current_sp == NULL); 6583 6584 /* 6585 * if this is not a proxy cmd, don't start the cmd 6586 * without draining the active cmd(s) 6587 * for proxy cmds, we zap the active cmd and assume 6588 * that the caller will take care of this 6589 * For tagged cmds, wait with submitting a non-tagged 6590 * cmd until the queue has been drained 6591 * If the cmd is a request sense, then draining won't 6592 * help since we are in contingence allegiance condition 6593 */ 6594 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) { 6595 uchar_t *cmdp = (uchar_t *)sp->cmd_pkt->pkt_cdbp; 6596 
			if ((fas->f_tcmds[slot]) &&
			    (NOTAG(Tgt(sp)) ||
			    (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
			    (*cmdp != SCMD_REQUEST_SENSE)))) {
				if (timeout < POLL_TIMEOUT) {
					timeout += 100;
					drv_usecwait(100);
					continue;
				} else {
					fas_log(fas, CE_WARN,
					    "polled cmd failed (target busy)");
					goto cleanup;
				}
			}
		}

		/*
		 * If the draining of active commands killed the
		 * current polled command, we're done..
		 */
		if (sp->cmd_flags & CFLAG_COMPLETED) {
			break;
		}

		/*
		 * ensure we are not accessing a target too quickly
		 * after a reset. the throttles get set back later
		 * by the reset delay watch; hopefully, we don't go
		 * thru this loop more than once
		 */
		if (fas->f_reset_delay[slot/NLUNS_PER_TARGET]) {
			IPRINTF1("reset delay set for slot %x\n", slot);
			drv_usecwait(fas->f_scsi_reset_delay * 1000);
			for (i = 0; i < NTARGETS_WIDE; i++) {
				if (fas->f_reset_delay[i]) {
					int s = i * NLUNS_PER_TARGET;
					int e = s + NLUNS_PER_TARGET;
					fas->f_reset_delay[i] = 0;
					for (; s < e; s++) {
						fas_full_throttle(fas, s);
					}
				}
			}
		}

		/*
		 * fas_startcmd() will return false if preempted
		 * or draining
		 */
		if (fas_startcmd(fas, sp) != TRUE) {
			IPRINTF("runpoll: cannot start new cmds\n");
			ASSERT(fas->f_current_sp != sp);
			continue;
		}

		/*
		 * We're now 'running' this command.
		 *
		 * fas_dopoll will always return when
		 * fas->f_state is STATE_FREE, and
		 */
		limit = sp->cmd_pkt->pkt_time * 1000000;
		if (limit == 0) {
			limit = POLL_TIMEOUT;
		}

		/*
		 * if the cmd disconnected, the first call to fas_dopoll
		 * will return with bus free; we go thru the loop one more
		 * time and wait limit usec for the target to reconnect
		 */
		for (i = 0; i <= POLL_TIMEOUT; i += 100) {

			if ((n = fas_dopoll(fas, limit)) <= 0) {
				IPRINTF("runpoll: timeout on polling\n");
				goto bad;
			}

			/*
			 * If a preemption occurred that caused this
			 * command to actually not start, go around
			 * the loop again. If CFLAG_COMPLETED is set, the
			 * command completed
			 */
			if ((sp->cmd_flags & CFLAG_COMPLETED) ||
			    (sp->cmd_pkt->pkt_state == 0)) {
				break;
			}

			/*
			 * the bus may have gone free because the target
			 * disconnected; go thru the loop again
			 */
			ASSERT(fas->f_state == STATE_FREE);
			if (n == 0) {
				/*
				 * bump i, we have waited limit usecs in
				 * fas_dopoll
				 */
				i += limit - 100;
			}
		}

		if ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {

			if (i > POLL_TIMEOUT) {
				IPRINTF("polled timeout on disc. cmd\n");
				goto bad;
			}

			if (sp->cmd_pkt->pkt_state) {
				/*
				 * don't go thru the loop again; the cmd
				 * was already started
				 */
				IPRINTF("fas_runpoll: cmd started??\n");
				goto bad;
			}
		}
	}

	/*
	 * blindly restore throttles which is preferable over
	 * leaving throttle hanging at 0 and noone to clear it
	 */
	if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
		fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
	}

	/*
	 * ensure that the cmd is completely removed
	 */
	fas_remove_cmd(fas, sp, 0);

	/*
	 * If we stored up commands to do, start them off now.
	 */
	if ((fas->f_state == STATE_FREE) &&
	    (!(sp->cmd_flags & CFLAG_CMDPROXY))) {
		(void) fas_ustart(fas);
	}
exit:
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_END, "fas_runpoll_end");
	return;

bad:
	fas_log(fas, CE_WARN, "Polled cmd failed");
#ifdef FASDEBUG
	fas_printstate(fas, "fas_runpoll: polled cmd failed");
#endif /* FASDEBUG */

cleanup:
	fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);

	/*
	 * clean up all traces of this sp because fas_runpoll will return
	 * before fas_reset_recovery() cleans up
	 */
	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);

	if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
		(void) fas_reset_bus(fas);
	}
	goto exit;
}

/*
 * Poll for command completion (i.e., no interrupts)
 * limit is in usec (and will not be very accurate)
 *
 * the assumption is that we only run polled cmds in interrupt context
 * as scsi_transport will filter out FLAG_NOINTR
 *
 * returns -1 on timeout, otherwise the number of interrupts serviced
 * (0 means the bus went free without an interrupt being taken here)
 */
static int
fas_dopoll(struct fas *fas, int limit)
{
	int i, n;

	/*
	 * timeout is not very accurate since we don't know how
	 * long the poll takes
	 * also if the packet gets started fairly late, we may
	 * timeout prematurely
	 * fas_dopoll always returns if f_state transitions to STATE_FREE
	 */
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_START, "fas_dopoll_start");

	if (limit == 0) {
		limit = POLL_TIMEOUT;
	}

	for (n = i = 0; i < limit; i += 100) {
		if (INTPENDING(fas)) {
			fas->f_polled_intr = 1;
			n++;
			(void) fas_intr_svc(fas);
			if (fas->f_state == STATE_FREE)
				break;
		}
		drv_usecwait(100);
	}

	if (i >= limit && fas->f_state != STATE_FREE) {
		fas_printstate(fas, "polled command timeout");
		n = -1;
	}
	TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_END,
	    "fas_dopoll_end: rval %x", n);
	return (n);
}

/*
 * prepare a sync negotiation (SDTR) message at msgout_offset in
 * f_cur_msgout for 'target', applying backoff, scsi_options and
 * offset sanity checks
 */
static void
fas_make_sdtr(struct fas *fas, int msgout_offset, int target)
{
	uchar_t *p = fas->f_cur_msgout + msgout_offset;
	ushort_t tshift = 1<<target;
	uchar_t period = MIN_SYNC_PERIOD(fas);
	uchar_t offset = fas_default_offset;

	/*
	 * If this target experienced a sync backoff use the
	 * target's sync speed that was adjusted in
	 * fas_sync_wide_backoff. For second sync backoff,
	 * offset will be adjusted below in sanity checks.
	 */
	if (fas->f_backoff & tshift) {
		period = fas->f_neg_period[target];
	}

	/*
	 * If this is a response to a target initiated sdtr,
	 * use the agreed upon values.
	 */
	if (fas->f_sdtr_sent & 1) {
		period = fas->f_neg_period[target];
		offset = fas->f_offset[target];
	}

	/*
	 * If the target driver disabled
	 * sync then make offset = 0
	 */
	if (fas->f_force_async & tshift) {
		offset = 0;
	}

	/*
	 * sanity check of period and offset
	 */
	if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_FAST) {
		if (period < (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4)) {
			period = (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4);
		}
	} else if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_SYNC) {
		if (period < (uchar_t)(DEFAULT_SYNC_PERIOD/4)) {
			period = (uchar_t)(DEFAULT_SYNC_PERIOD/4);
		}
	} else {
		/* neither fast nor sync allowed: force async */
		fas->f_nosync |= tshift;
	}

	if (fas->f_nosync & tshift) {
		offset = 0;
	}

	if ((uchar_t)(offset & 0xf) > fas_default_offset) {
		offset = fas_default_offset | fas->f_req_ack_delay;
	}

	fas->f_neg_period[target] = (uchar_t)period;
	fas->f_offset[target] = (uchar_t)offset;

	*p++ = (uchar_t)MSG_EXTENDED;
	*p++ = (uchar_t)3;
	*p++ = (uchar_t)MSG_SYNCHRONOUS;
	*p++ =
	    period;
	*p++ = offset & 0xf;
	fas->f_omsglen = 5 + msgout_offset;

	IPRINTF2("fas_make_sdtr: period = %x, offset = %x\n",
	    period, offset);
	/*
	 * increment sdtr flag, odd value indicates that we initiated
	 * the negotiation
	 */
	fas->f_sdtr_sent++;

	/*
	 * the target may reject the optional sync message so
	 * to avoid negotiating on every cmd, set sync known here
	 * we should not negotiate wide after sync again
	 */
	fas->f_sync_known |= 1<<target;
	fas->f_wide_known |= 1<<target;
}

/*
 * prepare a wide negotiation (WDTR) message at msgout_offset in
 * f_cur_msgout for 'target'; width is clamped to FAS_XFER_WIDTH and
 * forced to 0 (narrow) when wide is disabled for this target
 */
static void
fas_make_wdtr(struct fas *fas, int msgout_offset, int target, int width)
{
	uchar_t *p = fas->f_cur_msgout + msgout_offset;

	if (((fas->f_target_scsi_options[target] & SCSI_OPTIONS_WIDE) == 0) ||
	    (fas->f_nowide & (1<<target))) {
		fas->f_nowide |= 1<<target;
		width = 0;
	}
	if (fas->f_force_narrow & (1<<target)) {
		width = 0;
	}
	width = min(FAS_XFER_WIDTH, width);

	*p++ = (uchar_t)MSG_EXTENDED;
	*p++ = (uchar_t)2;
	*p++ = (uchar_t)MSG_WIDE_DATA_XFER;
	*p++ = (uchar_t)width;
	fas->f_omsglen = 4 + msgout_offset;
	IPRINTF1("fas_make_wdtr: width=%x\n", width);

	/*
	 * increment wdtr flag, odd value indicates that we initiated
	 * the negotiation
	 */
	fas->f_wdtr_sent++;

	/*
	 * the target may reject the optional wide message so
	 * to avoid negotiating on every cmd, set wide known here
	 */
	fas->f_wide_known |= 1<<target;

	fas_set_wide_conf3(fas, target, width);
}

/*
 * auto request sense support
 * create or destroy an auto request sense packet
 *
 * fas_create_arq_pkt() allocates (with SLEEP_FUNC) and caches one
 * request sense packet per slot in f_arq_pkt[]; returns 0
 */
static int
fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap)
{
	/*
	 * Allocate a request sense packet using get_pktiopb
	 */
	struct fas_cmd *rqpktp;
	uchar_t slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
	struct buf *bp;
	struct arq_private_data *arq_data;

	/*
	 * if one exists, don't create another
	 */
	if (fas->f_arq_pkt[slot] != 0) {
		return (0);
	}

	/*
	 * it would be nicer if we could allow the target driver
	 * to specify the size but this is easier and OK for most
	 * drivers to use SENSE_LENGTH
	 * Allocate a request sense packet.
	 */
	bp = scsi_alloc_consistent_buf(ap, (struct buf *)NULL,
	    SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	rqpktp = PKT2CMD(scsi_init_pkt(ap,
	    NULL, bp, CDB_GROUP0, 1, PKT_PRIV_LEN,
	    PKT_CONSISTENT, SLEEP_FUNC, NULL));
	arq_data =
	    (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
	arq_data->arq_save_bp = bp;

	RQ_MAKECOM_G0((CMD2PKT(rqpktp)),
	    FLAG_SENSING | FLAG_HEAD | FLAG_NODISCON,
	    (char)SCMD_REQUEST_SENSE, 0, (char)SENSE_LENGTH);
	rqpktp->cmd_flags |= CFLAG_CMDARQ;
	rqpktp->cmd_slot = slot;
	rqpktp->cmd_pkt->pkt_ha_private = rqpktp;
	fas->f_arq_pkt[slot] = rqpktp;

	/*
	 * we need a function ptr here so abort/reset can
	 * defer callbacks; fas_call_pkt_comp() calls
	 * fas_complete_arq_pkt() directly without releasing the lock
	 * However, since we are not calling back directly thru
	 * pkt_comp, don't check this with warlock
	 */
#ifndef __lock_lint
	rqpktp->cmd_pkt->pkt_comp =
	    (void (*)(struct scsi_pkt *))fas_complete_arq_pkt;
#endif
	return (0);
}

/*
 * free the cached request sense packet for this slot;
 * returns -1 if it is still in use, 0 otherwise
 */
static int
fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap)
{
	struct fas_cmd *rqpktp;
	int slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;

	/*
	 * if there is still a pkt saved or no rqpkt
	 * then we cannot deallocate or there is nothing to do
	 */
	if ((rqpktp = fas->f_arq_pkt[slot]) != NULL) {
		struct arq_private_data *arq_data =
		    (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
		struct buf *bp = arq_data->arq_save_bp;
		/*
		 * is arq pkt in use?
		 */
		if (arq_data->arq_save_sp) {
			return (-1);
		}

		scsi_destroy_pkt(CMD2PKT(rqpktp));
		scsi_free_consistent_buf(bp);
		fas->f_arq_pkt[slot] = 0;
	}
	return (0);
}

/*
 * complete an arq packet by copying over transport info and the actual
 * request sense data; called with mutex held from fas_call_pkt_comp()
 */
void
fas_complete_arq_pkt(struct scsi_pkt *pkt)
{
	struct fas *fas = ADDR2FAS(&pkt->pkt_address);
	struct fas_cmd *sp = pkt->pkt_ha_private;
	struct scsi_arq_status *arqstat;
	struct arq_private_data *arq_data =
	    (struct arq_private_data *)sp->cmd_pkt->pkt_private;
	struct fas_cmd *ssp = arq_data->arq_save_sp;	/* original cmd */
	struct buf *bp = arq_data->arq_save_bp;
	int slot = sp->cmd_slot;

	DPRINTF1("completing arq pkt sp=0x%p\n", (void *)sp);
	ASSERT(sp == fas->f_arq_pkt[slot]);
	ASSERT(arq_data->arq_save_sp != NULL);
	ASSERT(ssp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);

	/*
	 * copy the sense pkt's transport info and the sense data
	 * into the original pkt's scsi_arq_status
	 */
	arqstat = (struct scsi_arq_status *)(ssp->cmd_pkt->pkt_scbp);
	arqstat->sts_rqpkt_status = *((struct scsi_status *)
	    (sp->cmd_pkt->pkt_scbp));
	arqstat->sts_rqpkt_reason = sp->cmd_pkt->pkt_reason;
	arqstat->sts_rqpkt_state = sp->cmd_pkt->pkt_state;
	arqstat->sts_rqpkt_statistics = sp->cmd_pkt->pkt_statistics;
	arqstat->sts_rqpkt_resid = sp->cmd_pkt->pkt_resid;
	arqstat->sts_sensedata =
	    *((struct scsi_extended_sense *)bp->b_un.b_addr);
	ssp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
	arq_data->arq_save_sp = NULL;

	/*
	 * ASC=0x47 is parity error
	 */
	if (arqstat->sts_sensedata.es_key == KEY_ABORTED_COMMAND &&
	    arqstat->sts_sensedata.es_add_code == 0x47) {
		fas_sync_wide_backoff(fas, sp, slot);
	}

	fas_call_pkt_comp(fas, ssp);
}

/*
 * handle check condition and start an arq packet
 *
 * returns 0 when the cmd was completed or the arq pkt was submitted,
 * -1 when the arq pkt could not be started (already in progress)
 */
static int
fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp)
{
	struct fas_cmd *arqsp = fas->f_arq_pkt[sp->cmd_slot];
	struct arq_private_data *arq_data;
	struct buf *bp;

	if ((arqsp == NULL) || (arqsp == sp) ||
	    (sp->cmd_scblen < sizeof (struct scsi_arq_status))) {
		IPRINTF("no arq packet or cannot arq on arq pkt\n");
		fas_call_pkt_comp(fas, sp);
		return (0);
	}

	arq_data = (struct arq_private_data *)arqsp->cmd_pkt->pkt_private;
	bp = arq_data->arq_save_bp;

	ASSERT(sp->cmd_flags & CFLAG_FINISHED);
	ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
	DPRINTF3("start arq for slot=%x, arqsp=0x%p, rqpkt=0x%p\n",
	    sp->cmd_slot, (void *)arqsp, (void *)fas->f_arq_pkt[sp->cmd_slot]);
	if (arq_data->arq_save_sp != NULL) {
		IPRINTF("auto request sense already in progress\n");
		goto fail;
	}

	arq_data->arq_save_sp = sp;

	bzero(bp->b_un.b_addr, sizeof (struct scsi_extended_sense));

	/*
	 * copy the timeout from the original packet by lack of a better
	 * value
	 * we could take the residue of the timeout but that could cause
	 * premature timeouts perhaps
	 */
	arqsp->cmd_pkt->pkt_time = sp->cmd_pkt->pkt_time;
	arqsp->cmd_flags &= ~CFLAG_TRANFLAG;
	ASSERT(arqsp->cmd_pkt->pkt_comp != NULL);

	/*
	 * make sure that auto request sense always goes out
	 * after queue full and after throttle was set to draining
	 */
	fas_full_throttle(fas, sp->cmd_slot);
	(void) fas_accept_pkt(fas, arqsp, NO_TRAN_BUSY);
	return (0);

fail:
	fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
	fas_log(fas, CE_WARN, "auto request sense failed\n");
	fas_dump_cmd(fas, sp);
	fas_call_pkt_comp(fas, sp);
	return (-1);
}


/*
 * handle qfull condition: either give up (complete the pkt so the
 * target driver's own QFULL handling kicks in) or lower the throttle
 * and resubmit the cmd at the head of the queue
 */
static void
fas_handle_qfull(struct fas *fas, struct fas_cmd *sp)
{
	int slot = sp->cmd_slot;

	if ((++sp->cmd_qfull_retries > fas->f_qfull_retries[Tgt(sp)]) ||
	    (fas->f_qfull_retries[Tgt(sp)] == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0. In either case
		 * we want the target driver's QFULL handling
		 * to kick in. We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		IPRINTF2("%d.%d: status queue full, retries over\n",
		    Tgt(sp), Lun(sp));
		fas_set_all_lun_throttles(fas, slot, DRAIN_THROTTLE);
		fas_call_pkt_comp(fas, sp);
	} else {
		if (fas->f_reset_delay[Tgt(sp)] == 0) {
			fas->f_throttle[slot] =
			    max((fas->f_tcmds[slot] - 2), 0);
		}
		IPRINTF3("%d.%d: status queue full, new throttle = %d, "
		    "retrying\n", Tgt(sp), Lun(sp), fas->f_throttle[slot]);
		sp->cmd_pkt->pkt_flags |= FLAG_HEAD;
		sp->cmd_flags &= ~CFLAG_TRANFLAG;
		(void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (f_tcmds[] == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling starts
		 * (see psarc/1994/313); if there are commands outstanding,
		 * the throttle is set to (f_tcmds[] - 2)
		 */
		if (fas->f_throttle[slot] == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * fas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			fas_set_all_lun_throttles(fas, slot, QFULL_THROTTLE);
			if (fas->f_restart_cmd_timeid == 0) {
				fas->f_restart_cmd_timeid =
				    timeout(fas_restart_cmd, fas,
				    fas->f_qfull_retry_interval[Tgt(sp)]);
			}
		}
	}
}

/*
 * invoked from timeout() to restart qfull cmds with throttle == 0
 * (i.e. slots parked at QFULL_THROTTLE by fas_handle_qfull)
 */
static void
fas_restart_cmd(void *fas_arg)
{
	struct fas *fas = fas_arg;
	int i;

	IPRINTF("fas_restart_cmd:\n");

	mutex_enter(FAS_MUTEX(fas));
	fas->f_restart_cmd_timeid = 0;

	for (i = 0; i < N_SLOTS; i += NLUNS_PER_TARGET) {
		if (fas->f_reset_delay[i/NLUNS_PER_TARGET] == 0) {
			if (fas->f_throttle[i] == QFULL_THROTTLE) {
				fas_set_all_lun_throttles(fas,
				    i, MAX_THROTTLE);
			}
		}
	}

	(void) fas_ustart(fas);
	mutex_exit(FAS_MUTEX(fas));
}

/*
 * Timeout handling:
 * Command watchdog routines
 */

/*
 * fas_watch() runs periodically (every fas_tick) for all fas
 * instances on the fas_head list; it drives the per-slot watchdog
 * (fas_watchsubr), resets stale throttles, flushes deferred property
 * updates and reschedules itself
 */
/*ARGSUSED*/
static void
fas_watch(void *arg)
{
	struct fas *fas;
	ushort_t props_update = 0;

	rw_enter(&fas_global_rwlock, RW_READER);

	for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {

		mutex_enter(FAS_MUTEX(fas));
		IPRINTF2("ncmds=%x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);

#ifdef FAS_PIO_COUNTS
	if (fas->f_total_cmds) {
		int n = fas->f_total_cmds;

		fas_log(fas, CE_NOTE,
	"total=%d, cmds=%d fas-rd=%d, fas-wrt=%d, dma-rd=%d, dma-wrt=%d\n",
		    fas->f_total_cmds,
		    fas->f_reg_cmds/n,
		    fas->f_reg_reads/n, fas->f_reg_writes/n,
		    fas->f_reg_dma_reads/n, fas->f_reg_dma_writes/n);

		fas->f_reg_reads = fas->f_reg_writes =
		    fas->f_reg_dma_reads = fas->f_reg_dma_writes =
		    fas->f_reg_cmds = fas->f_total_cmds = 0;
	}
#endif
		if (fas->f_ncmds) {
			int i;
			fas_watchsubr(fas);

			/*
			 * reset throttle.
			 * the throttle may have been
			 * too low if queue full was caused by
			 * another initiator
			 * Only reset throttle if no cmd active in slot 0
			 * (untagged cmd)
			 */
#ifdef FAS_TEST
			if (fas_enable_untagged) {
				fas_test_untagged++;
			}
#endif
			for (i = 0; i < N_SLOTS; i++) {
				if ((fas->f_throttle[i] > HOLD_THROTTLE) &&
				    (fas->f_active[i] &&
				    (fas->f_active[i]->f_slot[0] == NULL))) {
					fas_full_throttle(fas, i);
				}
			}
		}

		if (fas->f_props_update) {
			int i;
			/*
			 * f_mutex will be released and reentered in
			 * fas_props_update().
			 * Hence we save the fas->f_props_update now and
			 * set to 0 indicating that property has been
			 * updated. This will avoid a race condition with
			 * any thread that runs in interrupt context that
			 * attempts to set the f_props_update to non-zero value
			 */
			props_update = fas->f_props_update;
			fas->f_props_update = 0;
			for (i = 0; i < NTARGETS_WIDE; i++) {
				if (props_update & (1<<i)) {
					fas_update_props(fas, i);
				}
			}
		}
		fas_check_waitQ_and_mutex_exit(fas);

	}
	rw_exit(&fas_global_rwlock);

again:
	/* rearm the watchdog unless a detach cleared fas_timeout_id */
	mutex_enter(&fas_global_mutex);
	if (fas_timeout_initted && fas_timeout_id) {
		fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
	}
	mutex_exit(&fas_global_mutex);
	TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_WATCH_END, "fas_watch_end");
}

/*
 * per-instance watchdog: age the per-slot timeout counters by
 * fas_scsi_watchdog_tick and kick off timeout recovery
 * (fas_cmd_timeout) or queue draining when a slot is about to expire
 */
static void
fas_watchsubr(struct fas *fas)
{
	short slot;
	int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
	struct f_slots *tag_slots;

	for (slot = 0; slot < N_SLOTS; slot += d) {

#ifdef FAS_TEST
		if (fas_btest) {
			fas_btest = 0;
			(void) fas_reset_bus(fas);
			return;
		}
		if (fas_force_timeout && fas->f_tcmds[slot]) {
			fas_cmd_timeout(fas, slot);
			fas_force_timeout = 0;
			return;
		}
		fas_test_reset(fas, slot);
		fas_test_abort(fas, slot);
#endif /* FAS_TEST */

		/*
		 * check tagged cmds first
		 */
		tag_slots = fas->f_active[slot];
		DPRINTF3(
		    "fas_watchsubr: slot %x: tcmds=%x, timeout=%x\n",
		    slot, fas->f_tcmds[slot], tag_slots->f_timeout);

		if ((fas->f_tcmds[slot] > 0) && (tag_slots->f_timebase)) {

			if (tag_slots->f_timebase <=
			    fas_scsi_watchdog_tick) {
				tag_slots->f_timebase +=
				    fas_scsi_watchdog_tick;
				continue;
			}

			tag_slots->f_timeout -= fas_scsi_watchdog_tick;

			if (tag_slots->f_timeout < 0) {
				fas_cmd_timeout(fas, slot);
				return;
			}
			if ((tag_slots->f_timeout) <=
			    fas_scsi_watchdog_tick) {
				IPRINTF1("pending timeout on slot=%x\n",
				    slot);
				IPRINTF("draining all queues\n");
				fas_set_throttles(fas, 0, N_SLOTS,
				    DRAIN_THROTTLE);
			}
		}
	}
}

/*
 * timeout recovery
 */
static void
fas_cmd_timeout(struct fas *fas, int slot)
{
	int d = ((fas->f_dslot == 0)?
	    1 : fas->f_dslot);
	int target, lun, i, n, tag, ncmds;
	struct fas_cmd *sp = NULL;
	struct fas_cmd *ssp;

	ASSERT(fas->f_tcmds[slot]);

#ifdef FAS_TEST
	if (fas_test_stop) {
		debug_enter("timeout");
	}
#endif

	/*
	 * set throttle back; no more draining necessary
	 */
	for (i = 0; i < N_SLOTS; i += d) {
		if (fas->f_throttle[i] == DRAIN_THROTTLE) {
			fas_full_throttle(fas, i);
		}
	}

	if (NOTAG(slot/NLUNS_PER_TARGET)) {
		sp = fas->f_active[slot]->f_slot[0];
	}

	/*
	 * if no interrupt pending for next second then the current
	 * cmd must be stuck; switch slot and sp to current slot and cmd
	 */
	if (fas->f_current_sp && fas->f_state != STATE_FREE) {
		for (i = 0; (i < 10000) && (INTPENDING(fas) == 0); i++) {
			drv_usecwait(100);
		}
		if (INTPENDING(fas) == 0) {
			slot = fas->f_current_sp->cmd_slot;
			sp = fas->f_current_sp;
		}
	}

	target = slot / NLUNS_PER_TARGET;
	lun = slot % NLUNS_PER_TARGET;

	/*
	 * update all outstanding pkts for this slot
	 */
	n = fas->f_active[slot]->f_n_slots;
	for (ncmds = tag = 0; tag < n; tag++) {
		ssp = fas->f_active[slot]->f_slot[tag];
		if (ssp && ssp->cmd_pkt->pkt_time) {
			fas_set_pkt_reason(fas, ssp, CMD_TIMEOUT,
			    STAT_TIMEOUT | STAT_ABORTED);
			fas_short_dump_cmd(fas, ssp);
			ncmds++;
		}
	}

	/*
	 * no timed-out cmds here?
	 */
	if (ncmds == 0) {
		return;
	}

	/*
	 * dump all we know about this timeout
	 */
	if (sp) {
		if (sp->cmd_flags & CFLAG_CMDDISC) {
			fas_log(fas, CE_WARN,
			    "Disconnected command timeout for Target %d.%d",
			    target, lun);
		} else {
			ASSERT(sp == fas->f_current_sp);
			fas_log(fas, CE_WARN,
			    "Connected command timeout for Target %d.%d",
			    target, lun);
			/*
			 * Current command timeout appears to relate often
			 * to noisy SCSI in synchronous mode.
			 */
			if (fas->f_state == ACTS_DATA_DONE) {
				fas_sync_wide_backoff(fas, sp, slot);
			}
		}
#ifdef FASDEBUG
		fas_printstate(fas, "timeout");
#endif
	} else {
		fas_log(fas, CE_WARN,
		    "Disconnected tagged cmd(s) (%d) timeout for Target %d.%d",
		    fas->f_tcmds[slot], target, lun);
	}

	if (fas_abort_cmd(fas, sp, slot) == ACTION_SEARCH) {
		(void) fas_istart(fas);
	}
}

/*
 * fas_sync_wide_backoff() increases sync period and enables slow
 * cable mode.
 * the second time, we revert back to narrow/async
 * we count on a bus reset to disable wide in the target and will
 * never renegotiate wide again
 */
static void
fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
    int slot)
{
	char phase;
	ushort_t state = fas->f_state;
	uchar_t tgt = slot / NLUNS_PER_TARGET;
	uint_t tshift = 1 << tgt;

	phase = fas_reg_read(fas, &fas->f_reg->fas_stat);
	phase &=  FAS_PHASE_MASK;

	IPRINTF4(
	"fas_sync_wide_backoff: target %d: state=%x, phase=%x, sp=0x%p\n",
	    tgt, state, phase, (void *)sp);

#ifdef FASDEBUG
	if (fas_no_sync_wide_backoff) {
		return;
	}
#endif

	/*
	 * if this not the first time or sync is disabled
	 * thru scsi_options then disable wide
	 */
	if ((fas->f_backoff & tshift) ||
	    (fas->f_nosync & tshift)) {
		/*
		 * disable wide for just this target
		 */
		if ((fas->f_nowide & tshift) == 0) {
			fas_log(fas, CE_WARN,
			    "Target %d disabled wide SCSI mode", tgt);
		}
		/*
		 * do not reset the bit in f_nowide because that
		 * would not force a renegotiation of wide
		 * and do not change any register value yet because
		 * we may have reconnects before the renegotiations
		 */
		fas->f_target_scsi_options[tgt] &= ~SCSI_OPTIONS_WIDE;
	}

	/*
	 * reduce xfer rate. if this is the first time, double the
	 * period (halving the rate). second time, disable sync and wide.
	 */
	if (fas->f_offset[tgt] != 0) {
		/*
		 * do not reset the bit in f_nosync because that
		 * would not force a renegotiation of sync
		 */
		if (fas->f_backoff & tshift) {
			if ((fas->f_nosync & tshift) == 0) {
				fas_log(fas, CE_WARN,
				    "Target %d reverting to async. mode",
				    tgt);
			}
			fas->f_target_scsi_options[tgt] &=
			    ~(SCSI_OPTIONS_SYNC | SCSI_OPTIONS_FAST);
		} else {
			/* increase period by 100% */
			fas->f_neg_period[tgt] *= 2;

			fas_log(fas, CE_WARN,
			    "Target %d reducing sync. transfer rate", tgt);
		}
	}
	fas->f_backoff |= tshift;

	/*
	 * always enable slow cable mode, if not already enabled
	 */
	if ((fas->f_fasconf & FAS_CONF_SLOWMODE) == 0) {
		fas->f_fasconf |= FAS_CONF_SLOWMODE;
		fas_reg_write(fas, &fas->f_reg->fas_conf, fas->f_fasconf);
		IPRINTF("Reverting to slow SCSI cable mode\n");
	}

	/*
	 * Force sync renegotiation and update properties
	 */
	fas_force_renegotiation(fas, tgt);
	fas->f_props_update |= (1<<tgt);
}

/*
 * handle failed negotiations (either reject or bus free condition):
 * disable the rejected feature for the current target and clear the
 * corresponding chip registers so no sync/wide transfer is attempted
 */
static void
fas_reset_sync_wide(struct fas *fas)
{
	struct fas_cmd *sp = fas->f_current_sp;
	int tgt = Tgt(sp);

	if (fas->f_wdtr_sent) {
		IPRINTF("wide neg message rejected or bus free\n");
		fas->f_nowide |= (1<<tgt);
		fas->f_fasconf3[tgt] &= ~FAS_CONF3_WIDE;
		fas_reg_write(fas, &fas->f_reg->fas_conf3,
		    fas->f_fasconf3[tgt]);
		/*
		 * clear offset just in case it goes to
		 * data phase
		 */
		fas_reg_write(fas,
		    (uchar_t *)&fas->f_reg->fas_sync_offset, 0);
	} else if (fas->f_sdtr_sent) {
		volatile struct fasreg *fasreg =
		    fas->f_reg;
		IPRINTF("sync neg message rejected or bus free\n");
		fas->f_nosync |= (1<<tgt);
		fas->f_offset[tgt] = 0;
		fas->f_sync_period[tgt] = 0;
		fas_reg_write(fas,
		    (uchar_t *)&fasreg->fas_sync_period, 0);
		fas_reg_write(fas,
		    (uchar_t *)&fasreg->fas_sync_offset, 0);
		/* NOTE(review): f_offset[tgt] is cleared twice; harmless */
		fas->f_offset[tgt] = 0;
		fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
		fas_reg_write(fas, &fasreg->fas_conf3,
		    fas->f_fasconf3[tgt]);
	}

	fas_force_renegotiation(fas, tgt);
}

/*
 * force wide and sync renegotiation on the next cmd to this target
 * by clearing the known/enabled bits
 */
static void
fas_force_renegotiation(struct fas *fas, int target)
{
	ushort_t tshift = 1<<target;
	fas->f_sync_known &= ~tshift;
	fas->f_sync_enabled &= ~tshift;
	fas->f_wide_known &= ~tshift;
	fas->f_wide_enabled &= ~tshift;
}

/*
 * update conf3 register for wide negotiation
 * width is 0 (narrow) or 1 (wide)
 */
static void
fas_set_wide_conf3(struct fas *fas, int target, int width)
{
	ASSERT(width <= 1);
	switch (width) {
	case 0:
		fas->f_fasconf3[target] &= ~FAS_CONF3_WIDE;
		break;
	case 1:
		fas->f_fasconf3[target] |= FAS_CONF3_WIDE;
		fas->f_wide_enabled |= (1<<target);
		break;
	}

	fas_reg_write(fas, &fas->f_reg->fas_conf3, fas->f_fasconf3[target]);
	fas->f_fasconf3_reg_last = fas->f_fasconf3[target];
}

/*
 * Abort command handling
 *
 * abort current cmd, either by device reset or immediately with bus reset
 * (usually an abort msg doesn't completely solve the problem, therefore
 * a device or bus reset is recommended)
 */
static int
fas_abort_curcmd(struct fas *fas)
{
	if (fas->f_current_sp) {
		return (fas_abort_cmd(fas, fas->f_current_sp,
		    fas->f_current_sp->cmd_slot));
	} else {
		return (fas_reset_bus(fas));
	}
}

/*
 * abort cmds in 'slot' by target reset, falling back to a bus reset;
 * returns ACTION_SEARCH when the target reset succeeded, otherwise
 * the fas_reset_bus() result
 */
static int
fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot)
{
	struct scsi_address ap;

	ap.a_hba_tran = fas->f_tran;
	ap.a_target = slot / NLUNS_PER_TARGET;
	ap.a_lun = slot % NLUNS_PER_TARGET;

	IPRINTF1("abort cmd 0x%p\n", (void *)sp);

	/*
	 * attempting to abort a connected cmd is usually fruitless, so
	 * only try disconnected cmds
	 * a reset is preferable over an abort (see 1161701)
	 */
	if ((fas->f_current_sp && (fas->f_current_sp->cmd_slot != slot)) ||
	    (fas->f_state == STATE_FREE)) {
		IPRINTF2("attempting to reset target %d.%d\n",
		    ap.a_target, ap.a_lun);
		if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
			return (ACTION_SEARCH);
		}
	}

	/*
	 * if the target won't listen, then a retry is useless
	 * there is also the possibility that the cmd still completed while
	 * we were trying to reset and the target driver may have done a
	 * device reset which has blown away this sp.
	 * well, we've tried, now pull the chain
	 */
	IPRINTF("aborting all cmds by bus reset\n");
	return (fas_reset_bus(fas));
}

/*
 * fas_do_scsi_abort() assumes that we already have the mutex.
 * during the abort, we hold the mutex and prevent callbacks by setting
 * completion pointer to NULL. this will also avoid that a target driver
 * attempts to do a scsi_abort/reset while we are aborting.
 * because the completion pointer is NULL we can still update the
 * packet after completion
 * the throttle for this slot is cleared either by fas_abort_connected_cmd
 * or fas_runpoll which prevents new cmds from starting while aborting
 */
static int
fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fas *fas = ADDR2FAS(ap);
	struct fas_cmd *sp;
	int rval = FALSE;
	short slot;
	struct fas_cmd *cur_sp = fas->f_current_sp;
	void (*cur_savec)(), (*sp_savec)();
	int sp_tagged_flag, abort_msg;

	if (pkt) {
		sp = PKT2CMD(pkt);
		slot = sp->cmd_slot;
		ASSERT(slot == ((ap->a_target * NLUNS_PER_TARGET) | ap->a_lun));
	} else {
		sp = NULL;
		slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
	}

	fas_move_waitQ_to_readyQ(fas);

	/*
	 * If no specific command was passed, all cmds here will be aborted
	 * If a specific command was passed as an argument (to be aborted)
	 * only the specified command will be aborted
	 */
	ASSERT(mutex_owned(FAS_MUTEX(fas)));
	IPRINTF4("fas_scsi_abort for slot %x, "
	    "sp=0x%p, pkt_flags=%x, cur_sp=0x%p\n",
	    slot, (void *)sp, (sp? sp->cmd_pkt_flags : 0), (void *)cur_sp);

	/*
	 * first check if the cmd is in the ready queue or
	 * in the active queue
	 */
	if (sp) {
		IPRINTF3("aborting one command 0x%p for %d.%d\n",
		    (void *)sp, ap->a_target, ap->a_lun);
		rval = fas_remove_from_readyQ(fas, sp, slot);
		if (rval) {
			IPRINTF("aborted one ready cmd\n");
			fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
			fas_decrement_ncmds(fas, sp);
			fas_call_pkt_comp(fas, sp);
			goto exit;

		} else if ((sp !=
		    fas->f_active[slot]->f_slot[sp->cmd_tag[1]])) {
			IPRINTF("cmd doesn't exist here\n");
			rval = TRUE;
			goto exit;
		}
	}

	/*
	 * hold off any new commands while attempting to abort
	 * an active cmd
	 */
	fas_set_throttles(fas, slot, 1, HOLD_THROTTLE);

	if (cur_sp) {
		/*
		 * prevent completion on current cmd
		 */
		cur_savec = cur_sp->cmd_pkt->pkt_comp;
		cur_sp->cmd_pkt->pkt_comp = NULL;
	}

	if (sp) {
		/*
		 * the cmd exists here. is it connected or disconnected?
		 * if connected but still selecting then can't abort now.
		 * prevent completion on this cmd
		 */
		sp_tagged_flag = (sp->cmd_pkt_flags & FLAG_TAGMASK);
		abort_msg = (sp_tagged_flag? MSG_ABORT_TAG : MSG_ABORT);
		sp_savec = sp->cmd_pkt->pkt_comp;
		sp->cmd_pkt->pkt_comp = NULL;

		/* connected but not selecting?
*/ 7785 if ((sp == cur_sp) && (fas->f_state != STATE_FREE) && 7786 (sp->cmd_pkt->pkt_state)) { 7787 rval = fas_abort_connected_cmd(fas, sp, abort_msg); 7788 } 7789 7790 /* if abort connected cmd failed, try abort disconnected */ 7791 if ((rval == 0) && 7792 (sp->cmd_flags & CFLAG_CMDDISC) && 7793 ((sp->cmd_flags & CFLAG_COMPLETED) == 0)) { 7794 rval = fas_abort_disconnected_cmd(fas, ap, sp, 7795 abort_msg, slot); 7796 } 7797 7798 if (rval) { 7799 sp->cmd_flags |= CFLAG_COMPLETED; 7800 fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED); 7801 } 7802 7803 sp->cmd_pkt->pkt_comp = sp_savec; 7804 7805 } else { 7806 IPRINTF2("aborting all commands for %d.%d\n", 7807 ap->a_target, ap->a_lun); 7808 abort_msg = MSG_ABORT; 7809 7810 /* active and not selecting ? */ 7811 if (cur_sp && (fas->f_state != STATE_FREE) && 7812 (cur_sp->cmd_slot == slot) && 7813 cur_sp->cmd_pkt->pkt_state) { 7814 rval = fas_abort_connected_cmd(fas, cur_sp, 7815 abort_msg); 7816 } 7817 if (rval == 0) { 7818 rval = fas_abort_disconnected_cmd(fas, ap, 7819 NULL, abort_msg, slot); 7820 } 7821 } 7822 7823 done: 7824 /* complete the current sp */ 7825 if (cur_sp) { 7826 cur_sp->cmd_pkt->pkt_comp = cur_savec; 7827 if (cur_sp->cmd_flags & CFLAG_COMPLETED) { 7828 fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT); 7829 cur_sp->cmd_flags &= ~CFLAG_COMPLETED; 7830 fas_decrement_ncmds(fas, cur_sp); 7831 fas_call_pkt_comp(fas, cur_sp); 7832 } 7833 } 7834 7835 /* complete the sp passed as 2nd arg */ 7836 if (sp && (sp != cur_sp) && (sp->cmd_flags & CFLAG_COMPLETED)) { 7837 sp->cmd_flags &= ~CFLAG_COMPLETED; 7838 fas_remove_cmd(fas, sp, NEW_TIMEOUT); 7839 fas_decrement_ncmds(fas, sp); 7840 fas_call_pkt_comp(fas, sp); 7841 } 7842 7843 /* clean up all cmds for this slot */ 7844 if (rval && (abort_msg == MSG_ABORT)) { 7845 /* 7846 * mark all commands here as aborted 7847 * abort msg has been accepted, now cleanup queues; 7848 */ 7849 fas_mark_packets(fas, slot, CMD_ABORTED, STAT_ABORTED); 7850 fas_flush_tagQ(fas, slot); 
7851 fas_flush_readyQ(fas, slot); 7852 } 7853 fas_set_throttles(fas, slot, 1, MAX_THROTTLE); 7854 7855 exit: 7856 if (fas->f_state == STATE_FREE) { 7857 (void) fas_ustart(fas); 7858 } 7859 7860 ASSERT(mutex_owned(FAS_MUTEX(fas))); 7861 7862 #ifdef FASDEBUG 7863 if (rval && fas_test_stop) { 7864 debug_enter("abort succeeded"); 7865 } 7866 #endif 7867 return (rval); 7868 } 7869 7870 /* 7871 * mark all packets with new reason and update statistics 7872 */ 7873 static void 7874 fas_mark_packets(struct fas *fas, int slot, uchar_t reason, uint_t stat) 7875 { 7876 struct fas_cmd *sp = fas->f_readyf[slot]; 7877 7878 while (sp != 0) { 7879 fas_set_pkt_reason(fas, sp, reason, STAT_ABORTED); 7880 sp = sp->cmd_forw; 7881 } 7882 if (fas->f_tcmds[slot]) { 7883 int n = 0; 7884 ushort_t tag; 7885 7886 for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) { 7887 if ((sp = fas->f_active[slot]->f_slot[tag]) != 0) { 7888 fas_set_pkt_reason(fas, sp, reason, stat); 7889 n++; 7890 } 7891 } 7892 ASSERT(fas->f_tcmds[slot] == n); 7893 } 7894 } 7895 7896 /* 7897 * set pkt_reason and OR in pkt_statistics flag 7898 */ 7899 static void 7900 fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp, uchar_t reason, 7901 uint_t stat) 7902 { 7903 if (sp) { 7904 if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) { 7905 sp->cmd_pkt->pkt_reason = reason; 7906 } 7907 sp->cmd_pkt->pkt_statistics |= stat; 7908 IPRINTF3("sp=0x%p, pkt_reason=%x, pkt_stat=%x\n", 7909 (void *)sp, reason, sp->cmd_pkt->pkt_statistics); 7910 } 7911 } 7912 7913 /* 7914 * delete specified cmd from the ready queue 7915 */ 7916 static int 7917 fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp, int slot) 7918 { 7919 struct fas_cmd *ssp, *psp; 7920 7921 /* 7922 * command has not been started yet and is still in the ready queue 7923 */ 7924 if (sp) { 7925 ASSERT(fas->f_ncmds > 0); 7926 /* 7927 * find packet on the ready queue and remove it 7928 */ 7929 for (psp = NULL, ssp = fas->f_readyf[slot]; ssp != NULL; 7930 psp = ssp, ssp = 
		    ssp->cmd_forw) {
			if (ssp == sp) {
				/* unlink: fix head (or predecessor's link) */
				if (fas->f_readyf[slot] == sp) {
					fas->f_readyf[slot] = sp->cmd_forw;
				} else {
					psp->cmd_forw = sp->cmd_forw;
				}
				/* fix tail pointer if we removed the tail */
				if (fas->f_readyb[slot] == sp) {
					fas->f_readyb[slot] = psp;
				}
				return (TRUE);
			}
		}
	}
	return (FALSE);
}

/*
 * add cmd to head of the readyQ
 * due to tag allocation failure or preemption we have to return
 * this cmd to the readyQ
 */
static void
fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp)
{
	/*
	 * never return a NOINTR pkt to the readyQ
	 * (fas_runpoll will resubmit)
	 */
	if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		struct fas_cmd *dp;
		int slot = sp->cmd_slot;

		/* push onto the front; dp is the old head (may be NULL) */
		dp = fas->f_readyf[slot];
		fas->f_readyf[slot] = sp;
		sp->cmd_forw = dp;
		/* queue was empty: new head is also the tail */
		if (fas->f_readyb[slot] == NULL) {
			fas->f_readyb[slot] = sp;
		}
	}
}

/*
 * flush cmds in ready queue: detach the whole list first, then
 * complete each cmd; the queue is emptied before any completion
 * callback can run and requeue new work
 */
static void
fas_flush_readyQ(struct fas *fas, int slot)
{
	if (fas->f_readyf[slot]) {
		struct fas_cmd *sp, *nsp;

		IPRINTF1("flushing ready queue, slot=%x\n", slot);
		ASSERT(fas->f_ncmds > 0);

		sp = fas->f_readyf[slot];
		fas->f_readyf[slot] = fas->f_readyb[slot] = NULL;

		while (sp != 0) {
			/*
			 * save the forward pointer before calling
			 * the completion routine
			 */
			nsp = sp->cmd_forw;
			ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
			ASSERT(Tgt(sp) == slot/NLUNS_PER_TARGET);
			fas_decrement_ncmds(fas, sp);
			fas_call_pkt_comp(fas, sp);
			sp = nsp;
		}
		fas_check_ncmds(fas);
	}
}

/*
 * cleanup the tag queue
 * preserve some order by starting with the oldest tag
 */
static void
fas_flush_tagQ(struct fas *fas, int slot)
{
	ushort_t tag, starttag;
	struct fas_cmd *sp;
	struct f_slots *tagque = fas->f_active[slot];

	if
	    (tagque == NULL) {
		return;
	}

	DPRINTF2("flushing entire tag queue, slot=%x, tcmds=%x\n",
	    slot, fas->f_tcmds[slot]);

#ifdef FASDEBUG
	/*
	 * debug-only sanity pass: count occupied tag slots, verify none
	 * are on the free list, and trap (debug_enter) on any cmd that
	 * claims CMD_CMPLT but never got CFLAG_FINISHED
	 */
	{
		int n = 0;
		for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
			if ((sp = tagque->f_slot[tag]) != 0) {
				n++;
				ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
				if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
					if ((sp->cmd_flags & CFLAG_FINISHED) ==
					    0) {
						debug_enter("fas_flush_tagQ");
					}
				}
			}
		}
		ASSERT(fas->f_tcmds[slot] == n);
	}
#endif
	/*
	 * start at f_tags (presumably the next/oldest tag position —
	 * TODO confirm against tag allocator) and wrap once around the
	 * whole tag array
	 */
	tag = starttag = fas->f_active[slot]->f_tags;

	do {
		if ((sp = tagque->f_slot[tag]) != 0) {
			fas_flush_cmd(fas, sp, 0, 0);
		}
		tag = ((ushort_t)(tag + 1)) %
		    (ushort_t)fas->f_active[slot]->f_n_slots;
	} while (tag != starttag);

	ASSERT(fas->f_tcmds[slot] == 0);
	EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
	fas_check_ncmds(fas);
}

/*
 * cleanup one active command: unlink it from the active queue and
 * its timeout bookkeeping, drop the cmd count, stamp reason/stat
 * (reason only sticks if the pkt is still CMD_CMPLT) and complete it
 */
static void
fas_flush_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
	uint_t stat)
{
	short slot = sp->cmd_slot;

	ASSERT(fas->f_ncmds > 0);
	ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
	ASSERT(sp == fas->f_active[slot]->f_slot[sp->cmd_tag[1]]);

	fas_remove_cmd(fas, sp, NEW_TIMEOUT);
	fas_decrement_ncmds(fas, sp);
	fas_set_pkt_reason(fas, sp, reason, stat);
	fas_call_pkt_comp(fas, sp);

	EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
	fas_check_ncmds(fas);
}

/*
 * prepare a proxy cmd (a cmd sent on behalf of the target driver,
 * usually for error recovery or abort/reset); the messages to send
 * are passed as trailing int varargs and encoded into the cdb
 */
static void
fas_makeproxy_cmd(struct fas_cmd *sp, struct scsi_address *ap,
	struct scsi_pkt *pkt, int nmsgs, ...)
{
	va_list vap;
	int i;

	/* msg bytes are stored after FAS_PROXY_DATA inside cmd_cdb */
	ASSERT(nmsgs <= (CDB_GROUP5 - CDB_GROUP0 - 3));

	bzero(sp, sizeof (*sp));
	bzero(pkt, scsi_pkt_size());

	/* wire the pkt and the fas_cmd together */
	pkt->pkt_address = *ap;
	pkt->pkt_cdbp = (opaque_t)&sp->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&sp->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)sp;
	sp->cmd_pkt = pkt;
	sp->cmd_scblen = 1;
	/* proxy cmds always run polled */
	sp->cmd_pkt_flags = pkt->pkt_flags = FLAG_NOINTR;
	sp->cmd_flags = CFLAG_CMDPROXY;
	sp->cmd_cdb[FAS_PROXY_TYPE] = FAS_PROXY_SNDMSG;
	sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
	sp->cmd_cdb[FAS_PROXY_DATA] = (char)nmsgs;

	/* append the msg bytes (passed as ints) after the count */
	va_start(vap, nmsgs);
	for (i = 0; i < nmsgs; i++) {
		sp->cmd_cdb[FAS_PROXY_DATA + 1 + i] = (uchar_t)va_arg(vap, int);
	}
	va_end(vap);
}

/*
 * send a proxy cmd and check the result; returns TRUE only if the
 * pkt was accepted, completed with CMD_CMPLT and the proxy handler
 * stored TRUE in FAS_PROXY_RESULT
 */
static int
fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
	struct scsi_address *ap, char *what)
{
	int rval;

	IPRINTF3("Sending proxy %s message to %d.%d\n", what,
	    ap->a_target, ap->a_lun);
	if (fas_accept_pkt(fas, sp, TRAN_BUSY_OK) == TRAN_ACCEPT &&
	    sp->cmd_pkt->pkt_reason == CMD_CMPLT &&
	    sp->cmd_cdb[FAS_PROXY_RESULT] == TRUE) {
		IPRINTF3("Proxy %s succeeded for %d.%d\n", what,
		    ap->a_target, ap->a_lun);
		ASSERT(fas->f_current_sp != sp);
		rval = TRUE;
	} else {
		IPRINTF5(
		    "Proxy %s failed for %d.%d, result=%x, reason=%x\n", what,
		    ap->a_target, ap->a_lun, sp->cmd_cdb[FAS_PROXY_RESULT],
		    sp->cmd_pkt->pkt_reason);
		ASSERT(fas->f_current_sp != sp);
		rval = FALSE;
	}
	return (rval);
}

/*
 * abort a connected command by sending an abort msg; hold off on
 * starting new cmds by setting throttles to HOLD_THROTTLE
 * (the caller has already done so); returns TRUE if the target
 * accepted the abort
 */
static int
fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t msg)
{
	int rval = FALSE;
	int flags = sp->cmd_pkt_flags;

	/*
	 * if
	 * reset delay active we cannot access the target.
	 */
	if (fas->f_reset_delay[Tgt(sp)]) {
		return (rval);
	}

	/*
	 * only abort while in data phase; otherwise we mess up msg phase
	 */
	if (!((fas->f_state == ACTS_DATA) ||
	    (fas->f_state == ACTS_DATA_DONE))) {
		return (rval);
	}


	IPRINTF3("Sending abort message %s to connected %d.%d\n",
	    scsi_mname(msg), Tgt(sp), Lun(sp));


	/* queue the one-byte abort msg and force a polled transaction */
	fas->f_abort_msg_sent = 0;
	fas->f_omsglen = 1;
	fas->f_cur_msgout[0] = msg;
	sp->cmd_pkt_flags |= FLAG_NOINTR;
	fas_assert_atn(fas);

	(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);

	/*
	 * now check if the msg was taken
	 * f_abort_msg_sent is set in fas_handle_msg_out_done when the
	 * abort msg has actually gone out (ie. msg out phase occurred)
	 */
	if (fas->f_abort_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
		IPRINTF2("target %d.%d aborted\n",
		    Tgt(sp), Lun(sp));
		rval = TRUE;
	} else {
		IPRINTF2("target %d.%d did not abort\n",
		    Tgt(sp), Lun(sp));
	}
	/* restore the caller-visible pkt flags and msg-out state */
	sp->cmd_pkt_flags = flags;
	fas->f_omsglen = 0;
	return (rval);
}

/*
 * abort a disconnected command; if it is a tagged command, we need
 * to include the tag
 * the abort is sent as a proxy cmd via a fresh selection; returns
 * the proxy result (TRUE on success)
 */
static int
fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
	struct fas_cmd *sp, uchar_t msg, int slot)
{
	auto struct fas_cmd local;
	struct fas_cmd *proxy_cmdp = &local;
	struct scsi_pkt *pkt;
	int rval;
	int target = ap->a_target;

	/*
	 * if reset delay is active, we cannot start a selection
	 * and there shouldn't be a cmd outstanding
	 */
	if (fas->f_reset_delay[target] != 0) {
		return (FALSE);
	}

	if (sp)
		ASSERT(sp->cmd_slot == slot);

	IPRINTF1("aborting disconnected tagged cmd(s) with %s\n",
	    scsi_mname(msg));
	/* KM_SLEEP is safe here; freed by the caller path below */
	pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
	if (sp &&
(TAGGED(target) && (msg == MSG_ABORT_TAG))) { 8225 int tag = sp->cmd_tag[1]; 8226 ASSERT(sp == fas->f_active[slot]->f_slot[tag]); 8227 fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 3, 8228 MSG_SIMPLE_QTAG, tag, msg); 8229 } else { 8230 fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 1, msg); 8231 } 8232 8233 rval = fas_do_proxy_cmd(fas, proxy_cmdp, ap, scsi_mname(msg)); 8234 kmem_free(pkt, scsi_pkt_size()); 8235 return (rval); 8236 } 8237 8238 /* 8239 * reset handling: 8240 * fas_do_scsi_reset assumes that we have already entered the mutex 8241 */ 8242 static int 8243 fas_do_scsi_reset(struct scsi_address *ap, int level) 8244 { 8245 int rval = FALSE; 8246 struct fas *fas = ADDR2FAS(ap); 8247 short slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun; 8248 8249 ASSERT(mutex_owned(FAS_MUTEX(fas))); 8250 IPRINTF3("fas_scsi_reset for slot %x, level=%x, tcmds=%x\n", 8251 slot, level, fas->f_tcmds[slot]); 8252 8253 fas_move_waitQ_to_readyQ(fas); 8254 8255 if (level == RESET_ALL) { 8256 /* 8257 * We know that fas_reset_bus() returns ACTION_RETURN. 8258 */ 8259 (void) fas_reset_bus(fas); 8260 8261 /* 8262 * Now call fas_dopoll() to field the reset interrupt 8263 * which will then call fas_reset_recovery which will 8264 * call the completion function for all commands. 
8265 */ 8266 if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) { 8267 /* 8268 * reset fas 8269 */ 8270 fas_internal_reset(fas, FAS_RESET_FAS); 8271 (void) fas_reset_bus(fas); 8272 if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) { 8273 fas_log(fas, 8274 CE_WARN, "reset scsi bus failed"); 8275 New_state(fas, STATE_FREE); 8276 } else { 8277 rval = TRUE; 8278 } 8279 } else { 8280 rval = TRUE; 8281 } 8282 8283 } else { 8284 struct fas_cmd *cur_sp = fas->f_current_sp; 8285 void (*savec)() = NULL; 8286 8287 /* 8288 * prevent new commands from starting 8289 */ 8290 fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE); 8291 8292 /* 8293 * zero pkt_comp so it won't complete during the reset and 8294 * we can still update the packet after the reset. 8295 */ 8296 if (cur_sp) { 8297 savec = cur_sp->cmd_pkt->pkt_comp; 8298 cur_sp->cmd_pkt->pkt_comp = NULL; 8299 } 8300 8301 /* 8302 * is this a connected cmd but not selecting? 8303 */ 8304 if (cur_sp && (fas->f_state != STATE_FREE) && 8305 (cur_sp->cmd_pkt->pkt_state != 0) && 8306 (ap->a_target == (Tgt(cur_sp)))) { 8307 rval = fas_reset_connected_cmd(fas, ap); 8308 } 8309 8310 /* 8311 * if not connected or fas_reset_connected_cmd() failed, 8312 * attempt a reset_disconnected_cmd 8313 */ 8314 if (rval == FALSE) { 8315 rval = fas_reset_disconnected_cmd(fas, ap); 8316 } 8317 8318 /* 8319 * cleanup if reset was successful 8320 * complete the current sp first. 
8321 */ 8322 if (cur_sp) { 8323 cur_sp->cmd_pkt->pkt_comp = savec; 8324 if (cur_sp->cmd_flags & CFLAG_COMPLETED) { 8325 if (ap->a_target == (Tgt(cur_sp))) { 8326 fas_set_pkt_reason(fas, cur_sp, 8327 CMD_RESET, STAT_DEV_RESET); 8328 } 8329 fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT); 8330 cur_sp->cmd_flags &= ~CFLAG_COMPLETED; 8331 fas_decrement_ncmds(fas, cur_sp); 8332 fas_call_pkt_comp(fas, cur_sp); 8333 } 8334 } 8335 8336 if (rval == TRUE) { 8337 fas_reset_cleanup(fas, slot); 8338 } else { 8339 IPRINTF1("fas_scsi_reset failed for slot %x\n", slot); 8340 8341 /* 8342 * restore throttles to max throttle, regardless 8343 * of what it was (fas_set_throttles() will deal 8344 * with reset delay active) 8345 * restoring to the old throttle is not 8346 * a such a good idea 8347 */ 8348 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE); 8349 8350 } 8351 8352 if (fas->f_state == STATE_FREE) { 8353 (void) fas_ustart(fas); 8354 } 8355 } 8356 exit: 8357 ASSERT(mutex_owned(FAS_MUTEX(fas))); 8358 ASSERT(fas->f_ncmds >= fas->f_ndisc); 8359 8360 #ifdef FASDEBUG 8361 if (rval && fas_test_stop) { 8362 debug_enter("reset succeeded"); 8363 } 8364 #endif 8365 return (rval); 8366 } 8367 8368 /* 8369 * reset delay is handled by a separate watchdog; this ensures that 8370 * regardless of fas_scsi_watchdog_tick, the reset delay will not change 8371 */ 8372 static void 8373 fas_start_watch_reset_delay(struct fas *fas) 8374 { 8375 mutex_enter(&fas_global_mutex); 8376 if ((fas_reset_watch == 0) && FAS_CAN_SCHED) { 8377 fas_reset_watch = timeout(fas_watch_reset_delay, NULL, 8378 drv_usectohz((clock_t)FAS_WATCH_RESET_DELAY_TICK * 1000)); 8379 } 8380 ASSERT((fas_reset_watch != 0) || (fas->f_flags & FAS_FLG_NOTIMEOUTS)); 8381 mutex_exit(&fas_global_mutex); 8382 } 8383 8384 /* 8385 * set throttles to HOLD and set reset_delay for all target/luns 8386 */ 8387 static void 8388 fas_setup_reset_delay(struct fas *fas) 8389 { 8390 if (!ddi_in_panic()) { 8391 int i; 8392 8393 fas_set_throttles(fas, 0, 
N_SLOTS, HOLD_THROTTLE); 8394 for (i = 0; i < NTARGETS_WIDE; i++) { 8395 fas->f_reset_delay[i] = fas->f_scsi_reset_delay; 8396 } 8397 fas_start_watch_reset_delay(fas); 8398 } else { 8399 drv_usecwait(fas->f_scsi_reset_delay * 1000); 8400 } 8401 } 8402 8403 /* 8404 * fas_watch_reset_delay(_subr) is invoked by timeout() and checks every 8405 * fas instance for active reset delays 8406 */ 8407 /*ARGSUSED*/ 8408 static void 8409 fas_watch_reset_delay(void *arg) 8410 { 8411 struct fas *fas; 8412 struct fas *lfas; /* last not_done fas */ 8413 int not_done = 0; 8414 8415 mutex_enter(&fas_global_mutex); 8416 fas_reset_watch = 0; 8417 mutex_exit(&fas_global_mutex); 8418 8419 rw_enter(&fas_global_rwlock, RW_READER); 8420 for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) { 8421 if (fas->f_tran == 0) { 8422 continue; 8423 } 8424 mutex_enter(FAS_MUTEX(fas)); 8425 not_done += fas_watch_reset_delay_subr(fas); 8426 lfas = fas; 8427 fas_check_waitQ_and_mutex_exit(fas); 8428 } 8429 rw_exit(&fas_global_rwlock); 8430 if (not_done) { 8431 ASSERT(lfas != NULL); 8432 fas_start_watch_reset_delay(lfas); 8433 } 8434 } 8435 8436 static int 8437 fas_watch_reset_delay_subr(struct fas *fas) 8438 { 8439 short slot, s; 8440 int start_slot = -1; 8441 int done = 0; 8442 8443 for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) { 8444 8445 /* 8446 * check if a reset delay is active; if so back to full throttle 8447 * which will unleash the cmds in the ready Q 8448 */ 8449 s = slot/NLUNS_PER_TARGET; 8450 if (fas->f_reset_delay[s] != 0) { 8451 EPRINTF2("target%d: reset delay=%d\n", s, 8452 fas->f_reset_delay[s]); 8453 fas->f_reset_delay[s] -= FAS_WATCH_RESET_DELAY_TICK; 8454 if (fas->f_reset_delay[s] <= 0) { 8455 /* 8456 * clear throttle for all luns on this target 8457 */ 8458 fas->f_reset_delay[s] = 0; 8459 fas_set_all_lun_throttles(fas, 8460 slot, MAX_THROTTLE); 8461 IPRINTF1("reset delay completed, slot=%x\n", 8462 slot); 8463 if (start_slot == -1) { 8464 start_slot = slot; 
8465 } 8466 } else { 8467 done = -1; 8468 } 8469 } 8470 } 8471 8472 /* 8473 * start a cmd if a reset delay expired 8474 */ 8475 if (start_slot != -1 && fas->f_state == STATE_FREE) { 8476 (void) fas_ustart(fas); 8477 } 8478 return (done); 8479 } 8480 8481 /* 8482 * cleanup after a device reset. this affects all target's luns 8483 */ 8484 static void 8485 fas_reset_cleanup(struct fas *fas, int slot) 8486 { 8487 /* 8488 * reset msg has been accepted, now cleanup queues; 8489 * for all luns of this target 8490 */ 8491 int i, start, end; 8492 int target = slot/NLUNS_PER_TARGET; 8493 8494 start = slot & ~(NLUNS_PER_TARGET-1); 8495 end = start + NLUNS_PER_TARGET; 8496 IPRINTF4("fas_reset_cleanup: slot %x, start=%x, end=%x, tcmds=%x\n", 8497 slot, start, end, fas->f_tcmds[slot]); 8498 8499 ASSERT(!(fas->f_current_sp && 8500 (fas->f_current_sp->cmd_slot == slot) && 8501 (fas->f_state & STATE_SELECTING))); 8502 8503 /* 8504 * if we are not in panic set up a reset delay for this target, 8505 * a zero throttle forces all new requests into the ready Q 8506 */ 8507 if (!ddi_in_panic()) { 8508 fas_set_all_lun_throttles(fas, start, HOLD_THROTTLE); 8509 fas->f_reset_delay[target] = fas->f_scsi_reset_delay; 8510 fas_start_watch_reset_delay(fas); 8511 } else { 8512 drv_usecwait(fas->f_scsi_reset_delay * 1000); 8513 } 8514 8515 for (i = start; i < end; i++) { 8516 fas_mark_packets(fas, i, CMD_RESET, STAT_DEV_RESET); 8517 fas_flush_tagQ(fas, i); 8518 fas_flush_readyQ(fas, i); 8519 if (fas->f_arq_pkt[i]) { 8520 struct fas_cmd *sp = fas->f_arq_pkt[i]; 8521 struct arq_private_data *arq_data = 8522 (struct arq_private_data *)(sp->cmd_pkt->pkt_private); 8523 if (sp->cmd_pkt->pkt_comp) { 8524 ASSERT(arq_data->arq_save_sp == NULL); 8525 } 8526 } 8527 ASSERT(fas->f_tcmds[i] == 0); 8528 } 8529 ASSERT(fas->f_ncmds >= fas->f_ndisc); 8530 8531 fas_force_renegotiation(fas, target); 8532 } 8533 8534 /* 8535 * reset a currently disconnected target 8536 */ 8537 static int 8538 
fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap) 8539 { 8540 auto struct fas_cmd local; 8541 struct fas_cmd *sp = &local; 8542 struct scsi_pkt *pkt; 8543 int rval; 8544 8545 pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP); 8546 fas_makeproxy_cmd(sp, ap, pkt, 1, MSG_DEVICE_RESET); 8547 rval = fas_do_proxy_cmd(fas, sp, ap, scsi_mname(MSG_DEVICE_RESET)); 8548 kmem_free(pkt, scsi_pkt_size()); 8549 return (rval); 8550 } 8551 8552 /* 8553 * reset a target with a currently connected command 8554 * Assert ATN and send MSG_DEVICE_RESET, zero throttles temporarily 8555 * to prevent new cmds from starting regardless of the outcome 8556 */ 8557 static int 8558 fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap) 8559 { 8560 int rval = FALSE; 8561 struct fas_cmd *sp = fas->f_current_sp; 8562 int flags = sp->cmd_pkt_flags; 8563 8564 /* 8565 * only attempt to reset in data phase; during other phases 8566 * asserting ATN may just cause confusion 8567 */ 8568 if (!((fas->f_state == ACTS_DATA) || 8569 (fas->f_state == ACTS_DATA_DONE))) { 8570 return (rval); 8571 } 8572 8573 IPRINTF2("Sending reset message to connected %d.%d\n", 8574 ap->a_target, ap->a_lun); 8575 fas->f_reset_msg_sent = 0; 8576 fas->f_omsglen = 1; 8577 fas->f_cur_msgout[0] = MSG_DEVICE_RESET; 8578 sp->cmd_pkt_flags |= FLAG_NOINTR; 8579 8580 fas_assert_atn(fas); 8581 8582 /* 8583 * poll for interrupts until bus free 8584 */ 8585 (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT); 8586 8587 /* 8588 * now check if the msg was taken 8589 * f_reset is set in fas_handle_msg_out_done when 8590 * msg has actually gone out (ie. 
msg out phase occurred) 8591 */ 8592 if (fas->f_reset_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) { 8593 IPRINTF2("target %d.%d reset\n", ap->a_target, ap->a_lun); 8594 rval = TRUE; 8595 } else { 8596 IPRINTF2("target %d.%d did not reset\n", 8597 ap->a_target, ap->a_lun); 8598 } 8599 sp->cmd_pkt_flags = flags; 8600 fas->f_omsglen = 0; 8601 8602 return (rval); 8603 } 8604 8605 /* 8606 * reset the scsi bus to blow all commands away 8607 */ 8608 static int 8609 fas_reset_bus(struct fas *fas) 8610 { 8611 IPRINTF("fas_reset_bus:\n"); 8612 New_state(fas, ACTS_RESET); 8613 8614 fas_internal_reset(fas, FAS_RESET_SCSIBUS); 8615 8616 /* 8617 * Now that we've reset the SCSI bus, we'll take a SCSI RESET 8618 * interrupt and use that to clean up the state of things. 8619 */ 8620 return (ACTION_RETURN); 8621 } 8622 8623 /* 8624 * fas_reset_recovery is called on the reset interrupt and cleans 8625 * up all cmds (active or waiting) 8626 */ 8627 static int 8628 fas_reset_recovery(struct fas *fas) 8629 { 8630 short slot, start_slot; 8631 int i; 8632 int rval = ACTION_SEARCH; 8633 int max_loop = 0; 8634 8635 IPRINTF("fas_reset_recovery:\n"); 8636 fas_check_ncmds(fas); 8637 8638 /* 8639 * renegotiate wide and sync for all targets 8640 */ 8641 fas->f_sync_known = fas->f_wide_known = 0; 8642 8643 /* 8644 * reset dma engine 8645 */ 8646 FAS_FLUSH_DMA_HARD(fas); 8647 8648 /* 8649 * set throttles and reset delay 8650 */ 8651 fas_setup_reset_delay(fas); 8652 8653 /* 8654 * clear interrupts until they go away 8655 */ 8656 while (INTPENDING(fas) && (max_loop < FAS_RESET_SPIN_MAX_LOOP)) { 8657 volatile struct fasreg *fasreg = fas->f_reg; 8658 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat); 8659 fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2); 8660 fas->f_step = fas_reg_read(fas, &fasreg->fas_step); 8661 fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr); 8662 drv_usecwait(FAS_RESET_SPIN_DELAY_USEC); 8663 max_loop++; 8664 } 8665 8666 if (max_loop >= FAS_RESET_SPIN_MAX_LOOP) { 8667 
fas_log(fas, CE_WARN, "Resetting SCSI bus failed"); 8668 } 8669 8670 fas_reg_cmd_write(fas, CMD_FLUSH); 8671 8672 /* 8673 * reset the chip, this shouldn't be necessary but sometimes 8674 * we get a hang in the next data in phase 8675 */ 8676 fas_internal_reset(fas, FAS_RESET_FAS); 8677 8678 /* 8679 * reset was expected? if not, it must be external bus reset 8680 */ 8681 if (fas->f_state != ACTS_RESET) { 8682 if (fas->f_ncmds) { 8683 fas_log(fas, CE_WARN, "external SCSI bus reset"); 8684 } 8685 } 8686 8687 if (fas->f_ncmds == 0) { 8688 rval = ACTION_RETURN; 8689 goto done; 8690 } 8691 8692 /* 8693 * completely reset the state of the softc data. 8694 */ 8695 fas_internal_reset(fas, FAS_RESET_SOFTC); 8696 8697 /* 8698 * Hold the state of the host adapter open 8699 */ 8700 New_state(fas, ACTS_FROZEN); 8701 8702 /* 8703 * for right now just claim that all 8704 * commands have been destroyed by a SCSI reset 8705 * and let already set reason fields or callers 8706 * decide otherwise for specific commands. 8707 */ 8708 start_slot = fas->f_next_slot; 8709 slot = start_slot; 8710 do { 8711 fas_check_ncmds(fas); 8712 fas_mark_packets(fas, slot, CMD_RESET, STAT_BUS_RESET); 8713 fas_flush_tagQ(fas, slot); 8714 fas_flush_readyQ(fas, slot); 8715 if (fas->f_arq_pkt[slot]) { 8716 struct fas_cmd *sp = fas->f_arq_pkt[slot]; 8717 struct arq_private_data *arq_data = 8718 (struct arq_private_data *)(sp->cmd_pkt->pkt_private); 8719 if (sp->cmd_pkt->pkt_comp) { 8720 ASSERT(arq_data->arq_save_sp == NULL); 8721 } 8722 } 8723 slot = NEXTSLOT(slot, fas->f_dslot); 8724 } while (slot != start_slot); 8725 8726 fas_check_ncmds(fas); 8727 8728 /* 8729 * reset timeouts 8730 */ 8731 for (i = 0; i < N_SLOTS; i++) { 8732 if (fas->f_active[i]) { 8733 fas->f_active[i]->f_timebase = 0; 8734 fas->f_active[i]->f_timeout = 0; 8735 fas->f_active[i]->f_dups = 0; 8736 } 8737 } 8738 8739 done: 8740 /* 8741 * Move the state back to free... 
8742 */ 8743 New_state(fas, STATE_FREE); 8744 ASSERT(fas->f_ncmds >= fas->f_ndisc); 8745 8746 /* 8747 * perform the reset notification callbacks that are registered. 8748 */ 8749 (void) scsi_hba_reset_notify_callback(&fas->f_mutex, 8750 &fas->f_reset_notify_listf); 8751 8752 /* 8753 * if reset delay is still active a search is meaningless 8754 * but do it anyway 8755 */ 8756 return (rval); 8757 } 8758 8759 /* 8760 * hba_tran ops for quiesce and unquiesce 8761 */ 8762 static int 8763 fas_scsi_quiesce(dev_info_t *dip) 8764 { 8765 struct fas *fas; 8766 scsi_hba_tran_t *tran; 8767 8768 tran = ddi_get_driver_private(dip); 8769 if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) { 8770 return (-1); 8771 } 8772 8773 return (fas_quiesce_bus(fas)); 8774 } 8775 8776 static int 8777 fas_scsi_unquiesce(dev_info_t *dip) 8778 { 8779 struct fas *fas; 8780 scsi_hba_tran_t *tran; 8781 8782 tran = ddi_get_driver_private(dip); 8783 if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) { 8784 return (-1); 8785 } 8786 8787 return (fas_unquiesce_bus(fas)); 8788 } 8789 8790 #ifdef FAS_TEST 8791 /* 8792 * torture test functions 8793 */ 8794 static void 8795 fas_test_reset(struct fas *fas, int slot) 8796 { 8797 struct scsi_address ap; 8798 char target = slot/NLUNS_PER_TARGET; 8799 8800 if (fas_rtest & (1 << target)) { 8801 ap.a_hba_tran = fas->f_tran; 8802 ap.a_target = target; 8803 ap.a_lun = 0; 8804 if ((fas_rtest_type == 1) && 8805 (fas->f_state == ACTS_DATA_DONE)) { 8806 if (fas_do_scsi_reset(&ap, RESET_TARGET)) { 8807 fas_rtest = 0; 8808 } 8809 } else if ((fas_rtest_type == 2) && 8810 (fas->f_state == ACTS_DATA_DONE)) { 8811 if (fas_do_scsi_reset(&ap, RESET_ALL)) { 8812 fas_rtest = 0; 8813 } 8814 } else { 8815 if (fas_do_scsi_reset(&ap, RESET_TARGET)) { 8816 fas_rtest = 0; 8817 } 8818 } 8819 } 8820 } 8821 8822 static void 8823 fas_test_abort(struct fas *fas, int slot) 8824 { 8825 struct fas_cmd *sp = fas->f_current_sp; 8826 struct scsi_address ap; 8827 char target = 
slot/NLUNS_PER_TARGET; 8828 struct scsi_pkt *pkt = NULL; 8829 8830 if (fas_atest & (1 << target)) { 8831 ap.a_hba_tran = fas->f_tran; 8832 ap.a_target = target; 8833 ap.a_lun = 0; 8834 8835 if ((fas_atest_disc == 0) && sp && 8836 (sp->cmd_slot == slot) && 8837 ((sp->cmd_flags & CFLAG_CMDDISC) == 0)) { 8838 pkt = sp->cmd_pkt; 8839 } else if ((fas_atest_disc == 1) && NOTAG(target)) { 8840 sp = fas->f_active[slot]->f_slot[0]; 8841 if (sp && (sp->cmd_flags & CFLAG_CMDDISC)) { 8842 pkt = sp->cmd_pkt; 8843 } 8844 } else if ((fas_atest_disc == 1) && (sp == 0) && 8845 TAGGED(target) && 8846 (fas->f_tcmds[slot] != 0)) { 8847 int tag; 8848 /* 8849 * find the oldest tag 8850 */ 8851 for (tag = NTAGS-1; tag >= 0; tag--) { 8852 if ((sp = fas->f_active[slot]->f_slot[tag]) != 0) 8853 break; 8854 } 8855 if (sp) { 8856 pkt = sp->cmd_pkt; 8857 ASSERT(sp->cmd_slot == slot); 8858 } else { 8859 return; 8860 } 8861 } else if (fas_atest_disc == 2 && (sp == 0) && 8862 (fas->f_tcmds[slot] != 0)) { 8863 pkt = NULL; 8864 } else if (fas_atest_disc == 2 && NOTAG(target)) { 8865 pkt = NULL; 8866 } else if (fas_atest_disc == 3 && fas->f_readyf[slot]) { 8867 pkt = fas->f_readyf[slot]->cmd_pkt; 8868 } else if (fas_atest_disc == 4 && 8869 fas->f_readyf[slot] && fas->f_readyf[slot]->cmd_forw) { 8870 pkt = fas->f_readyf[slot]->cmd_forw->cmd_pkt; 8871 } else if (fas_atest_disc == 5 && fas->f_readyb[slot]) { 8872 pkt = fas->f_readyb[slot]->cmd_pkt; 8873 } else if ((fas_atest_disc == 6) && sp && 8874 (sp->cmd_slot == slot) && 8875 (fas->f_state == ACTS_DATA_DONE)) { 8876 pkt = sp->cmd_pkt; 8877 } else if (fas_atest_disc == 7) { 8878 if (fas_do_scsi_abort(&ap, NULL)) { 8879 if (fas_do_scsi_abort(&ap, NULL)) { 8880 if (fas_do_scsi_reset(&ap, 8881 RESET_TARGET)) { 8882 fas_atest = 0; 8883 } 8884 } 8885 } 8886 return; 8887 } else { 8888 return; 8889 } 8890 8891 fas_log(fas, CE_NOTE, "aborting pkt=0x%p state=%x\n", 8892 (void *)pkt, (pkt != NULL? 
pkt->pkt_state : 0)); 8893 if (fas_do_scsi_abort(&ap, pkt)) { 8894 fas_atest = 0; 8895 } 8896 } 8897 } 8898 #endif /* FAS_TEST */ 8899 8900 /* 8901 * capability interface 8902 */ 8903 static int 8904 fas_commoncap(struct scsi_address *ap, char *cap, int val, 8905 int tgtonly, int doset) 8906 { 8907 struct fas *fas = ADDR2FAS(ap); 8908 int cidx; 8909 int target = ap->a_target; 8910 ushort_t tshift = (1<<target); 8911 ushort_t ntshift = ~tshift; 8912 int rval = FALSE; 8913 8914 mutex_enter(FAS_MUTEX(fas)); 8915 8916 if (cap == (char *)0) { 8917 goto exit; 8918 } 8919 8920 cidx = scsi_hba_lookup_capstr(cap); 8921 if (cidx == -1) { 8922 rval = UNDEFINED; 8923 } else if (doset) { 8924 /* 8925 * we usually don't allow setting capabilities for 8926 * other targets! 8927 */ 8928 if (!tgtonly) { 8929 goto exit; 8930 } 8931 switch (cidx) { 8932 case SCSI_CAP_DMA_MAX: 8933 case SCSI_CAP_MSG_OUT: 8934 case SCSI_CAP_PARITY: 8935 case SCSI_CAP_INITIATOR_ID: 8936 case SCSI_CAP_LINKED_CMDS: 8937 case SCSI_CAP_UNTAGGED_QING: 8938 case SCSI_CAP_RESET_NOTIFICATION: 8939 /* 8940 * None of these are settable via 8941 * the capability interface. 
8942 */ 8943 break; 8944 8945 case SCSI_CAP_DISCONNECT: 8946 if (val) 8947 fas->f_target_scsi_options[ap->a_target] |= 8948 SCSI_OPTIONS_DR; 8949 else 8950 fas->f_target_scsi_options[ap->a_target] &= 8951 ~SCSI_OPTIONS_DR; 8952 8953 break; 8954 8955 case SCSI_CAP_SYNCHRONOUS: 8956 if (val) { 8957 fas->f_force_async &= ~tshift; 8958 } else { 8959 fas->f_force_async |= tshift; 8960 } 8961 fas_force_renegotiation(fas, target); 8962 rval = TRUE; 8963 break; 8964 8965 case SCSI_CAP_TAGGED_QING: 8966 { 8967 int slot = target * NLUNS_PER_TARGET | ap->a_lun; 8968 ushort_t old_notag = fas->f_notag; 8969 8970 /* do not allow with active tgt */ 8971 if (fas->f_tcmds[slot]) { 8972 break; 8973 } 8974 8975 slot = target * NLUNS_PER_TARGET | ap->a_lun; 8976 8977 if (val) { 8978 if (fas->f_target_scsi_options[target] & 8979 SCSI_OPTIONS_TAG) { 8980 IPRINTF1("target %d: TQ enabled\n", 8981 target); 8982 fas->f_notag &= ntshift; 8983 } else { 8984 break; 8985 } 8986 } else { 8987 IPRINTF1("target %d: TQ disabled\n", 8988 target); 8989 fas->f_notag |= tshift; 8990 } 8991 8992 if (val && fas_alloc_active_slots(fas, slot, 8993 KM_NOSLEEP)) { 8994 fas->f_notag = old_notag; 8995 break; 8996 } 8997 8998 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE); 8999 9000 fas_update_props(fas, target); 9001 rval = TRUE; 9002 break; 9003 } 9004 9005 case SCSI_CAP_WIDE_XFER: 9006 if (val) { 9007 if (fas->f_target_scsi_options[target] & 9008 SCSI_OPTIONS_WIDE) { 9009 fas->f_nowide &= ntshift; 9010 fas->f_force_narrow &= ~tshift; 9011 } else { 9012 break; 9013 } 9014 } else { 9015 fas->f_force_narrow |= tshift; 9016 } 9017 fas_force_renegotiation(fas, target); 9018 rval = TRUE; 9019 break; 9020 9021 case SCSI_CAP_ARQ: 9022 if (val) { 9023 if (fas_create_arq_pkt(fas, ap)) { 9024 break; 9025 } 9026 } else { 9027 if (fas_delete_arq_pkt(fas, ap)) { 9028 break; 9029 } 9030 } 9031 rval = TRUE; 9032 break; 9033 9034 case SCSI_CAP_QFULL_RETRIES: 9035 fas->f_qfull_retries[target] = (uchar_t)val; 9036 rval = 
TRUE; 9037 break; 9038 9039 case SCSI_CAP_QFULL_RETRY_INTERVAL: 9040 fas->f_qfull_retry_interval[target] = 9041 drv_usectohz(val * 1000); 9042 rval = TRUE; 9043 break; 9044 9045 default: 9046 rval = UNDEFINED; 9047 break; 9048 } 9049 9050 } else if (doset == 0) { 9051 int slot = target * NLUNS_PER_TARGET | ap->a_lun; 9052 9053 switch (cidx) { 9054 case SCSI_CAP_DMA_MAX: 9055 /* very high limit because of multiple dma windows */ 9056 rval = 1<<30; 9057 break; 9058 case SCSI_CAP_MSG_OUT: 9059 rval = TRUE; 9060 break; 9061 case SCSI_CAP_DISCONNECT: 9062 if (tgtonly && 9063 (fas->f_target_scsi_options[target] & 9064 SCSI_OPTIONS_DR)) { 9065 rval = TRUE; 9066 } 9067 break; 9068 case SCSI_CAP_SYNCHRONOUS: 9069 if (tgtonly && fas->f_offset[target]) { 9070 rval = TRUE; 9071 } 9072 break; 9073 case SCSI_CAP_PARITY: 9074 rval = TRUE; 9075 break; 9076 case SCSI_CAP_INITIATOR_ID: 9077 rval = MY_ID(fas); 9078 break; 9079 case SCSI_CAP_TAGGED_QING: 9080 if (tgtonly && ((fas->f_notag & tshift) == 0)) { 9081 rval = TRUE; 9082 } 9083 break; 9084 case SCSI_CAP_WIDE_XFER: 9085 if ((tgtonly && (fas->f_nowide & tshift) == 0)) { 9086 rval = TRUE; 9087 } 9088 break; 9089 case SCSI_CAP_UNTAGGED_QING: 9090 rval = TRUE; 9091 break; 9092 case SCSI_CAP_ARQ: 9093 if (tgtonly && fas->f_arq_pkt[slot]) { 9094 rval = TRUE; 9095 } 9096 break; 9097 case SCSI_CAP_LINKED_CMDS: 9098 break; 9099 case SCSI_CAP_RESET_NOTIFICATION: 9100 rval = TRUE; 9101 break; 9102 case SCSI_CAP_QFULL_RETRIES: 9103 rval = fas->f_qfull_retries[target]; 9104 break; 9105 case SCSI_CAP_QFULL_RETRY_INTERVAL: 9106 rval = drv_hztousec( 9107 fas->f_qfull_retry_interval[target]) / 9108 1000; 9109 break; 9110 9111 default: 9112 rval = UNDEFINED; 9113 break; 9114 } 9115 } 9116 exit: 9117 if (val && tgtonly) { 9118 fas_update_props(fas, target); 9119 } 9120 fas_check_waitQ_and_mutex_exit(fas); 9121 9122 if (doset) { 9123 IPRINTF6( 9124 "fas_commoncap:tgt=%x,cap=%s,tgtonly=%x,doset=%x,val=%x,rval=%x\n", 9125 target, cap, tgtonly, 
doset, val, rval); 9126 } 9127 return (rval); 9128 } 9129 9130 /* 9131 * property management 9132 * fas_update_props: 9133 * create/update sync/wide/TQ/scsi-options properties for this target 9134 */ 9135 static void 9136 fas_update_props(struct fas *fas, int tgt) 9137 { 9138 char property[32]; 9139 uint_t xfer_speed = 0; 9140 uint_t xfer_rate = 0; 9141 int wide_enabled, tq_enabled; 9142 uint_t regval = fas->f_sync_period[tgt]; 9143 int offset = fas->f_offset[tgt]; 9144 9145 wide_enabled = ((fas->f_nowide & (1<<tgt)) == 0); 9146 if (offset && regval) { 9147 xfer_speed = 9148 FAS_SYNC_KBPS((regval * fas->f_clock_cycle) / 1000); 9149 xfer_rate = ((wide_enabled)? 2 : 1) * xfer_speed; 9150 } 9151 (void) sprintf(property, "target%x-sync-speed", tgt); 9152 fas_update_this_prop(fas, property, xfer_rate); 9153 9154 (void) sprintf(property, "target%x-wide", tgt); 9155 fas_update_this_prop(fas, property, wide_enabled); 9156 9157 (void) sprintf(property, "target%x-TQ", tgt); 9158 tq_enabled = ((fas->f_notag & (1<<tgt))? 0 : 1); 9159 fas_update_this_prop(fas, property, tq_enabled); 9160 9161 } 9162 9163 static void 9164 fas_update_this_prop(struct fas *fas, char *property, int value) 9165 { 9166 dev_info_t *dip = fas->f_dev; 9167 9168 IPRINTF2("update prop: %s value=%x\n", property, value); 9169 ASSERT(mutex_owned(FAS_MUTEX(fas))); 9170 /* 9171 * We cannot hold any mutex at this point because the call to 9172 * ddi_prop_update_int() may block. 
9173 */ 9174 mutex_exit(FAS_MUTEX(fas)); 9175 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, 9176 property, value) != DDI_PROP_SUCCESS) { 9177 IPRINTF1("cannot modify/create %s property\n", property); 9178 } 9179 mutex_enter(FAS_MUTEX(fas)); 9180 } 9181 9182 /* 9183 * allocate active slots array, size is dependent on whether tagQ enabled 9184 */ 9185 static int 9186 fas_alloc_active_slots(struct fas *fas, int slot, int flag) 9187 { 9188 int target = slot / NLUNS_PER_TARGET; 9189 struct f_slots *old_active = fas->f_active[slot]; 9190 struct f_slots *new_active; 9191 ushort_t size; 9192 int rval = -1; 9193 9194 if (fas->f_tcmds[slot]) { 9195 IPRINTF("cannot change size of active slots array\n"); 9196 return (rval); 9197 } 9198 9199 size = ((NOTAG(target)) ? FAS_F_SLOT_SIZE : FAS_F_SLOTS_SIZE_TQ); 9200 EPRINTF4( 9201 "fas_alloc_active_slots: target=%x size=%x, old=0x%p, oldsize=%x\n", 9202 target, size, (void *)old_active, 9203 ((old_active == NULL) ? -1 : old_active->f_size)); 9204 9205 new_active = kmem_zalloc(size, flag); 9206 if (new_active == NULL) { 9207 IPRINTF("new active alloc failed\n"); 9208 } else { 9209 fas->f_active[slot] = new_active; 9210 fas->f_active[slot]->f_n_slots = (NOTAG(target) ? 1 : NTAGS); 9211 fas->f_active[slot]->f_size = size; 9212 /* 9213 * reserve tag 0 for non-tagged cmds to tagged targets 9214 */ 9215 if (TAGGED(target)) { 9216 fas->f_active[slot]->f_tags = 1; 9217 } 9218 if (old_active) { 9219 kmem_free((caddr_t)old_active, old_active->f_size); 9220 } 9221 rval = 0; 9222 } 9223 return (rval); 9224 } 9225 9226 /* 9227 * Error logging, printing, and debug print routines 9228 */ 9229 static char *fas_label = "fas"; 9230 9231 /*PRINTFLIKE3*/ 9232 static void 9233 fas_log(struct fas *fas, int level, const char *fmt, ...) 
9234 { 9235 dev_info_t *dev; 9236 va_list ap; 9237 9238 if (fas) { 9239 dev = fas->f_dev; 9240 } else { 9241 dev = 0; 9242 } 9243 9244 mutex_enter(&fas_log_mutex); 9245 9246 va_start(ap, fmt); 9247 (void) vsprintf(fas_log_buf, fmt, ap); 9248 va_end(ap); 9249 9250 if (level == CE_CONT) { 9251 scsi_log(dev, fas_label, level, "%s\n", fas_log_buf); 9252 } else { 9253 scsi_log(dev, fas_label, level, "%s", fas_log_buf); 9254 } 9255 9256 mutex_exit(&fas_log_mutex); 9257 } 9258 9259 /*PRINTFLIKE2*/ 9260 static void 9261 fas_printf(struct fas *fas, const char *fmt, ...) 9262 { 9263 dev_info_t *dev = 0; 9264 va_list ap; 9265 int level = CE_CONT; 9266 9267 mutex_enter(&fas_log_mutex); 9268 9269 va_start(ap, fmt); 9270 (void) vsprintf(fas_log_buf, fmt, ap); 9271 va_end(ap); 9272 9273 if (fas) { 9274 dev = fas->f_dev; 9275 level = CE_NOTE; 9276 scsi_log(dev, fas_label, level, "%s", fas_log_buf); 9277 } else { 9278 scsi_log(dev, fas_label, level, "%s\n", fas_log_buf); 9279 } 9280 9281 mutex_exit(&fas_log_mutex); 9282 } 9283 9284 #ifdef FASDEBUG 9285 /*PRINTFLIKE2*/ 9286 void 9287 fas_dprintf(struct fas *fas, const char *fmt, ...) 
9288 { 9289 dev_info_t *dev = 0; 9290 va_list ap; 9291 9292 if (fas) { 9293 dev = fas->f_dev; 9294 } 9295 9296 mutex_enter(&fas_log_mutex); 9297 9298 va_start(ap, fmt); 9299 (void) vsprintf(fas_log_buf, fmt, ap); 9300 va_end(ap); 9301 9302 scsi_log(dev, fas_label, SCSI_DEBUG, "%s", fas_log_buf); 9303 9304 mutex_exit(&fas_log_mutex); 9305 } 9306 #endif 9307 9308 9309 static void 9310 fas_printstate(struct fas *fas, char *msg) 9311 { 9312 volatile struct fasreg *fasreg = fas->f_reg; 9313 volatile struct dma *dmar = fas->f_dma; 9314 uint_t csr = fas_dma_reg_read(fas, &dmar->dma_csr); 9315 uint_t count = fas_dma_reg_read(fas, &dmar->dma_count); 9316 uint_t addr = fas_dma_reg_read(fas, &dmar->dma_addr); 9317 uint_t test = fas_dma_reg_read(fas, &dmar->dma_test); 9318 uint_t fas_cnt; 9319 9320 fas_log(fas, CE_WARN, "%s: current fas state:", msg); 9321 fas_printf(NULL, "Latched stat=0x%b intr=0x%b", 9322 fas->f_stat, FAS_STAT_BITS, fas->f_intr, FAS_INT_BITS); 9323 fas_printf(NULL, "last msgout: %s, last msgin: %s", 9324 scsi_mname(fas->f_last_msgout), scsi_mname(fas->f_last_msgin)); 9325 fas_printf(NULL, "DMA csr=0x%b", csr, dma_bits); 9326 fas_printf(NULL, 9327 "addr=%x dmacnt=%x test=%x last=%x last_cnt=%x", 9328 addr, count, test, fas->f_lastdma, fas->f_lastcount); 9329 9330 GET_FAS_COUNT(fasreg, fas_cnt); 9331 fas_printf(NULL, "fas state:"); 9332 fas_printf(NULL, "\tcount(32)=%x cmd=%x stat=%x stat2=%x intr=%x", 9333 fas_cnt, fasreg->fas_cmd, fasreg->fas_stat, fasreg->fas_stat2, 9334 fasreg->fas_intr); 9335 fas_printf(NULL, 9336 "\tstep=%x fifoflag=%x conf=%x test=%x conf2=%x conf3=%x", 9337 fasreg->fas_step, fasreg->fas_fifo_flag, fasreg->fas_conf, 9338 fasreg->fas_test, fasreg->fas_conf2, fasreg->fas_conf3); 9339 9340 if (fas->f_current_sp) { 9341 fas_dump_cmd(fas, fas->f_current_sp); 9342 } 9343 } 9344 9345 /* 9346 * dump all we know about a cmd 9347 */ 9348 static void 9349 fas_dump_cmd(struct fas *fas, struct fas_cmd *sp) 9350 { 9351 int i; 9352 uchar_t *cp = 
(uchar_t *)sp->cmd_pkt->pkt_cdbp; 9353 auto char buf[128]; 9354 9355 buf[0] = '\0'; 9356 fas_printf(NULL, "Cmd dump for Target %d Lun %d:", 9357 Tgt(sp), Lun(sp)); 9358 (void) sprintf(&buf[0], " cdb=["); 9359 for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) { 9360 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++); 9361 } 9362 (void) sprintf(&buf[strlen(buf)], " ]"); 9363 fas_printf(NULL, buf); 9364 fas_printf(NULL, "State=%s Last State=%s", 9365 fas_state_name(fas->f_state), fas_state_name(fas->f_laststate)); 9366 fas_printf(NULL, 9367 "pkt_state=0x%b pkt_flags=0x%x pkt_statistics=0x%x", 9368 sp->cmd_pkt->pkt_state, scsi_state_bits, sp->cmd_pkt_flags, 9369 sp->cmd_pkt->pkt_statistics); 9370 if (sp->cmd_pkt->pkt_state & STATE_GOT_STATUS) { 9371 fas_printf(NULL, "Status=0x%x\n", sp->cmd_pkt->pkt_scbp[0]); 9372 } 9373 } 9374 9375 /*ARGSUSED*/ 9376 static void 9377 fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp) 9378 { 9379 int i; 9380 uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp; 9381 auto char buf[128]; 9382 9383 buf[0] = '\0'; 9384 (void) sprintf(&buf[0], "?%d.%d: cdb=[", Tgt(sp), Lun(sp)); 9385 for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) { 9386 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++); 9387 } 9388 (void) sprintf(&buf[strlen(buf)], " ]"); 9389 fas_printf(NULL, buf); 9390 } 9391 9392 /* 9393 * state decoding for error messages 9394 */ 9395 static char * 9396 fas_state_name(ushort_t state) 9397 { 9398 if (state == STATE_FREE) { 9399 return ("FREE"); 9400 } else if (state & STATE_SELECTING) { 9401 if (state == STATE_SELECT_NORMAL) 9402 return ("SELECT"); 9403 else if (state == STATE_SELECT_N_STOP) 9404 return ("SEL&STOP"); 9405 else if (state == STATE_SELECT_N_SENDMSG) 9406 return ("SELECT_SNDMSG"); 9407 else 9408 return ("SEL_NO_ATN"); 9409 } else { 9410 static struct { 9411 char *sname; 9412 char state; 9413 } names[] = { 9414 "CMD_START", ACTS_CMD_START, 9415 "CMD_DONE", ACTS_CMD_DONE, 9416 "MSG_OUT", ACTS_MSG_OUT, 9417 "MSG_OUT_DONE", 
ACTS_MSG_OUT_DONE, 9418 "MSG_IN", ACTS_MSG_IN, 9419 "MSG_IN_MORE", ACTS_MSG_IN_MORE, 9420 "MSG_IN_DONE", ACTS_MSG_IN_DONE, 9421 "CLEARING", ACTS_CLEARING, 9422 "DATA", ACTS_DATA, 9423 "DATA_DONE", ACTS_DATA_DONE, 9424 "CMD_CMPLT", ACTS_C_CMPLT, 9425 "UNKNOWN", ACTS_UNKNOWN, 9426 "RESEL", ACTS_RESEL, 9427 "ENDVEC", ACTS_ENDVEC, 9428 "RESET", ACTS_RESET, 9429 "ABORTING", ACTS_ABORTING, 9430 "FROZEN", ACTS_FROZEN, 9431 0 9432 }; 9433 int i; 9434 for (i = 0; names[i].sname; i++) { 9435 if (names[i].state == state) 9436 return (names[i].sname); 9437 } 9438 } 9439 return ("<BAD>"); 9440 } 9441