1 /* 2 * Core routines and tables shareable across OS platforms. 3 * 4 * Copyright (c) 1994-2002 Justin T. Gibbs. 5 * Copyright (c) 2000-2003 Adaptec Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * substantially similar to the "NO WARRANTY" disclaimer below 16 * ("Disclaimer") and any redistribution must be conditioned upon 17 * including a substantially similar Disclaimer requirement for further 18 * binary redistribution. 19 * 3. Neither the names of the above-listed copyright holders nor the names 20 * of any contributors may be used to endorse or promote products derived 21 * from this software without specific prior written permission. 22 * 23 * Alternatively, this software may be distributed under the terms of the 24 * GNU General Public License ("GPL") version 2 as published by the Free 25 * Software Foundation. 26 * 27 * NO WARRANTY 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 38 * POSSIBILITY OF SUCH DAMAGES. 39 * 40 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#190 $ 41 * 42 * $FreeBSD$ 43 */ 44 45 #ifdef __linux__ 46 #include "aic79xx_osm.h" 47 #include "aic79xx_inline.h" 48 #include "aicasm/aicasm_insformat.h" 49 #else 50 #include <dev/aic7xxx/aic79xx_osm.h> 51 #include <dev/aic7xxx/aic79xx_inline.h> 52 #include <dev/aic7xxx/aicasm/aicasm_insformat.h> 53 #endif 54 55 /******************************** Globals *************************************/ 56 struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq); 57 58 /***************************** Lookup Tables **********************************/ 59 char *ahd_chip_names[] = 60 { 61 "NONE", 62 "aic7901", 63 "aic7902", 64 "aic7901A" 65 }; 66 static const u_int num_chip_names = NUM_ELEMENTS(ahd_chip_names); 67 68 /* 69 * Hardware error codes. 
70 */ 71 struct ahd_hard_error_entry { 72 uint8_t errno; 73 char *errmesg; 74 }; 75 76 static struct ahd_hard_error_entry ahd_hard_errors[] = { 77 { DSCTMOUT, "Discard Timer has timed out" }, 78 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 79 { SQPARERR, "Sequencer Parity Error" }, 80 { DPARERR, "Data-path Parity Error" }, 81 { MPARERR, "Scratch or SCB Memory Parity Error" }, 82 { CIOPARERR, "CIOBUS Parity Error" }, 83 }; 84 static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors); 85 86 static struct ahd_phase_table_entry ahd_phase_table[] = 87 { 88 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 89 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 90 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, 91 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, 92 { P_COMMAND, MSG_NOOP, "in Command phase" }, 93 { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, 94 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, 95 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, 96 { P_BUSFREE, MSG_NOOP, "while idle" }, 97 { 0, MSG_NOOP, "in unknown phase" } 98 }; 99 100 /* 101 * In most cases we only wish to itterate over real phases, so 102 * exclude the last element from the count. 
103 */ 104 static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1; 105 106 /* Our Sequencer Program */ 107 #include "aic79xx_seq.h" 108 109 /**************************** Function Declarations ***************************/ 110 static void ahd_handle_transmission_error(struct ahd_softc *ahd); 111 static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, 112 u_int lqistat1); 113 static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, 114 u_int busfreetime); 115 static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); 116 static void ahd_handle_proto_violation(struct ahd_softc *ahd); 117 static void ahd_force_renegotiation(struct ahd_softc *ahd, 118 struct ahd_devinfo *devinfo); 119 120 static struct ahd_tmode_tstate* 121 ahd_alloc_tstate(struct ahd_softc *ahd, 122 u_int scsi_id, char channel); 123 #ifdef AHD_TARGET_MODE 124 static void ahd_free_tstate(struct ahd_softc *ahd, 125 u_int scsi_id, char channel, int force); 126 #endif 127 static void ahd_devlimited_syncrate(struct ahd_softc *ahd, 128 struct ahd_initiator_tinfo *, 129 u_int *period, 130 u_int *ppr_options, 131 role_t role); 132 static void ahd_update_neg_table(struct ahd_softc *ahd, 133 struct ahd_devinfo *devinfo, 134 struct ahd_transinfo *tinfo); 135 static void ahd_update_pending_scbs(struct ahd_softc *ahd); 136 static void ahd_fetch_devinfo(struct ahd_softc *ahd, 137 struct ahd_devinfo *devinfo); 138 static void ahd_scb_devinfo(struct ahd_softc *ahd, 139 struct ahd_devinfo *devinfo, 140 struct scb *scb); 141 static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, 142 struct ahd_devinfo *devinfo, 143 struct scb *scb); 144 static void ahd_build_transfer_msg(struct ahd_softc *ahd, 145 struct ahd_devinfo *devinfo); 146 static void ahd_construct_sdtr(struct ahd_softc *ahd, 147 struct ahd_devinfo *devinfo, 148 u_int period, u_int offset); 149 static void ahd_construct_wdtr(struct ahd_softc *ahd, 150 struct ahd_devinfo *devinfo, 151 u_int bus_width); 152 static void 
ahd_construct_ppr(struct ahd_softc *ahd, 153 struct ahd_devinfo *devinfo, 154 u_int period, u_int offset, 155 u_int bus_width, u_int ppr_options); 156 static void ahd_clear_msg_state(struct ahd_softc *ahd); 157 static void ahd_handle_message_phase(struct ahd_softc *ahd); 158 typedef enum { 159 AHDMSG_1B, 160 AHDMSG_2B, 161 AHDMSG_EXT 162 } ahd_msgtype; 163 static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, 164 u_int msgval, int full); 165 static int ahd_parse_msg(struct ahd_softc *ahd, 166 struct ahd_devinfo *devinfo); 167 static int ahd_handle_msg_reject(struct ahd_softc *ahd, 168 struct ahd_devinfo *devinfo); 169 static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, 170 struct ahd_devinfo *devinfo); 171 static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); 172 static void ahd_handle_devreset(struct ahd_softc *ahd, 173 struct ahd_devinfo *devinfo, 174 u_int lun, cam_status status, 175 char *message, int verbose_level); 176 #if AHD_TARGET_MODE 177 static void ahd_setup_target_msgin(struct ahd_softc *ahd, 178 struct ahd_devinfo *devinfo, 179 struct scb *scb); 180 #endif 181 182 static u_int ahd_sglist_size(struct ahd_softc *ahd); 183 static u_int ahd_sglist_allocsize(struct ahd_softc *ahd); 184 static bus_dmamap_callback_t 185 ahd_dmamap_cb; 186 static void ahd_initialize_hscbs(struct ahd_softc *ahd); 187 static int ahd_init_scbdata(struct ahd_softc *ahd); 188 static void ahd_fini_scbdata(struct ahd_softc *ahd); 189 static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); 190 static void ahd_iocell_first_selection(struct ahd_softc *ahd); 191 static void ahd_add_col_list(struct ahd_softc *ahd, 192 struct scb *scb, u_int col_idx); 193 static void ahd_rem_col_list(struct ahd_softc *ahd, 194 struct scb *scb); 195 static void ahd_chip_init(struct ahd_softc *ahd); 196 static void ahd_qinfifo_requeue(struct ahd_softc *ahd, 197 struct scb *prev_scb, 198 struct scb *scb); 199 static int ahd_qinfifo_count(struct ahd_softc *ahd); 200 
static int		ahd_search_scb_list(struct ahd_softc *ahd, int target,
					    char channel, int lun, u_int tag,
					    role_t role, uint32_t status,
					    ahd_search_action action,
					    u_int *list_head, u_int tid);
static void		ahd_stitch_tid_list(struct ahd_softc *ahd,
					    u_int tid_prev, u_int tid_cur,
					    u_int tid_next);
static void		ahd_add_scb_to_free_list(struct ahd_softc *ahd,
						 u_int scbid);
static u_int		ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
				     u_int prev, u_int next, u_int tid);
static void		ahd_reset_current_bus(struct ahd_softc *ahd);
static ahd_callback_t	ahd_reset_poll;
static ahd_callback_t	ahd_stat_timer;
#ifdef AHD_DUMP_SEQ
static void		ahd_dumpseq(struct ahd_softc *ahd);
#endif
static void		ahd_loadseq(struct ahd_softc *ahd);
static int		ahd_check_patch(struct ahd_softc *ahd,
					struct patch **start_patch,
					u_int start_instr, u_int *skip_addr);
static u_int		ahd_resolve_seqaddr(struct ahd_softc *ahd,
					    u_int address);
static void		ahd_download_instr(struct ahd_softc *ahd,
					   u_int instrptr, uint8_t *dconsts);
static int		ahd_probe_stack_size(struct ahd_softc *ahd);
#ifdef AHD_TARGET_MODE
static void		ahd_queue_lstate_event(struct ahd_softc *ahd,
					       struct ahd_tmode_lstate *lstate,
					       u_int initiator_id,
					       u_int event_type,
					       u_int event_arg);
static void		ahd_update_scsiid(struct ahd_softc *ahd,
					  u_int targid_mask);
static int		ahd_handle_target_cmd(struct ahd_softc *ahd,
					      struct target_cmd *cmd);
#endif

/******************************** Private Inlines *****************************/
static __inline void	ahd_assert_atn(struct ahd_softc *ahd);
static __inline int	ahd_currently_packetized(struct ahd_softc *ahd);
static __inline int	ahd_set_active_fifo(struct ahd_softc *ahd);

/* Raise ATN on the bus so the target enters message-out phase. */
static __inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
	ahd_outb(ahd, SCSISIGO, ATNO);
}

/*
 * Determine if the current connection has a packetized
 * agreement.  This does not necessarily mean that we
 * are currently in a packetized transfer.  We could
 * just as easily be sending or receiving a message.
 */
static __inline int
ahd_currently_packetized(struct ahd_softc *ahd)
{
	ahd_mode_state	 saved_modes;
	int		 packetized;

	saved_modes = ahd_save_modes(ahd);
	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
		/*
		 * The packetized bit refers to the last
		 * connection, not the current one.  Check
		 * for non-zero LQISTATE instead.
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		packetized = ahd_inb(ahd, LQISTATE) != 0;
	} else {
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
	}
	ahd_restore_modes(ahd, saved_modes);
	return (packetized);
}

/*
 * Point both mode pointers at the currently active data FIFO.
 * Returns non-zero on success, zero if no FIFO is active.
 * Caller must already be in the SCSI mode (asserted below).
 */
static __inline int
ahd_set_active_fifo(struct ahd_softc *ahd)
{
	u_int active_fifo;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	switch (active_fifo) {
	case 0:
	case 1:
		/* FIFO numbers double as mode values here. */
		ahd_set_modes(ahd, active_fifo, active_fifo);
		return (1);
	default:
		return (0);
	}
}

/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero.  Clears all
 * message/phase state and any in-progress command-channel DMA.
 * Leaves the sequencer running (unpaused) on return.
 */
void
ahd_restart(struct ahd_softc *ahd)
{

	ahd_pause(ahd);

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* No more pending messages */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SCSISIGO, 0);		/* De-assert BSY */
	ahd_outb(ahd, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
	ahd_outb(ahd, SEQINTCTL, 0);
	ahd_outb(ahd, LASTPHASE, P_BUSFREE);
	ahd_outb(ahd, SEQ_FLAGS, 0);
	ahd_outb(ahd, SAVED_SCSIID, 0xFF);
	ahd_outb(ahd, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);

	/* Always allow reselection */
	ahd_outb(ahd, SCSISEQ1,
		 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	/* Ensure that no DMA operations are in progress */
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	ahd_outb(ahd, SCBHCNT, 0);
	ahd_outb(ahd, CCSCBCTL, CCSCBRESET);
	ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
	ahd_unpause(ahd);
}

/*
 * Reset one data FIFO (0 or 1), aborting any S/G fetch that is in
 * flight.  Mode state is saved and restored around the operation.
 */
void
ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
{
	ahd_mode_state	 saved_modes;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
		printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
#endif
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, fifo, fifo);
	ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
		ahd_outb(ahd, CCSGCTL, CCSGRESET);
	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
	ahd_outb(ahd, SG_STATE, 0);
	ahd_restore_modes(ahd, saved_modes);
}

/************************* Input/Output Queues ********************************/
/*
 * Flush any completed commands that are sitting in the command
 * complete queues down on the chip but have yet to be dma'ed back up.
 */
void
ahd_flush_qoutfifo(struct ahd_softc *ahd)
{
	struct scb *scb;
	ahd_mode_state saved_modes;
	u_int saved_scbptr;
	u_int ccscbctl;
	u_int scbid;
	u_int next_scbid;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	saved_scbptr = ahd_get_scbptr(ahd);

	/*
	 * Wait for any inprogress DMA to complete and clear DMA state
	 * if this is for an SCB in the qinfifo.
	 *
	 * NOTE(review): ccscbctl is assigned the register value already
	 * masked with (CCARREN|CCSCBEN), so the (CCSCBDIR|CCARREN) and
	 * CCSCBDIR tests below can never see CCSCBDIR set.  Verify the
	 * parenthesization against the original - it looks like the
	 * intent was to mask only in the loop condition.
	 */
	while ((ccscbctl = ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0) {

		if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
			if ((ccscbctl & ARRDONE) != 0)
				break;
		} else if ((ccscbctl & CCSCBDONE) != 0)
			break;
		ahd_delay(200);
	}
	if ((ccscbctl & CCSCBDIR) != 0)
		ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));

	/*
	 * Complete any SCBs that just finished being
	 * DMA'ed into the qoutfifo.
	 */
	ahd_run_qoutfifo(ahd);

	/*
	 * Manually update/complete any completed SCBs that are waiting to be
	 * DMA'ed back up to the host.
	 */
	scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
	while (!SCBID_IS_NULL(scbid)) {
		uint8_t *hscb_ptr;
		u_int	 i;

		ahd_set_scbptr(ahd, scbid);
		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: Warning - DMA-up and complete "
			       "SCB %d invalid\n", ahd_name(ahd), scbid);
			/*
			 * NOTE(review): continue does not advance scbid
			 * to next_scbid, so an invalid SCB spins this
			 * loop forever - confirm intended behavior.
			 */
			continue;
		}
		/* Copy the SCB back up from chip SCB ram byte by byte. */
		hscb_ptr = (uint8_t *)scb->hscb;
		for (i = 0; i < sizeof(struct hardware_scb); i++)
			*hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);

		ahd_complete_scb(ahd, scb);
		scbid = next_scbid;
	}
	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);

	scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
	while (!SCBID_IS_NULL(scbid)) {

		ahd_set_scbptr(ahd, scbid);
		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: Warning - Complete SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			/* NOTE(review): same non-advancing continue as above. */
			continue;
		}

		ahd_complete_scb(ahd, scb);
		scbid = next_scbid;
	}
	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
	ahd_set_scbptr(ahd, saved_scbptr);

	/*
	 * Flush the good status FIFO for completed packetized commands.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
		/* Reading GSFIFO consumes the entry, so this loop advances. */
		scbid = (ahd_inb(ahd, GSFIFO+1) << 8)
		      | ahd_inb(ahd, GSFIFO);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: Warning - GSFIFO SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			continue;
		}
		ahd_complete_scb(ahd, scb);
	}

	/*
	 * Restore state.
	 */
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
}

/*
 * Drain the host-side qoutfifo, completing every SCB whose tag the
 * sequencer has DMA'ed up since the last run.  Guards against
 * re-entry via the AHD_RUNNING_QOUTFIFO flag.
 */
void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
	struct scb *scb;
	u_int  scb_index;

	if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
		panic("ahd_run_qoutfifo recursion");
	ahd->flags |= AHD_RUNNING_QOUTFIFO;
	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
	/*
	 * The valid tag toggles each time the fifo wraps, letting us
	 * distinguish fresh entries from last lap's stale ones.
	 */
	while ((ahd->qoutfifo[ahd->qoutfifonext]
	      & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag) {

		scb_index = ahd_le16toh(ahd->qoutfifo[ahd->qoutfifonext]
				      & ~QOUTFIFO_ENTRY_VALID_LE);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			printf("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahd_name(ahd), scb_index,
			       ahd->qoutfifonext);
			ahd_dump_card_state(ahd);
		} else
			ahd_complete_scb(ahd, scb);

		ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
		if (ahd->qoutfifonext == 0)
			ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID_LE;
	}
	ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
}

/************************* Interrupt Handling *********************************/
void
ahd_handle_hwerrint(struct ahd_softc *ahd)
{
	/*
	 * Some catastrophic hardware error has occurred.
	 * Print it for the user and disable the controller.
505 */ 506 int i; 507 int error; 508 509 error = ahd_inb(ahd, ERROR); 510 for (i = 0; i < num_errors; i++) { 511 if ((error & ahd_hard_errors[i].errno) != 0) 512 printf("%s: hwerrint, %s\n", 513 ahd_name(ahd), ahd_hard_errors[i].errmesg); 514 } 515 516 ahd_dump_card_state(ahd); 517 panic("BRKADRINT"); 518 519 /* Tell everyone that this HBA is no longer available */ 520 ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, 521 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 522 CAM_NO_HBA); 523 524 /* Tell the system that this controller has gone away. */ 525 ahd_free(ahd); 526 } 527 528 void 529 ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) 530 { 531 u_int seqintcode; 532 533 /* 534 * Save the sequencer interrupt code and clear the SEQINT 535 * bit. We will unpause the sequencer, if appropriate, 536 * after servicing the request. 537 */ 538 seqintcode = ahd_inb(ahd, SEQINTCODE); 539 ahd_outb(ahd, CLRINT, CLRSEQINT); 540 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { 541 /* 542 * Unpause the sequencer and let it clear 543 * SEQINT by writing NO_SEQINT to it. This 544 * will cause the sequencer to be paused again, 545 * which is the expected state of this routine. 
546 */ 547 ahd_unpause(ahd); 548 while (!ahd_is_paused(ahd)) 549 ; 550 ahd_outb(ahd, CLRINT, CLRSEQINT); 551 } 552 ahd_update_modes(ahd); 553 #ifdef AHD_DEBUG 554 if ((ahd_debug & AHD_SHOW_MISC) != 0) 555 printf("%s: Handle Seqint Called for code %d\n", 556 ahd_name(ahd), seqintcode); 557 #endif 558 switch (seqintcode) { 559 case BAD_SCB_STATUS: 560 { 561 struct scb *scb; 562 u_int scbid; 563 int cmds_pending; 564 565 scbid = ahd_get_scbptr(ahd); 566 scb = ahd_lookup_scb(ahd, scbid); 567 if (scb != NULL) { 568 ahd_complete_scb(ahd, scb); 569 } else { 570 printf("%s: WARNING no command for scb %d " 571 "(bad status)\n", ahd_name(ahd), scbid); 572 ahd_dump_card_state(ahd); 573 } 574 cmds_pending = ahd_inw(ahd, CMDS_PENDING); 575 if (cmds_pending > 0) 576 ahd_outw(ahd, CMDS_PENDING, cmds_pending - 1); 577 break; 578 } 579 case ENTERING_NONPACK: 580 { 581 struct scb *scb; 582 u_int scbid; 583 584 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 585 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 586 scbid = ahd_get_scbptr(ahd); 587 scb = ahd_lookup_scb(ahd, scbid); 588 if (scb == NULL) { 589 /* 590 * Somehow need to know if this 591 * is from a selection or reselection. 592 * From that, we can termine target 593 * ID so we at least have an I_T nexus. 594 */ 595 } else { 596 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); 597 ahd_outb(ahd, SAVED_LUN, scb->hscb->lun); 598 ahd_outb(ahd, SEQ_FLAGS, 0x0); 599 } 600 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0 601 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { 602 /* 603 * Phase change after read stream with 604 * CRC error with P0 asserted on last 605 * packet. 
606 */ 607 #ifdef AHD_DEBUG 608 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 609 printf("%s: Assuming LQIPHASE_NLQ with " 610 "P0 assertion\n", ahd_name(ahd)); 611 #endif 612 } 613 #ifdef AHD_DEBUG 614 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 615 printf("%s: Entering NONPACK\n", ahd_name(ahd)); 616 #endif 617 break; 618 } 619 case INVALID_SEQINT: 620 printf("%s: Invalid Sequencer interrupt occurred.\n", 621 ahd_name(ahd)); 622 ahd_dump_card_state(ahd); 623 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 624 break; 625 case STATUS_OVERRUN: 626 { 627 struct scb *scb; 628 u_int scbid; 629 630 scbid = ahd_get_scbptr(ahd); 631 scb = ahd_lookup_scb(ahd, scbid); 632 if (scb != NULL) 633 ahd_print_path(ahd, scb); 634 else 635 printf("%s: ", ahd_name(ahd)); 636 printf("SCB %d Packetized Status Overrun", scbid); 637 ahd_dump_card_state(ahd); 638 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 639 break; 640 } 641 case CFG4ISTAT_INTR: 642 { 643 struct scb *scb; 644 u_int scbid; 645 646 scbid = ahd_get_scbptr(ahd); 647 scb = ahd_lookup_scb(ahd, scbid); 648 if (scb == NULL) { 649 ahd_dump_card_state(ahd); 650 printf("CFG4ISTAT: Free SCB %d referenced", scbid); 651 panic("For safety"); 652 } 653 ahd_outq(ahd, HADDR, scb->sense_busaddr); 654 ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE); 655 ahd_outb(ahd, HCNT + 2, 0); 656 ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG); 657 ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); 658 break; 659 } 660 case ILLEGAL_PHASE: 661 { 662 u_int bus_phase; 663 664 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 665 printf("%s: ILLEGAL_PHASE 0x%x\n", 666 ahd_name(ahd), bus_phase); 667 668 switch (bus_phase) { 669 case P_DATAOUT: 670 case P_DATAIN: 671 case P_DATAOUT_DT: 672 case P_DATAIN_DT: 673 case P_MESGOUT: 674 case P_STATUS: 675 case P_MESGIN: 676 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 677 printf("%s: Issued Bus Reset.\n", ahd_name(ahd)); 678 break; 679 case P_COMMAND: 680 { 681 struct ahd_devinfo devinfo; 682 struct scb *scb; 683 
struct ahd_initiator_tinfo *targ_info; 684 struct ahd_tmode_tstate *tstate; 685 struct ahd_transinfo *tinfo; 686 u_int scbid; 687 688 /* 689 * If a target takes us into the command phase 690 * assume that it has been externally reset and 691 * has thus lost our previous packetized negotiation 692 * agreement. Since we have not sent an identify 693 * message and may not have fully qualified the 694 * connection, we change our command to TUR, assert 695 * ATN and ABORT the task when we go to message in 696 * phase. The OSM will see the REQUEUE_REQUEST 697 * status and retry the command. 698 */ 699 scbid = ahd_get_scbptr(ahd); 700 scb = ahd_lookup_scb(ahd, scbid); 701 if (scb == NULL) { 702 printf("Invalid phase with no valid SCB. " 703 "Resetting bus.\n"); 704 ahd_reset_channel(ahd, 'A', 705 /*Initiate Reset*/TRUE); 706 break; 707 } 708 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), 709 SCB_GET_TARGET(ahd, scb), 710 SCB_GET_LUN(scb), 711 SCB_GET_CHANNEL(ahd, scb), 712 ROLE_INITIATOR); 713 targ_info = ahd_fetch_transinfo(ahd, 714 devinfo.channel, 715 devinfo.our_scsiid, 716 devinfo.target, 717 &tstate); 718 tinfo = &targ_info->curr; 719 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 720 AHD_TRANS_ACTIVE, /*paused*/TRUE); 721 ahd_set_syncrate(ahd, &devinfo, /*period*/0, 722 /*offset*/0, /*ppr_options*/0, 723 AHD_TRANS_ACTIVE, /*paused*/TRUE); 724 ahd_outb(ahd, SCB_CDB_STORE, 0); 725 ahd_outb(ahd, SCB_CDB_STORE+1, 0); 726 ahd_outb(ahd, SCB_CDB_STORE+2, 0); 727 ahd_outb(ahd, SCB_CDB_STORE+3, 0); 728 ahd_outb(ahd, SCB_CDB_STORE+4, 0); 729 ahd_outb(ahd, SCB_CDB_STORE+5, 0); 730 ahd_outb(ahd, SCB_CDB_LEN, 6); 731 scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE); 732 scb->hscb->control |= MK_MESSAGE; 733 ahd_outb(ahd, SCB_CONTROL, scb->hscb->control); 734 ahd_outb(ahd, MSG_OUT, HOST_MSG); 735 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); 736 /* 737 * The lun is 0, regardless of the SCB's lun 738 * as we have not sent an identify message. 
739 */ 740 ahd_outb(ahd, SAVED_LUN, 0); 741 ahd_outb(ahd, SEQ_FLAGS, 0); 742 ahd_assert_atn(ahd); 743 scb->flags &= ~(SCB_PACKETIZED); 744 scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT; 745 ahd_freeze_devq(ahd, scb); 746 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); 747 ahd_freeze_scb(scb); 748 749 /* 750 * Allow the sequencer to continue with 751 * non-pack processing. 752 */ 753 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 754 ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT); 755 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { 756 ahd_outb(ahd, CLRLQOINT1, 0); 757 } 758 #ifdef AHD_DEBUG 759 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 760 ahd_print_path(ahd, scb); 761 printf("Unexpected command phase from " 762 "packetized target\n"); 763 } 764 #endif 765 break; 766 } 767 } 768 break; 769 } 770 case CFG4OVERRUN: 771 { 772 struct scb *scb; 773 u_int scb_index; 774 775 #ifdef AHD_DEBUG 776 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 777 printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd), 778 ahd_inb(ahd, MODE_PTR)); 779 } 780 #endif 781 scb_index = ahd_get_scbptr(ahd); 782 scb = ahd_lookup_scb(ahd, scb_index); 783 if (scb == NULL) { 784 /* 785 * Attempt to transfer to an SCB that is 786 * not outstanding. 787 */ 788 ahd_assert_atn(ahd); 789 ahd_outb(ahd, MSG_OUT, HOST_MSG); 790 ahd->msgout_buf[0] = MSG_ABORT_TASK; 791 ahd->msgout_len = 1; 792 ahd->msgout_index = 0; 793 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 794 /* 795 * Clear status received flag to prevent any 796 * attempt to complete this bogus SCB. 
797 */ 798 ahd_outb(ahd, SCB_CONTROL, 799 ahd_inb(ahd, SCB_CONTROL) & ~STATUS_RCVD); 800 } 801 break; 802 } 803 case DUMP_CARD_STATE: 804 { 805 ahd_dump_card_state(ahd); 806 break; 807 } 808 case PDATA_REINIT: 809 { 810 #ifdef AHD_DEBUG 811 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 812 printf("%s: PDATA_REINIT - DFCNTRL = 0x%x " 813 "SG_CACHE_SHADOW = 0x%x\n", 814 ahd_name(ahd), ahd_inb(ahd, DFCNTRL), 815 ahd_inb(ahd, SG_CACHE_SHADOW)); 816 } 817 #endif 818 ahd_reinitialize_dataptrs(ahd); 819 break; 820 } 821 case HOST_MSG_LOOP: 822 { 823 struct ahd_devinfo devinfo; 824 825 /* 826 * The sequencer has encountered a message phase 827 * that requires host assistance for completion. 828 * While handling the message phase(s), we will be 829 * notified by the sequencer after each byte is 830 * transfered so we can track bus phase changes. 831 * 832 * If this is the first time we've seen a HOST_MSG_LOOP 833 * interrupt, initialize the state of the host message 834 * loop. 835 */ 836 ahd_fetch_devinfo(ahd, &devinfo); 837 if (ahd->msg_type == MSG_TYPE_NONE) { 838 struct scb *scb; 839 u_int scb_index; 840 u_int bus_phase; 841 842 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 843 if (bus_phase != P_MESGIN 844 && bus_phase != P_MESGOUT) { 845 printf("ahd_intr: HOST_MSG_LOOP bad " 846 "phase 0x%x\n", bus_phase); 847 /* 848 * Probably transitioned to bus free before 849 * we got here. Just punt the message. 
850 */ 851 ahd_dump_card_state(ahd); 852 ahd_clear_intstat(ahd); 853 ahd_restart(ahd); 854 return; 855 } 856 857 scb_index = ahd_get_scbptr(ahd); 858 scb = ahd_lookup_scb(ahd, scb_index); 859 if (devinfo.role == ROLE_INITIATOR) { 860 if (bus_phase == P_MESGOUT) 861 ahd_setup_initiator_msgout(ahd, 862 &devinfo, 863 scb); 864 else { 865 ahd->msg_type = 866 MSG_TYPE_INITIATOR_MSGIN; 867 ahd->msgin_index = 0; 868 } 869 } 870 #if AHD_TARGET_MODE 871 else { 872 if (bus_phase == P_MESGOUT) { 873 ahd->msg_type = 874 MSG_TYPE_TARGET_MSGOUT; 875 ahd->msgin_index = 0; 876 } 877 else 878 ahd_setup_target_msgin(ahd, 879 &devinfo, 880 scb); 881 } 882 #endif 883 } 884 885 ahd_handle_message_phase(ahd); 886 break; 887 } 888 case NO_MATCH: 889 { 890 /* Ensure we don't leave the selection hardware on */ 891 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 892 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 893 894 printf("%s:%c:%d: no active SCB for reconnecting " 895 "target - issuing BUS DEVICE RESET\n", 896 ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4); 897 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 898 "REG0 == 0x%x ACCUM = 0x%x\n", 899 ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), 900 ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM)); 901 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 902 "SINDEX == 0x%x\n", 903 ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), 904 ahd_find_busy_tcl(ahd, 905 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), 906 ahd_inb(ahd, SAVED_LUN))), 907 ahd_inw(ahd, SINDEX)); 908 printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 909 "SCB_CONTROL == 0x%x\n", 910 ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), 911 ahd_inb_scbram(ahd, SCB_LUN), 912 ahd_inb_scbram(ahd, SCB_CONTROL)); 913 printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", 914 ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); 915 printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); 916 printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); 917 
ahd_dump_card_state(ahd); 918 ahd->msgout_buf[0] = MSG_BUS_DEV_RESET; 919 ahd->msgout_len = 1; 920 ahd->msgout_index = 0; 921 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 922 ahd_outb(ahd, MSG_OUT, HOST_MSG); 923 ahd_assert_atn(ahd); 924 break; 925 } 926 case PROTO_VIOLATION: 927 { 928 ahd_handle_proto_violation(ahd); 929 break; 930 } 931 case IGN_WIDE_RES: 932 { 933 struct ahd_devinfo devinfo; 934 935 ahd_fetch_devinfo(ahd, &devinfo); 936 ahd_handle_ign_wide_residue(ahd, &devinfo); 937 break; 938 } 939 case BAD_PHASE: 940 { 941 u_int lastphase; 942 943 lastphase = ahd_inb(ahd, LASTPHASE); 944 printf("%s:%c:%d: unknown scsi bus phase %x, " 945 "lastphase = 0x%x. Attempting to continue\n", 946 ahd_name(ahd), 'A', 947 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), 948 lastphase, ahd_inb(ahd, SCSISIGI)); 949 break; 950 } 951 case MISSED_BUSFREE: 952 { 953 u_int lastphase; 954 955 lastphase = ahd_inb(ahd, LASTPHASE); 956 printf("%s:%c:%d: Missed busfree. " 957 "Lastphase = 0x%x, Curphase = 0x%x\n", 958 ahd_name(ahd), 'A', 959 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), 960 lastphase, ahd_inb(ahd, SCSISIGI)); 961 ahd_restart(ahd); 962 return; 963 } 964 case DATA_OVERRUN: 965 { 966 /* 967 * When the sequencer detects an overrun, it 968 * places the controller in "BITBUCKET" mode 969 * and allows the target to complete its transfer. 970 * Unfortunately, none of the counters get updated 971 * when the controller is in this mode, so we have 972 * no way of knowing how large the overrun was. 973 */ 974 struct scb *scb; 975 u_int scbindex; 976 #ifdef AHD_DEBUG 977 u_int lastphase; 978 #endif 979 980 scbindex = ahd_get_scbptr(ahd); 981 scb = ahd_lookup_scb(ahd, scbindex); 982 #ifdef AHD_DEBUG 983 lastphase = ahd_inb(ahd, LASTPHASE); 984 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 985 ahd_print_path(ahd, scb); 986 printf("data overrun detected %s. 
Tag == 0x%x.\n", 987 ahd_lookup_phase_entry(lastphase)->phasemsg, 988 SCB_GET_TAG(scb)); 989 ahd_print_path(ahd, scb); 990 printf("%s seen Data Phase. Length = %ld. " 991 "NumSGs = %d.\n", 992 ahd_inb(ahd, SEQ_FLAGS) & DPHASE 993 ? "Have" : "Haven't", 994 ahd_get_transfer_length(scb), scb->sg_count); 995 ahd_dump_sglist(scb); 996 } 997 #endif 998 999 /* 1000 * Set this and it will take effect when the 1001 * target does a command complete. 1002 */ 1003 ahd_freeze_devq(ahd, scb); 1004 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); 1005 ahd_freeze_scb(scb); 1006 break; 1007 } 1008 case MKMSG_FAILED: 1009 { 1010 struct ahd_devinfo devinfo; 1011 struct scb *scb; 1012 u_int scbid; 1013 1014 ahd_fetch_devinfo(ahd, &devinfo); 1015 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 1016 ahd_name(ahd), devinfo.channel, devinfo.target, 1017 devinfo.lun); 1018 scbid = ahd_get_scbptr(ahd); 1019 scb = ahd_lookup_scb(ahd, scbid); 1020 if (scb != NULL 1021 && (scb->flags & SCB_RECOVERY_SCB) != 0) 1022 /* 1023 * Ensure that we didn't put a second instance of this 1024 * SCB into the QINFIFO. 
1025 */ 1026 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), 1027 SCB_GET_CHANNEL(ahd, scb), 1028 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 1029 ROLE_INITIATOR, /*status*/0, 1030 SEARCH_REMOVE); 1031 ahd_outb(ahd, SCB_CONTROL, 1032 ahd_inb(ahd, SCB_CONTROL) & ~MK_MESSAGE); 1033 break; 1034 } 1035 case TASKMGMT_FUNC_COMPLETE: 1036 { 1037 u_int scbid; 1038 struct scb *scb; 1039 1040 scbid = ahd_get_scbptr(ahd); 1041 scb = ahd_lookup_scb(ahd, scbid); 1042 if (scb != NULL) { 1043 u_int lun; 1044 u_int tag; 1045 cam_status error; 1046 1047 ahd_print_path(ahd, scb); 1048 printf("Task Management Func 0x%x Complete\n", 1049 scb->hscb->task_management); 1050 lun = CAM_LUN_WILDCARD; 1051 tag = SCB_LIST_NULL; 1052 1053 switch (scb->hscb->task_management) { 1054 case SIU_TASKMGMT_ABORT_TASK: 1055 tag = SCB_GET_TAG(scb); 1056 case SIU_TASKMGMT_ABORT_TASK_SET: 1057 case SIU_TASKMGMT_CLEAR_TASK_SET: 1058 lun = scb->hscb->lun; 1059 error = CAM_REQ_ABORTED; 1060 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 1061 'A', lun, tag, ROLE_INITIATOR, 1062 error); 1063 break; 1064 case SIU_TASKMGMT_LUN_RESET: 1065 lun = scb->hscb->lun; 1066 case SIU_TASKMGMT_TARGET_RESET: 1067 { 1068 struct ahd_devinfo devinfo; 1069 1070 ahd_scb_devinfo(ahd, &devinfo, scb); 1071 error = CAM_BDR_SENT; 1072 ahd_handle_devreset(ahd, &devinfo, lun, 1073 CAM_BDR_SENT, 1074 lun != CAM_LUN_WILDCARD 1075 ? "Lun Reset" 1076 : "Target Reset", 1077 /*verbose_level*/0); 1078 break; 1079 } 1080 default: 1081 panic("Unexpected TaskMgmt Func\n"); 1082 break; 1083 } 1084 } 1085 break; 1086 } 1087 case TASKMGMT_CMD_CMPLT_OKAY: 1088 { 1089 u_int scbid; 1090 struct scb *scb; 1091 1092 /* 1093 * An ABORT TASK TMF failed to be delivered before 1094 * the targeted command completed normally. 1095 */ 1096 scbid = ahd_get_scbptr(ahd); 1097 scb = ahd_lookup_scb(ahd, scbid); 1098 if (scb != NULL) { 1099 /* 1100 * Remove the second instance of this SCB from 1101 * the QINFIFO if it is still there. 
1102 */ 1103 ahd_print_path(ahd, scb); 1104 printf("SCB completes before TMF\n"); 1105 /* 1106 * Handle losing the race. Wait until any 1107 * current selection completes. We will then 1108 * set the TMF back to zero in this SCB so that 1109 * the sequencer doesn't bother to issue another 1110 * sequencer interrupt for its completion. 1111 */ 1112 while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 1113 && (ahd_inb(ahd, SSTAT0) & SELDO) == 0 1114 && (ahd_inb(ahd, SSTAT1) & SELTO) == 0) 1115 ; 1116 ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0); 1117 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), 1118 SCB_GET_CHANNEL(ahd, scb), 1119 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 1120 ROLE_INITIATOR, /*status*/0, 1121 SEARCH_REMOVE); 1122 } 1123 break; 1124 } 1125 case TRACEPOINT0: 1126 case TRACEPOINT1: 1127 case TRACEPOINT2: 1128 case TRACEPOINT3: 1129 printf("%s: Tracepoint %d\n", ahd_name(ahd), 1130 seqintcode - TRACEPOINT0); 1131 break; 1132 case NO_SEQINT: 1133 break; 1134 case SAW_HWERR: 1135 ahd_handle_hwerrint(ahd); 1136 break; 1137 default: 1138 printf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd), 1139 seqintcode); 1140 break; 1141 } 1142 /* 1143 * The sequencer is paused immediately on 1144 * a SEQINT, so we should restart it when 1145 * we're done. 
1146 */ 1147 ahd_unpause(ahd); 1148 } 1149 1150 void 1151 ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) 1152 { 1153 struct scb *scb; 1154 u_int status0; 1155 u_int status3; 1156 u_int status; 1157 u_int lqistat1; 1158 u_int lqostat0; 1159 u_int scbid; 1160 u_int busfreetime; 1161 1162 ahd_update_modes(ahd); 1163 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 1164 1165 status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR); 1166 status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO); 1167 status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 1168 lqistat1 = ahd_inb(ahd, LQISTAT1); 1169 lqostat0 = ahd_inb(ahd, LQOSTAT0); 1170 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; 1171 if ((status0 & (SELDI|SELDO)) != 0) { 1172 u_int simode0; 1173 1174 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 1175 simode0 = ahd_inb(ahd, SIMODE0); 1176 status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO); 1177 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 1178 } 1179 scbid = ahd_get_scbptr(ahd); 1180 scb = ahd_lookup_scb(ahd, scbid); 1181 if (scb != NULL 1182 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 1183 scb = NULL; 1184 1185 /* Make sure the sequencer is in a safe location. */ 1186 ahd_clear_critical_section(ahd); 1187 1188 if ((status0 & IOERR) != 0) { 1189 u_int now_lvd; 1190 1191 now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40; 1192 printf("%s: Transceiver State Has Changed to %s mode\n", 1193 ahd_name(ahd), now_lvd ? "LVD" : "SE"); 1194 ahd_outb(ahd, CLRSINT0, CLRIOERR); 1195 /* 1196 * A change in I/O mode is equivalent to a bus reset. 1197 */ 1198 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 1199 ahd_pause(ahd); 1200 ahd_setup_iocell_workaround(ahd); 1201 ahd_unpause(ahd); 1202 } else if ((status0 & OVERRUN) != 0) { 1203 printf("%s: SCSI offset overrun detected. 
Resetting bus.\n",
		       ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
	} else if ((status & SCSIRSTI) != 0) {
		printf("%s: Someone reset channel A\n", ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
	} else if ((status & SCSIPERR) != 0) {
		/* Parity or CRC error on the bus; full recovery below. */
		ahd_handle_transmission_error(ahd);
	} else if (lqostat0 != 0) {
		printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
		ahd_outb(ahd, CLRLQOINT0, lqostat0);
		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
			ahd_outb(ahd, CLRLQOINT1, 0);
		}
	} else if ((status & SELTO) != 0) {
		u_int scbid;

		/* Stop the selection */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* No more pending messages */
		ahd_clear_msg_state(ahd);

		/* Clear interrupt state */
		ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off just in case no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahd_outb(ahd, CLRSINT0, CLRSELINGO);

		scbid = ahd_inw(ahd, WAITING_TID_HEAD);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: ahd_intr - referenced scb not "
			       "valid during SELTO scb(0x%x)\n",
			       ahd_name(ahd), scbid);
			ahd_dump_card_state(ahd);
		} else {
			struct ahd_devinfo devinfo;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
				ahd_print_path(ahd, scb);
				printf("Saw Selection Timeout for SCB 0x%x\n",
				       scbid);
			}
#endif
			/*
			 * Force a renegotiation with this target just in
			 * case the cable was pulled and will later be
			 * re-attached.  The target may forget its negotiation
			 * settings with us should it attempt to reselect
			 * during the interruption.  The target will not issue
			 * a unit attention in this case, so we must always
			 * renegotiate.
			 */
			ahd_scb_devinfo(ahd, &devinfo, scb);
			ahd_force_renegotiation(ahd, &devinfo);
			ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahd_freeze_devq(ahd, scb);
		}
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & (SELDI|SELDO)) != 0) {
		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if (status3 != 0) {
		printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
		       ahd_name(ahd), status3);
		ahd_outb(ahd, CLRSINT3, status3);
	} else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
		ahd_handle_lqiphase_error(ahd, lqistat1);
	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
		/*
		 * This status can be delayed during some
		 * streaming operations.  The SCSIPHASE
		 * handler has already dealt with this case
		 * so just clear the error.
		 */
		ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
	} else if ((status & BUSFREE) != 0) {
		u_int lqostat1;
		int   restart;
		int   clear_fifo;
		int   packetized;
		u_int mode;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target,
		 * that is affected by this busfree and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);

		/*
		 * Determine what we were up to at the time of
		 * the busfree.
		 */
		mode = AHD_MODE_SCSI;
		busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
		lqostat1 = ahd_inb(ahd, LQOSTAT1);
		switch (busfreetime) {
		case BUSFREE_DFF0:
		case BUSFREE_DFF1:
		{
			u_int scbid;
			struct scb *scb;

			/* Busfree seen by a data FIFO: select that FIFO. */
			mode = busfreetime == BUSFREE_DFF0
			     ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
			ahd_set_modes(ahd, mode, mode);
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
			if (scb == NULL) {
				printf("%s: Invalid SCB %d in DFF%d "
				       "during unexpected busfree\n",
				       ahd_name(ahd), scbid, mode);
				packetized = 0;
			} else
				packetized = (scb->flags & SCB_PACKETIZED) != 0;
			clear_fifo = 1;
			break;
		}
		case BUSFREE_LQO:
			clear_fifo = 0;
			packetized = 1;
			break;
		default:
			clear_fifo = 0;
			packetized = (lqostat1 & LQOBUSFREE) != 0;
			if (!packetized
			 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE)
				packetized = 1;
			break;
		}

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("Saw Busfree.  Busfreetime = 0x%x.\n",
			       busfreetime);
#endif
		/*
		 * Busfrees that occur in non-packetized phases are
		 * handled by the nonpkt_busfree handler.
		 */
		if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
			restart = ahd_handle_pkt_busfree(ahd, busfreetime);
		} else {
			packetized = 0;
			restart = ahd_handle_nonpkt_busfree(ahd);
		}
		/*
		 * Clear the busfree interrupt status.  The setting of
		 * the interrupt is a pulse, so in a perfect world, we
		 * would not need to muck with the ENBUSFREE logic.  This
		 * would ensure that if the bus moves on to another
		 * connection, busfree protection is still in force.  If
		 * BUSFREEREV is broken, however, we must manually clear
		 * the ENBUSFREE if the busfree occurred during a non-pack
		 * connection so that we don't get false positives during
		 * future, packetized, connections.
1371 */ 1372 ahd_outb(ahd, CLRSINT1, CLRBUSFREE); 1373 if (packetized == 0 1374 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0) 1375 ahd_outb(ahd, SIMODE1, 1376 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE); 1377 1378 if (clear_fifo) 1379 ahd_clear_fifo(ahd, mode); 1380 1381 ahd_clear_msg_state(ahd); 1382 ahd_outb(ahd, CLRINT, CLRSCSIINT); 1383 if (restart) { 1384 ahd_restart(ahd); 1385 } else { 1386 ahd_unpause(ahd); 1387 } 1388 } else { 1389 printf("%s: Missing case in ahd_handle_scsiint. status = %x\n", 1390 ahd_name(ahd), status); 1391 ahd_dump_card_state(ahd); 1392 ahd_clear_intstat(ahd); 1393 ahd_unpause(ahd); 1394 } 1395 } 1396 1397 static void 1398 ahd_handle_transmission_error(struct ahd_softc *ahd) 1399 { 1400 struct scb *scb; 1401 u_int scbid; 1402 u_int lqistat1; 1403 u_int lqistat2; 1404 u_int msg_out; 1405 u_int curphase; 1406 u_int lastphase; 1407 u_int perrdiag; 1408 u_int cur_col; 1409 int silent; 1410 1411 scb = NULL; 1412 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 1413 lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ); 1414 lqistat2 = ahd_inb(ahd, LQISTAT2); 1415 if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0 1416 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) { 1417 u_int lqistate; 1418 1419 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 1420 lqistate = ahd_inb(ahd, LQISTATE); 1421 if ((lqistate >= 0x1E && lqistate <= 0x24) 1422 || (lqistate == 0x29)) { 1423 #ifdef AHD_DEBUG 1424 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 1425 printf("%s: NLQCRC found via LQISTATE\n", 1426 ahd_name(ahd)); 1427 } 1428 #endif 1429 lqistat1 |= LQICRCI_NLQ; 1430 } 1431 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 1432 } 1433 1434 ahd_outb(ahd, CLRLQIINT1, lqistat1); 1435 lastphase = ahd_inb(ahd, LASTPHASE); 1436 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 1437 perrdiag = ahd_inb(ahd, PERRDIAG); 1438 msg_out = MSG_INITIATOR_DET_ERR; 1439 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR); 1440 1441 /* 1442 * Try to find the SCB associated with this error. 
1443 */ 1444 silent = FALSE; 1445 if (lqistat1 == 0 1446 || (lqistat1 & LQICRCI_NLQ) != 0) { 1447 if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0) 1448 ahd_set_active_fifo(ahd); 1449 scbid = ahd_get_scbptr(ahd); 1450 scb = ahd_lookup_scb(ahd, scbid); 1451 if (scb != NULL && SCB_IS_SILENT(scb)) 1452 silent = TRUE; 1453 } 1454 1455 cur_col = 0; 1456 if (silent == FALSE) { 1457 printf("%s: Transmission error detected\n", ahd_name(ahd)); 1458 ahd_lqistat1_print(lqistat1, &cur_col, 50); 1459 ahd_lastphase_print(lastphase, &cur_col, 50); 1460 ahd_scsisigi_print(curphase, &cur_col, 50); 1461 ahd_perrdiag_print(perrdiag, &cur_col, 50); 1462 printf("\n"); 1463 ahd_dump_card_state(ahd); 1464 } 1465 1466 if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { 1467 if (silent == FALSE) { 1468 printf("%s: Gross protocol error during incoming " 1469 "packet. lqistat1 == 0x%x. Resetting bus.\n", 1470 ahd_name(ahd), lqistat1); 1471 } 1472 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 1473 return; 1474 } else if ((lqistat1 & LQICRCI_LQ) != 0) { 1475 /* 1476 * A CRC error has been detected on an incoming LQ. 1477 * The bus is currently hung on the last ACK. 1478 * Hit LQIRETRY to release the last ack, and 1479 * wait for the sequencer to determine that ATNO 1480 * is asserted while in message out to take us 1481 * to our host message loop. No NONPACKREQ or 1482 * LQIPHASE type errors will occur in this 1483 * scenario. After this first LQIRETRY, the LQI 1484 * manager will be in ISELO where it will 1485 * happily sit until another packet phase begins. 1486 * Unexpected bus free detection is enabled 1487 * through any phases that occur after we release 1488 * this last ack until the LQI manager sees a 1489 * packet phase. This implies we may have to 1490 * ignore a perfectly valid "unexected busfree" 1491 * after our "initiator detected error" message is 1492 * sent. A busfree is the expected response after 1493 * we tell the target that it's L_Q was corrupted. 
1494 * (SPI4R09 10.7.3.3.3) 1495 */ 1496 ahd_outb(ahd, LQCTL2, LQIRETRY); 1497 printf("LQIRetry for LQICRCI_LQ to release ACK\n"); 1498 } else if ((lqistat1 & LQICRCI_NLQ) != 0) { 1499 /* 1500 * We detected a CRC error in a NON-LQ packet. 1501 * The hardware has varying behavior in this situation 1502 * depending on whether this packet was part of a 1503 * stream or not. 1504 * 1505 * PKT by PKT mode: 1506 * The hardware has already acked the complete packet. 1507 * If the target honors our outstanding ATN condition, 1508 * we should be (or soon will be) in MSGOUT phase. 1509 * This will trigger the LQIPHASE_LQ status bit as the 1510 * hardware was expecting another LQ. Unexpected 1511 * busfree detection is enabled. Once LQIPHASE_LQ is 1512 * true (first entry into host message loop is much 1513 * the same), we must clear LQIPHASE_LQ and hit 1514 * LQIRETRY so the hardware is ready to handle 1515 * a future LQ. NONPACKREQ will not be asserted again 1516 * once we hit LQIRETRY until another packet is 1517 * processed. The target may either go busfree 1518 * or start another packet in response to our message. 1519 * 1520 * Read Streaming P0 asserted: 1521 * If we raise ATN and the target completes the entire 1522 * stream (P0 asserted during the last packet), the 1523 * hardware will ack all data and return to the ISTART 1524 * state. When the target reponds to our ATN condition, 1525 * LQIPHASE_LQ will be asserted. We should respond to 1526 * this with an LQIRETRY to prepare for any future 1527 * packets. NONPACKREQ will not be asserted again 1528 * once we hit LQIRETRY until another packet is 1529 * processed. The target may either go busfree or 1530 * start another packet in response to our message. 1531 * Busfree detection is enabled. 1532 * 1533 * Read Streaming P0 not asserted: 1534 * If we raise ATN and the target transitions to 1535 * MSGOUT in or after a packet where P0 is not 1536 * asserted, the hardware will assert LQIPHASE_NLQ. 
		 * We should respond to the LQIPHASE_NLQ with an
		 * LQIRETRY.  Should the target stay in a non-pkt
		 * phase after we send our message, the hardware
		 * will assert LQIPHASE_LQ.  Recovery is then just as
		 * listed above for the read streaming with P0 asserted.
		 * Busfree detection is enabled.
		 */
		if (silent == FALSE)
			printf("LQICRC_NLQ\n");
		if (scb == NULL) {
			printf("%s: No SCB valid for LQICRC_NLQ.  "
			       "Resetting bus\n", ahd_name(ahd));
			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
			return;
		}
	} else if ((lqistat1 & LQIBADLQI) != 0) {
		printf("Need to handle BADLQI!\n");
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		return;
	} else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
		if ((curphase & ~P_DATAIN_DT) != 0) {
			/* Ack the byte.  So we can continue. */
			if (silent == FALSE)
				printf("Acking %s to clear perror\n",
				    ahd_lookup_phase_entry(curphase)->phasemsg);
			ahd_inb(ahd, SCSIDAT);
		}

		if (curphase == P_MESGIN)
			msg_out = MSG_PARITY_ERROR;
	}

	/*
	 * We've set the hardware to assert ATN if we
	 * get a parity error on "in" phases, so all we
	 * need to do is stuff the message buffer with
	 * the appropriate message.  "In" phases have set
	 * mesg_out to something other than MSG_NOP.
	 */
	ahd->send_msg_perror = msg_out;
	if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
		scb->flags |= SCB_TRANSMISSION_ERROR;
	ahd_outb(ahd, MSG_OUT, HOST_MSG);
	ahd_outb(ahd, CLRINT, CLRSCSIINT);
	ahd_unpause(ahd);
}

/*
 * Recover from an "illegal" LQI phase change (LQIPHASE_LQ/LQIPHASE_NLQ).
 * If the phase change was the target answering our ATN on a packet
 * boundary, an LQIRETRY restarts the LQI manager; otherwise the bus
 * must be reset to clear the error.
 */
static void
ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
{
	/*
	 * Clear the sources of the interrupts.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, CLRLQIINT1, lqistat1);

	/*
	 * If the "illegal" phase changes were in response
	 * to our ATN to flag a CRC error, AND we ended up
	 * on packet boundaries, clear the error, restart the
	 * LQI manager as appropriate, and go on our merry
	 * way toward sending the message.  Otherwise, reset
	 * the bus to clear the error.
	 */
	ahd_set_active_fifo(ahd);
	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
	 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
		if ((lqistat1 & LQIPHASE_LQ) != 0) {
			printf("LQIRETRY for LQIPHASE_LQ\n");
			ahd_outb(ahd, LQCTL2, LQIRETRY);
		} else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
			printf("LQIRETRY for LQIPHASE_NLQ\n");
			ahd_outb(ahd, LQCTL2, LQIRETRY);
		} else
			panic("ahd_handle_lqiphase_error: No phase errors\n");
		ahd_dump_card_state(ahd);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_unpause(ahd);
	} else {
		printf("Reseting Channel for LQI Phase error\n");
		ahd_dump_card_state(ahd);
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
	}
}

/*
 * Packetized unexpected or expected busfree.
 * Entered in mode based on busfreetime.
 *
 * Returns non-zero if the sequencer should be restarted, zero if it
 * should simply be unpaused.
 */
static int
ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
{
	u_int lqostat1;

	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	lqostat1 = ahd_inb(ahd, LQOSTAT1);
	if ((lqostat1 & LQOBUSFREE) != 0) {
		struct scb *scb;
		u_int scbid;
		u_int saved_scbptr;
		u_int waiting_h;
		u_int waiting_t;
		u_int next;

		if ((busfreetime & BUSFREE_LQO) == 0)
			printf("%s: Warning, BUSFREE time is 0x%x.  "
			       "Expected BUSFREE_LQO.\n",
			       ahd_name(ahd), busfreetime);
		/*
		 * The LQO manager detected an unexpected busfree
		 * either:
		 *
		 * 1) During an outgoing LQ.
		 * 2) After an outgoing LQ but before the first
		 *    REQ of the command packet.
		 * 3) During an outgoing command packet.
		 *
		 * In all cases, CURRSCB is pointing to the
		 * SCB that encountered the failure.  Clean
		 * up the queue, clear SELDO and LQOBUSFREE,
		 * and allow the sequencer to restart the select
		 * out at its leisure.
		 */
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		scbid = ahd_inw(ahd, CURRSCB);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL)
			panic("SCB not valid during LQOBUSFREE");
		/*
		 * Clear the status.
		 */
		ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
			ahd_outb(ahd, CLRLQOINT1, 0);
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
		ahd_flush_device_writes(ahd);
		ahd_outb(ahd, CLRSINT0, CLRSELDO);

		/*
		 * Return the LQO manager to its idle loop.  It will
		 * not do this automatically if the busfree occurs
		 * after the first REQ of either the LQ or command
		 * packet or between the LQ and command packet.
		 */
		ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);

		/*
		 * Update the waiting for selection queue so
		 * we restart on the correct SCB.
		 */
		waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
		saved_scbptr = ahd_get_scbptr(ahd);
		if (waiting_h != scbid) {

			/* Push the failed SCB back to the head of the queue. */
			ahd_outw(ahd, WAITING_TID_HEAD, scbid);
			waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
			if (waiting_t == waiting_h) {
				ahd_outw(ahd, WAITING_TID_TAIL, scbid);
				next = SCB_LIST_NULL;
			} else {
				ahd_set_scbptr(ahd, waiting_h);
				next = ahd_inw_scbram(ahd, SCB_NEXT2);
			}
			ahd_set_scbptr(ahd, scbid);
			ahd_outw(ahd, SCB_NEXT2, next);
		}
		ahd_set_scbptr(ahd, saved_scbptr);
		if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
			if (SCB_IS_SILENT(scb) == FALSE) {
				ahd_print_path(ahd, scb);
				printf("Probable outgoing LQ CRC error.  "
				       "Retrying command\n");
			}
			scb->crc_retry_count++;
		} else {
			/* Retry budget exhausted; fail the command. */
			ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
			ahd_freeze_scb(scb);
			ahd_freeze_devq(ahd, scb);
		}
		/* Return unpausing the sequencer. */
		return (0);
	} else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
		/*
		 * Ignore what are really parity errors that
		 * occur on the last REQ of a free running
		 * clock prior to going busfree.  Some drives
		 * do not properly actively negate just before
		 * going busfree resulting in a parity glitch.
		 */
		ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
			printf("%s: Parity on last REQ detected "
			       "during busfree phase.\n",
			       ahd_name(ahd));
#endif
		/* Return unpausing the sequencer. 
		 */
		return (0);
	}
	if (ahd->src_mode != AHD_MODE_SCSI) {
		u_int scbid;
		struct scb *scb;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		ahd_print_path(ahd, scb);
		printf("Unexpected PKT busfree condition\n");
		ahd_dump_card_state(ahd);
		ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
			       SCB_GET_LUN(scb), SCB_GET_TAG(scb),
			       ROLE_INITIATOR, CAM_UNEXP_BUSFREE);

		/* Return restarting the sequencer. */
		return (1);
	}
	printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
	ahd_dump_card_state(ahd);
	/* Restart the sequencer. */
	return (1);
}

/*
 * Non-packetized unexpected or expected busfree.
 *
 * Returns non-zero if the sequencer should be restarted.
 */
static int
ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
{
	struct ahd_devinfo devinfo;
	struct scb *scb;
	u_int lastphase;
	u_int saved_scsiid;
	u_int saved_lun;
	u_int target;
	u_int initiator_role_id;
	u_int scbid;
	u_int ppr_busfree;
	int   printerror;

	/*
	 * Look at what phase we were last in.  If it's message out,
	 * chances are pretty good that the busfree was in response
	 * to one of our abort requests.
	 */
	lastphase = ahd_inb(ahd, LASTPHASE);
	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
	saved_lun = ahd_inb(ahd, SAVED_LUN);
	target = SCSIID_TARGET(ahd, saved_scsiid);
	initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
	ahd_compile_devinfo(&devinfo, initiator_role_id,
			    target, saved_lun, 'A', ROLE_INITIATOR);
	printerror = 1;

	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	/* Ignore the SCB if the connection was never identified. */
	if (scb != NULL
	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		scb = NULL;

	ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0;
	if (lastphase == P_MESGOUT) {
		u_int tag;

		tag = SCB_LIST_NULL;
		if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
		 || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
			int found;
			int sent_msg;

			if (scb == NULL) {
				ahd_print_devinfo(ahd, &devinfo);
				printf("Abort for unidentified "
				       "connection completed.\n");
				/* restart the sequencer. */
				return (1);
			}
			sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
			ahd_print_path(ahd, scb);
			printf("SCB %d - Abort%s Completed.\n",
			       SCB_GET_TAG(scb),
			       sent_msg == MSG_ABORT_TAG ? "" : " Tag");

			if (sent_msg == MSG_ABORT_TAG)
				tag = SCB_GET_TAG(scb);

			if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
				/*
				 * This abort is in response to an
				 * unexpected switch to command phase
				 * for a packetized connection.  Since
				 * the identify message was never sent,
				 * "saved lun" is 0.  We really want to
				 * abort only the SCB that encountered
				 * this error, which could have a different
				 * lun.  The SCB will be retried so the OS
				 * will see the UA after renegotiating to
				 * packetized.
				 */
				tag = SCB_GET_TAG(scb);
				saved_lun = scb->hscb->lun;
			}
			found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
					       tag, ROLE_INITIATOR,
					       CAM_REQ_ABORTED);
			printf("found == 0x%x\n", found);
			printerror = 0;
		} else if (ahd_sent_msg(ahd, AHDMSG_1B,
					MSG_BUS_DEV_RESET, TRUE)) {
#ifdef __FreeBSD__
			/*
			 * Don't mark the user's request for this BDR
			 * as completing with CAM_BDR_SENT.  CAM3
			 * specifies CAM_REQ_CMP.
			 */
			if (scb != NULL
			 && scb->io_ctx->ccb_h.func_code == XPT_RESET_DEV
			 && ahd_match_scb(ahd, scb, target, 'A',
					  CAM_LUN_WILDCARD, SCB_LIST_NULL,
					  ROLE_INITIATOR))
				ahd_set_transaction_status(scb, CAM_REQ_CMP);
#endif
			ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
					    CAM_BDR_SENT, "Bus Device Reset",
					    /*verbose_level*/0);
			printerror = 0;
		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
			&& ppr_busfree == 0) {
			struct ahd_initiator_tinfo *tinfo;
			struct ahd_tmode_tstate *tstate;

			/*
			 * PPR Rejected.  Try non-ppr negotiation
			 * and retry command.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("PPR negotiation rejected busfree.\n");
#endif
			tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
						    devinfo.our_scsiid,
						    devinfo.target, &tstate);
			tinfo->curr.transport_version = 2;
			tinfo->goal.transport_version = 2;
			tinfo->goal.ppr_options = 0;
			ahd_qinfifo_requeue_tail(ahd, scb);
			printerror = 0;
		} else if ((ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
			 || ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE))
			&& ppr_busfree == 0) {
			/*
			 * Negotiation Rejected.  Go-async and
			 * retry command.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("Negotiation rejected busfree.\n");
#endif
			ahd_set_width(ahd, &devinfo,
				      MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR|AHD_TRANS_GOAL,
				      /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0,
					 AHD_TRANS_CUR|AHD_TRANS_GOAL,
					 /*paused*/TRUE);
			ahd_qinfifo_requeue_tail(ahd, scb);
			printerror = 0;
		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
			&& ahd_sent_msg(ahd, AHDMSG_1B,
					MSG_INITIATOR_DET_ERR, TRUE)) {

#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("Expected IDE Busfree\n");
#endif
			printerror = 0;
		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
			&& ahd_sent_msg(ahd, AHDMSG_1B,
					MSG_MESSAGE_REJECT, TRUE)) {

#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("Expected QAS Reject Busfree\n");
#endif
			printerror = 0;
		}
	}

	/*
	 * The busfree required flag is honored at the end of
	 * the message phases.  We check it last in case we
	 * had to send some other message that caused a busfree.
	 */
	if (printerror != 0
	 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
	 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {

		ahd_freeze_devq(ahd, scb);
		ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
		ahd_freeze_scb(scb);
		if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
			ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
				       SCB_GET_CHANNEL(ahd, scb),
				       SCB_GET_LUN(scb), SCB_LIST_NULL,
				       ROLE_INITIATOR, CAM_REQ_ABORTED);
		} else {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("PPR Negotiation Busfree.\n");
#endif
			ahd_done(ahd, scb);
		}
		printerror = 0;
	}
	if (printerror != 0) {
		int aborted;

		aborted = 0;
		if (scb != NULL) {
			u_int tag;

			if ((scb->hscb->control & TAG_ENB) != 0)
				tag = SCB_GET_TAG(scb);
			else
				tag = SCB_LIST_NULL;
			ahd_print_path(ahd, scb);
			aborted = ahd_abort_scbs(ahd, target, 'A',
						 SCB_GET_LUN(scb), tag,
						 ROLE_INITIATOR,
						 CAM_UNEXP_BUSFREE);
		} else {
			/*
			 * We had not fully identified this connection,
			 * so we cannot abort anything.
			 */
			printf("%s: ", ahd_name(ahd));
		}
		if (lastphase != P_BUSFREE)
			ahd_force_renegotiation(ahd, &devinfo);
		printf("Unexpected busfree %s, %d SCBs aborted, "
		       "PRGMCNT == 0x%x\n",
		       ahd_lookup_phase_entry(lastphase)->phasemsg,
		       aborted,
		       ahd_inb(ahd, PRGMCNT)
			| (ahd_inb(ahd, PRGMCNT+1) << 8));
		ahd_dump_card_state(ahd);
	}
	/* Always restart the sequencer. 
	 */
	return (1);
}

/*
 * Respond to a PROTO_VIOLATION sequencer interrupt.  Depending on how
 * far the connection progressed (identify sent, CDB sent, status
 * received), either reset the bus or assert ATN and queue an abort
 * message for the offending transaction.
 */
static void
ahd_handle_proto_violation(struct ahd_softc *ahd)
{
	struct ahd_devinfo devinfo;
	struct scb *scb;
	u_int scbid;
	u_int seq_flags;
	u_int curphase;
	u_int lastphase;
	int   found;

	ahd_fetch_devinfo(ahd, &devinfo);
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	seq_flags = ahd_inb(ahd, SEQ_FLAGS);
	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
	lastphase = ahd_inb(ahd, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {

		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahd_print_devinfo(ahd, &devinfo);
		printf("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction.  Print an error and reset the bus.
		 */
		ahd_print_devinfo(ahd, &devinfo);
		printf("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahd_print_path(ahd, scb);
			printf("No or incomplete CDB sent to device.\n");
		} else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
			  & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command.  Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it.  Assert ATN and prepare to send an abort
			 * message.
			 */
			ahd_print_path(ahd, scb);
			printf("Completed command without status.\n");
		} else {
			ahd_print_path(ahd, scb);
			printf("Unknown protocol violation.\n");
			ahd_dump_card_state(ahd);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahd_reset_channel(ahd, 'A', TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahd_outb(ahd, SCSISEQ0,
			 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
		ahd_assert_atn(ahd);
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			ahd_print_devinfo(ahd, &devinfo);
			ahd->msgout_buf[0] = MSG_ABORT_TASK;
			ahd->msgout_len = 1;
			ahd->msgout_index = 0;
			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			ahd_print_path(ahd, scb);
			scb->flags |= SCB_ABORT;
		}
		printf("Protocol violation %s.  Attempting to abort.\n",
		       ahd_lookup_phase_entry(curphase)->phasemsg);
	}
}

/*
 * Force renegotiation to occur the next time we initiate
 * a command to the current device.
 */
static void
ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	struct ahd_initiator_tinfo *targ_info;
	struct ahd_tmode_tstate *tstate;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
		ahd_print_devinfo(ahd, devinfo);
		printf("Forcing renegotiation\n");
	}
#endif
	targ_info = ahd_fetch_transinfo(ahd,
					devinfo->channel,
					devinfo->our_scsiid,
					devinfo->target,
					&tstate);
	ahd_update_neg_request(ahd, devinfo, tstate,
			       targ_info, AHD_NEG_IF_NON_ASYNC);
}

/* Upper bound on single-step iterations before declaring a stuck sequencer. */
#define AHD_MAX_STEPS 2000
void
ahd_clear_critical_section(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;
	int stepping;
	int steps;
	int first_instr;
	u_int simode0;
	u_int simode1;
	u_int simode3;
	u_int lqimode0;
	u_int lqimode1;
	u_int lqomode0;
	u_int lqomode1;

	if (ahd->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	first_instr = 0;
	simode0 = 0;
	simode1 = 0;
	simode3 = 0;
	lqimode0 = 0;
	lqimode1 = 0;
	lqomode0 = 0;
	lqomode1 = 0;
	saved_modes = ahd_save_modes(ahd);
	for (;;) {
		struct cs *cs;
		u_int seqaddr;
		u_int i;

		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		seqaddr = ahd_inb(ahd, CURADDR)
			| (ahd_inb(ahd, CURADDR+1) << 8);

		/* Is the sequencer currently inside any critical section? */
		cs = ahd->critical_sections;
		for (i = 0; i < ahd->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		if (i == ahd->num_critical_sections)
			break;

		if (steps > AHD_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n"
			       "%s: First Instruction 0x%x now 0x%x\n",
			       ahd_name(ahd), ahd_name(ahd), first_instr,
			       seqaddr);
			ahd_dump_card_state(ahd);
			panic("critical section loop");
		}

		steps++;
#ifdef AHD_DEBUG
		if
((ahd_debug & AHD_SHOW_MISC) != 0) 2170 printf("%s: Single stepping at 0x%x\n", ahd_name(ahd), 2171 seqaddr); 2172 #endif 2173 if (stepping == FALSE) { 2174 2175 first_instr = seqaddr; 2176 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 2177 simode0 = ahd_inb(ahd, SIMODE0); 2178 simode3 = ahd_inb(ahd, SIMODE3); 2179 lqimode0 = ahd_inb(ahd, LQIMODE0); 2180 lqimode1 = ahd_inb(ahd, LQIMODE1); 2181 lqomode0 = ahd_inb(ahd, LQOMODE0); 2182 lqomode1 = ahd_inb(ahd, LQOMODE1); 2183 ahd_outb(ahd, SIMODE0, 0); 2184 ahd_outb(ahd, SIMODE3, 0); 2185 ahd_outb(ahd, LQIMODE0, 0); 2186 ahd_outb(ahd, LQIMODE1, 0); 2187 ahd_outb(ahd, LQOMODE0, 0); 2188 ahd_outb(ahd, LQOMODE1, 0); 2189 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2190 simode1 = ahd_inb(ahd, SIMODE1); 2191 ahd_outb(ahd, SIMODE1, ENBUSFREE); 2192 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP); 2193 stepping = TRUE; 2194 } 2195 ahd_outb(ahd, CLRSINT1, CLRBUSFREE); 2196 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2197 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); 2198 ahd_outb(ahd, HCNTRL, ahd->unpause); 2199 do { 2200 ahd_delay(200); 2201 } while (!ahd_is_paused(ahd)); 2202 ahd_update_modes(ahd); 2203 } 2204 if (stepping) { 2205 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 2206 ahd_outb(ahd, SIMODE0, simode0); 2207 ahd_outb(ahd, SIMODE3, simode3); 2208 ahd_outb(ahd, LQIMODE0, lqimode0); 2209 ahd_outb(ahd, LQIMODE1, lqimode1); 2210 ahd_outb(ahd, LQOMODE0, lqomode0); 2211 ahd_outb(ahd, LQOMODE1, lqomode1); 2212 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2213 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP); 2214 ahd_outb(ahd, SIMODE1, simode1); 2215 /* 2216 * SCSIINT seems to glitch occassionally when 2217 * the interrupt masks are restored. Clear SCSIINT 2218 * one more time so that only persistent errors 2219 * are seen as a real interrupt. 
2220 */ 2221 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2222 } 2223 ahd_restore_modes(ahd, saved_modes); 2224 } 2225 2226 /* 2227 * Clear any pending interrupt status. 2228 */ 2229 void 2230 ahd_clear_intstat(struct ahd_softc *ahd) 2231 { 2232 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 2233 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 2234 /* Clear any interrupt conditions this may have caused */ 2235 ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2 2236 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD); 2237 ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT 2238 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI 2239 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ); 2240 ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ 2241 |CLRLQOATNPKT|CLRLQOTCRC); 2242 ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS 2243 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT); 2244 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { 2245 ahd_outb(ahd, CLRLQOINT0, 0); 2246 ahd_outb(ahd, CLRLQOINT1, 0); 2247 } 2248 ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR); 2249 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 2250 |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); 2251 ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO 2252 |CLRIOERR|CLROVERRUN); 2253 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2254 } 2255 2256 /**************************** Debugging Routines ******************************/ 2257 #ifdef AHD_DEBUG 2258 uint32_t ahd_debug = AHD_DEBUG_OPTS; 2259 #endif 2260 void 2261 ahd_print_scb(struct scb *scb) 2262 { 2263 struct hardware_scb *hscb; 2264 int i; 2265 2266 hscb = scb->hscb; 2267 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 2268 (void *)scb, 2269 hscb->control, 2270 hscb->scsiid, 2271 hscb->lun, 2272 hscb->cdb_len); 2273 printf("Shared Data: "); 2274 for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) 2275 printf("%#02x", hscb->shared_data.idata.cdb[i]); 2276 printf(" dataptr:%#x%x datacnt:%#x sgptr:%#x 
tag:%#x\n", 2277 (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), 2278 (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), 2279 ahd_le32toh(hscb->datacnt), 2280 ahd_le32toh(hscb->sgptr), 2281 SCB_GET_TAG(scb)); 2282 ahd_dump_sglist(scb); 2283 } 2284 2285 void 2286 ahd_dump_sglist(struct scb *scb) 2287 { 2288 int i; 2289 2290 if (scb->sg_count > 0) { 2291 if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) { 2292 struct ahd_dma64_seg *sg_list; 2293 2294 sg_list = (struct ahd_dma64_seg*)scb->sg_list; 2295 for (i = 0; i < scb->sg_count; i++) { 2296 uint64_t addr; 2297 uint32_t len; 2298 2299 addr = ahd_le64toh(sg_list[i].addr); 2300 len = ahd_le32toh(sg_list[i].len); 2301 printf("sg[%d] - Addr 0x%x%x : Length %d%s\n", 2302 i, 2303 (uint32_t)((addr >> 32) & 0xFFFFFFFF), 2304 (uint32_t)(addr & 0xFFFFFFFF), 2305 sg_list[i].len & AHD_SG_LEN_MASK, 2306 (sg_list[i].len & AHD_DMA_LAST_SEG) 2307 ? " Last" : ""); 2308 } 2309 } else { 2310 struct ahd_dma_seg *sg_list; 2311 2312 sg_list = (struct ahd_dma_seg*)scb->sg_list; 2313 for (i = 0; i < scb->sg_count; i++) { 2314 uint32_t len; 2315 2316 len = ahd_le32toh(sg_list[i].len); 2317 printf("sg[%d] - Addr 0x%x%x : Length %d%s\n", 2318 i, 2319 (len >> 24) & SG_HIGH_ADDR_BITS, 2320 ahd_le32toh(sg_list[i].addr), 2321 len & AHD_SG_LEN_MASK, 2322 len & AHD_DMA_LAST_SEG ? " Last" : ""); 2323 } 2324 } 2325 } 2326 } 2327 2328 /************************* Transfer Negotiation *******************************/ 2329 /* 2330 * Allocate per target mode instance (ID we respond to as a target) 2331 * transfer negotiation data structures. 
2332 */ 2333 static struct ahd_tmode_tstate * 2334 ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) 2335 { 2336 struct ahd_tmode_tstate *master_tstate; 2337 struct ahd_tmode_tstate *tstate; 2338 int i; 2339 2340 master_tstate = ahd->enabled_targets[ahd->our_id]; 2341 if (ahd->enabled_targets[scsi_id] != NULL 2342 && ahd->enabled_targets[scsi_id] != master_tstate) 2343 panic("%s: ahd_alloc_tstate - Target already allocated", 2344 ahd_name(ahd)); 2345 tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); 2346 if (tstate == NULL) 2347 return (NULL); 2348 2349 /* 2350 * If we have allocated a master tstate, copy user settings from 2351 * the master tstate (taken from SRAM or the EEPROM) for this 2352 * channel, but reset our current and goal settings to async/narrow 2353 * until an initiator talks to us. 2354 */ 2355 if (master_tstate != NULL) { 2356 memcpy(tstate, master_tstate, sizeof(*tstate)); 2357 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 2358 for (i = 0; i < 16; i++) { 2359 memset(&tstate->transinfo[i].curr, 0, 2360 sizeof(tstate->transinfo[i].curr)); 2361 memset(&tstate->transinfo[i].goal, 0, 2362 sizeof(tstate->transinfo[i].goal)); 2363 } 2364 } else 2365 memset(tstate, 0, sizeof(*tstate)); 2366 ahd->enabled_targets[scsi_id] = tstate; 2367 return (tstate); 2368 } 2369 2370 #ifdef AHD_TARGET_MODE 2371 /* 2372 * Free per target mode instance (ID we respond to as a target) 2373 * transfer negotiation data structures. 2374 */ 2375 static void 2376 ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) 2377 { 2378 struct ahd_tmode_tstate *tstate; 2379 2380 /* 2381 * Don't clean up our "master" tstate. 2382 * It has our default user settings. 
2383 */ 2384 if (scsi_id == ahd->our_id 2385 && force == FALSE) 2386 return; 2387 2388 tstate = ahd->enabled_targets[scsi_id]; 2389 if (tstate != NULL) 2390 free(tstate, M_DEVBUF); 2391 ahd->enabled_targets[scsi_id] = NULL; 2392 } 2393 #endif 2394 2395 /* 2396 * Called when we have an active connection to a target on the bus, 2397 * this function finds the nearest period to the input period limited 2398 * by the capabilities of the bus connectivity of and sync settings for 2399 * the target. 2400 */ 2401 void 2402 ahd_devlimited_syncrate(struct ahd_softc *ahd, 2403 struct ahd_initiator_tinfo *tinfo, 2404 u_int *period, u_int *ppr_options, role_t role) 2405 { 2406 struct ahd_transinfo *transinfo; 2407 u_int maxsync; 2408 2409 if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0 2410 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) { 2411 maxsync = AHD_SYNCRATE_PACED; 2412 } else { 2413 maxsync = AHD_SYNCRATE_ULTRA; 2414 /* Can't do DT related options on an SE bus */ 2415 *ppr_options &= MSG_EXT_PPR_QAS_REQ; 2416 } 2417 /* 2418 * Never allow a value higher than our current goal 2419 * period otherwise we may allow a target initiated 2420 * negotiation to go above the limit as set by the 2421 * user. In the case of an initiator initiated 2422 * sync negotiation, we limit based on the user 2423 * setting. This allows the system to still accept 2424 * incoming negotiations even if target initiated 2425 * negotiation is not performed. 
2426 */ 2427 if (role == ROLE_TARGET) 2428 transinfo = &tinfo->user; 2429 else 2430 transinfo = &tinfo->goal; 2431 *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN); 2432 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { 2433 maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2); 2434 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 2435 } 2436 if (transinfo->period == 0) { 2437 *period = 0; 2438 *ppr_options = 0; 2439 } else { 2440 *period = MAX(*period, transinfo->period); 2441 ahd_find_syncrate(ahd, period, ppr_options, maxsync); 2442 } 2443 } 2444 2445 /* 2446 * Look up the valid period to SCSIRATE conversion in our table. 2447 * Return the period and offset that should be sent to the target 2448 * if this was the beginning of an SDTR. 2449 */ 2450 void 2451 ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, 2452 u_int *ppr_options, u_int maxsync) 2453 { 2454 if (*period < maxsync) 2455 *period = maxsync; 2456 2457 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0 2458 && *period > AHD_SYNCRATE_MIN_DT) 2459 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 2460 2461 if (*period > AHD_SYNCRATE_MIN) 2462 *period = 0; 2463 2464 /* Honor PPR option conformance rules. */ 2465 if (*period > AHD_SYNCRATE_PACED) 2466 *ppr_options &= ~MSG_EXT_PPR_RTI; 2467 2468 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0) 2469 *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ); 2470 2471 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0) 2472 *ppr_options &= MSG_EXT_PPR_QAS_REQ; 2473 2474 /* Skip all PACED only entries if IU is not available */ 2475 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0 2476 && *period < AHD_SYNCRATE_DT) 2477 *period = AHD_SYNCRATE_DT; 2478 2479 /* Skip all DT only entries if DT is not available */ 2480 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 2481 && *period < AHD_SYNCRATE_ULTRA2) 2482 *period = AHD_SYNCRATE_ULTRA2; 2483 } 2484 2485 /* 2486 * Truncate the given synchronous offset to a value the 2487 * current adapter type and syncrate are capable of. 
2488 */ 2489 void 2490 ahd_validate_offset(struct ahd_softc *ahd, 2491 struct ahd_initiator_tinfo *tinfo, 2492 u_int period, u_int *offset, int wide, 2493 role_t role) 2494 { 2495 u_int maxoffset; 2496 2497 /* Limit offset to what we can do */ 2498 if (period == 0) 2499 maxoffset = 0; 2500 else if (period <= AHD_SYNCRATE_PACED) { 2501 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) 2502 maxoffset = MAX_OFFSET_PACED_BUG; 2503 else 2504 maxoffset = MAX_OFFSET_PACED; 2505 } else 2506 maxoffset = MAX_OFFSET_NON_PACED; 2507 *offset = MIN(*offset, maxoffset); 2508 if (tinfo != NULL) { 2509 if (role == ROLE_TARGET) 2510 *offset = MIN(*offset, tinfo->user.offset); 2511 else 2512 *offset = MIN(*offset, tinfo->goal.offset); 2513 } 2514 } 2515 2516 /* 2517 * Truncate the given transfer width parameter to a value the 2518 * current adapter type is capable of. 2519 */ 2520 void 2521 ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, 2522 u_int *bus_width, role_t role) 2523 { 2524 switch (*bus_width) { 2525 default: 2526 if (ahd->features & AHD_WIDE) { 2527 /* Respond Wide */ 2528 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2529 break; 2530 } 2531 /* FALLTHROUGH */ 2532 case MSG_EXT_WDTR_BUS_8_BIT: 2533 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2534 break; 2535 } 2536 if (tinfo != NULL) { 2537 if (role == ROLE_TARGET) 2538 *bus_width = MIN(tinfo->user.width, *bus_width); 2539 else 2540 *bus_width = MIN(tinfo->goal.width, *bus_width); 2541 } 2542 } 2543 2544 /* 2545 * Update the bitmask of targets for which the controller should 2546 * negotiate with at the next convenient oportunity. This currently 2547 * means the next time we send the initial identify messages for 2548 * a new transaction. 
2549 */ 2550 int 2551 ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 2552 struct ahd_tmode_tstate *tstate, 2553 struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) 2554 { 2555 u_int auto_negotiate_orig; 2556 2557 auto_negotiate_orig = tstate->auto_negotiate; 2558 if (neg_type == AHD_NEG_ALWAYS) { 2559 /* 2560 * Force our "current" settings to be 2561 * unknown so that unless a bus reset 2562 * occurs the need to renegotiate is 2563 * recorded persistently. 2564 */ 2565 if ((ahd->features & AHD_WIDE) != 0) 2566 tinfo->curr.width = AHD_WIDTH_UNKNOWN; 2567 tinfo->curr.period = AHD_PERIOD_UNKNOWN; 2568 tinfo->curr.offset = AHD_OFFSET_UNKNOWN; 2569 } 2570 if (tinfo->curr.period != tinfo->goal.period 2571 || tinfo->curr.width != tinfo->goal.width 2572 || tinfo->curr.offset != tinfo->goal.offset 2573 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 2574 || (neg_type == AHD_NEG_IF_NON_ASYNC 2575 && (tinfo->goal.offset != 0 2576 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 2577 || tinfo->goal.ppr_options != 0))) 2578 tstate->auto_negotiate |= devinfo->target_mask; 2579 else 2580 tstate->auto_negotiate &= ~devinfo->target_mask; 2581 2582 return (auto_negotiate_orig != tstate->auto_negotiate); 2583 } 2584 2585 /* 2586 * Update the user/goal/curr tables of synchronous negotiation 2587 * parameters as well as, in the case of a current or active update, 2588 * any data structures on the host controller. In the case of an 2589 * active update, the specified target is currently talking to us on 2590 * the bus, so the transfer parameter update must take effect 2591 * immediately. 
2592 */ 2593 void 2594 ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 2595 u_int period, u_int offset, u_int ppr_options, 2596 u_int type, int paused) 2597 { 2598 struct ahd_initiator_tinfo *tinfo; 2599 struct ahd_tmode_tstate *tstate; 2600 u_int old_period; 2601 u_int old_offset; 2602 u_int old_ppr; 2603 int active; 2604 int update_needed; 2605 2606 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; 2607 update_needed = 0; 2608 2609 if (period == 0 || offset == 0) { 2610 period = 0; 2611 offset = 0; 2612 } 2613 2614 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 2615 devinfo->target, &tstate); 2616 2617 if ((type & AHD_TRANS_USER) != 0) { 2618 tinfo->user.period = period; 2619 tinfo->user.offset = offset; 2620 tinfo->user.ppr_options = ppr_options; 2621 } 2622 2623 if ((type & AHD_TRANS_GOAL) != 0) { 2624 tinfo->goal.period = period; 2625 tinfo->goal.offset = offset; 2626 tinfo->goal.ppr_options = ppr_options; 2627 } 2628 2629 old_period = tinfo->curr.period; 2630 old_offset = tinfo->curr.offset; 2631 old_ppr = tinfo->curr.ppr_options; 2632 2633 if ((type & AHD_TRANS_CUR) != 0 2634 && (old_period != period 2635 || old_offset != offset 2636 || old_ppr != ppr_options)) { 2637 2638 update_needed++; 2639 2640 tinfo->curr.period = period; 2641 tinfo->curr.offset = offset; 2642 tinfo->curr.ppr_options = ppr_options; 2643 2644 ahd_send_async(ahd, devinfo->channel, devinfo->target, 2645 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 2646 if (bootverbose) { 2647 if (offset != 0) { 2648 int options; 2649 2650 printf("%s: target %d synchronous with " 2651 "period = 0x%x, offset = 0x%x", 2652 ahd_name(ahd), devinfo->target, 2653 period, offset); 2654 options = 0; 2655 if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { 2656 printf("(RDSTRM"); 2657 options++; 2658 } 2659 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { 2660 printf("%s", options ? 
"|DT" : "(DT"); 2661 options++; 2662 } 2663 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { 2664 printf("%s", options ? "|IU" : "(IU"); 2665 options++; 2666 } 2667 if ((ppr_options & MSG_EXT_PPR_RTI) != 0) { 2668 printf("%s", options ? "|RTI" : "(RTI"); 2669 options++; 2670 } 2671 if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { 2672 printf("%s", options ? "|QAS" : "(QAS"); 2673 options++; 2674 } 2675 if (options != 0) 2676 printf(")\n"); 2677 else 2678 printf("\n"); 2679 } else { 2680 printf("%s: target %d using " 2681 "asynchronous transfers%s\n", 2682 ahd_name(ahd), devinfo->target, 2683 (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0 2684 ? "(QAS)" : ""); 2685 } 2686 } 2687 } 2688 /* 2689 * Always refresh the neg-table to handle the case of the 2690 * sequencer setting the ENATNO bit for a MK_MESSAGE request. 2691 * We will always renegotiate in that case if this is a 2692 * packetized request. Also manage the busfree expected flag 2693 * from this common routine so that we catch changes due to 2694 * WDTR or SDTR messages. 
2695 */ 2696 if ((type & AHD_TRANS_CUR) != 0) { 2697 if (!paused) 2698 ahd_pause(ahd); 2699 ahd_update_neg_table(ahd, devinfo, &tinfo->curr); 2700 if (!paused) 2701 ahd_unpause(ahd); 2702 if (ahd->msg_type != MSG_TYPE_NONE) { 2703 if ((old_ppr & MSG_EXT_PPR_IU_REQ) 2704 != (ppr_options & MSG_EXT_PPR_IU_REQ)) { 2705 #ifdef AHD_DEBUG 2706 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 2707 ahd_print_devinfo(ahd, devinfo); 2708 printf("Expecting IU Change busfree\n"); 2709 } 2710 #endif 2711 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE 2712 | MSG_FLAG_IU_REQ_CHANGED; 2713 } 2714 if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) { 2715 #ifdef AHD_DEBUG 2716 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 2717 printf("PPR with IU_REQ outstanding\n"); 2718 #endif 2719 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; 2720 } 2721 } 2722 } 2723 2724 update_needed += ahd_update_neg_request(ahd, devinfo, tstate, 2725 tinfo, AHD_NEG_TO_GOAL); 2726 2727 if (update_needed && active) 2728 ahd_update_pending_scbs(ahd); 2729 } 2730 2731 /* 2732 * Update the user/goal/curr tables of wide negotiation 2733 * parameters as well as, in the case of a current or active update, 2734 * any data structures on the host controller. In the case of an 2735 * active update, the specified target is currently talking to us on 2736 * the bus, so the transfer parameter update must take effect 2737 * immediately. 
2738 */ 2739 void 2740 ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 2741 u_int width, u_int type, int paused) 2742 { 2743 struct ahd_initiator_tinfo *tinfo; 2744 struct ahd_tmode_tstate *tstate; 2745 u_int oldwidth; 2746 int active; 2747 int update_needed; 2748 2749 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; 2750 update_needed = 0; 2751 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 2752 devinfo->target, &tstate); 2753 2754 if ((type & AHD_TRANS_USER) != 0) 2755 tinfo->user.width = width; 2756 2757 if ((type & AHD_TRANS_GOAL) != 0) 2758 tinfo->goal.width = width; 2759 2760 oldwidth = tinfo->curr.width; 2761 if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) { 2762 2763 update_needed++; 2764 2765 tinfo->curr.width = width; 2766 ahd_send_async(ahd, devinfo->channel, devinfo->target, 2767 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 2768 if (bootverbose) { 2769 printf("%s: target %d using %dbit transfers\n", 2770 ahd_name(ahd), devinfo->target, 2771 8 * (0x01 << width)); 2772 } 2773 } 2774 2775 if ((type & AHD_TRANS_CUR) != 0) { 2776 if (!paused) 2777 ahd_pause(ahd); 2778 ahd_update_neg_table(ahd, devinfo, &tinfo->curr); 2779 if (!paused) 2780 ahd_unpause(ahd); 2781 } 2782 2783 update_needed += ahd_update_neg_request(ahd, devinfo, tstate, 2784 tinfo, AHD_NEG_TO_GOAL); 2785 if (update_needed && active) 2786 ahd_update_pending_scbs(ahd); 2787 2788 } 2789 2790 /* 2791 * Update the current state of tagged queuing for a given target. 
2792 */ 2793 void 2794 ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 2795 ahd_queue_alg alg) 2796 { 2797 ahd_platform_set_tags(ahd, devinfo, alg); 2798 ahd_send_async(ahd, devinfo->channel, devinfo->target, 2799 devinfo->lun, AC_TRANSFER_NEG, &alg); 2800 } 2801 2802 static void 2803 ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 2804 struct ahd_transinfo *tinfo) 2805 { 2806 ahd_mode_state saved_modes; 2807 u_int period; 2808 u_int ppr_opts; 2809 u_int con_opts; 2810 u_int offset; 2811 u_int saved_negoaddr; 2812 uint8_t iocell_opts[sizeof(ahd->iocell_opts)]; 2813 2814 saved_modes = ahd_save_modes(ahd); 2815 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2816 2817 saved_negoaddr = ahd_inb(ahd, NEGOADDR); 2818 ahd_outb(ahd, NEGOADDR, devinfo->target); 2819 period = tinfo->period; 2820 offset = tinfo->offset; 2821 memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); 2822 ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ 2823 |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI); 2824 con_opts = 0; 2825 if (period == 0) 2826 period = AHD_SYNCRATE_ASYNC; 2827 if (period == AHD_SYNCRATE_160) { 2828 2829 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { 2830 /* 2831 * When the SPI4 spec was finalized, PACE transfers 2832 * was not made a configurable option in the PPR 2833 * message. Instead it is assumed to be enabled for 2834 * any syncrate faster than 80MHz. Nevertheless, 2835 * Harpoon2A4 allows this to be configurable. 2836 * 2837 * Harpoon2A4 also assumes at most 2 data bytes per 2838 * negotiated REQ/ACK offset. Paced transfers take 2839 * 4, so we must adjust our offset. 2840 */ 2841 ppr_opts |= PPROPT_PACE; 2842 offset *= 2; 2843 2844 /* 2845 * Harpoon2A assumed that there would be a 2846 * fallback rate between 160MHz and 80Mhz, 2847 * so 7 is used as the period factor rather 2848 * than 8 for 160MHz. 
2849 */ 2850 period = AHD_SYNCRATE_REVA_160; 2851 } 2852 if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0) 2853 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= 2854 ~AHD_PRECOMP_MASK; 2855 } else { 2856 /* 2857 * Precomp should be disabled for non-paced transfers. 2858 */ 2859 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; 2860 2861 if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0 2862 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0) { 2863 /* 2864 * Slow down our CRC interval to be 2865 * compatible with devices that can't 2866 * handle a CRC at full speed. 2867 */ 2868 con_opts |= ENSLOWCRC; 2869 } 2870 } 2871 2872 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW); 2873 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]); 2874 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE); 2875 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]); 2876 2877 ahd_outb(ahd, NEGPERIOD, period); 2878 ahd_outb(ahd, NEGPPROPTS, ppr_opts); 2879 ahd_outb(ahd, NEGOFFSET, offset); 2880 2881 if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT) 2882 con_opts |= WIDEXFER; 2883 2884 /* 2885 * During packetized transfers, the target will 2886 * give us the oportunity to send command packets 2887 * without us asserting attention. 2888 */ 2889 if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0) 2890 con_opts |= ENAUTOATNO; 2891 ahd_outb(ahd, NEGCONOPTS, con_opts); 2892 ahd_outb(ahd, NEGOADDR, saved_negoaddr); 2893 ahd_restore_modes(ahd, saved_modes); 2894 } 2895 2896 /* 2897 * When the transfer settings for a connection change, setup for 2898 * negotiation in pending SCBs to effect the change as quickly as 2899 * possible. We also cancel any negotiations that are scheduled 2900 * for inflight SCBs that have not been started yet. 
2901 */ 2902 static void 2903 ahd_update_pending_scbs(struct ahd_softc *ahd) 2904 { 2905 struct scb *pending_scb; 2906 int pending_scb_count; 2907 int i; 2908 int paused; 2909 u_int saved_scbptr; 2910 ahd_mode_state saved_modes; 2911 2912 /* 2913 * Traverse the pending SCB list and ensure that all of the 2914 * SCBs there have the proper settings. We can only safely 2915 * clear the negotiation required flag (setting requires the 2916 * execution queue to be modified) and this is only possible 2917 * if we are not already attempting to select out for this 2918 * SCB. For this reason, all callers only call this routine 2919 * if we are changing the negotiation settings for the currently 2920 * active transaction on the bus. 2921 */ 2922 pending_scb_count = 0; 2923 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { 2924 struct ahd_devinfo devinfo; 2925 struct hardware_scb *pending_hscb; 2926 struct ahd_initiator_tinfo *tinfo; 2927 struct ahd_tmode_tstate *tstate; 2928 2929 ahd_scb_devinfo(ahd, &devinfo, pending_scb); 2930 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, 2931 devinfo.our_scsiid, 2932 devinfo.target, &tstate); 2933 pending_hscb = pending_scb->hscb; 2934 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 2935 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 2936 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 2937 pending_hscb->control &= ~MK_MESSAGE; 2938 } 2939 ahd_sync_scb(ahd, pending_scb, 2940 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2941 pending_scb_count++; 2942 } 2943 2944 if (pending_scb_count == 0) 2945 return; 2946 2947 if (ahd_is_paused(ahd)) { 2948 paused = 1; 2949 } else { 2950 paused = 0; 2951 ahd_pause(ahd); 2952 } 2953 2954 /* 2955 * Force the sequencer to reinitialize the selection for 2956 * the command at the head of the execution queue if it 2957 * has already been setup. The negotiation changes may 2958 * effect whether we select-out with ATN. 
2959 */ 2960 saved_modes = ahd_save_modes(ahd); 2961 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2962 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 2963 saved_scbptr = ahd_get_scbptr(ahd); 2964 /* Ensure that the hscbs down on the card match the new information */ 2965 for (i = 0; i < ahd->scb_data.maxhscbs; i++) { 2966 struct hardware_scb *pending_hscb; 2967 u_int control; 2968 u_int scb_tag; 2969 2970 ahd_set_scbptr(ahd, i); 2971 scb_tag = i; 2972 pending_scb = ahd_lookup_scb(ahd, scb_tag); 2973 if (pending_scb == NULL) 2974 continue; 2975 2976 pending_hscb = pending_scb->hscb; 2977 control = ahd_inb_scbram(ahd, SCB_CONTROL); 2978 control &= ~MK_MESSAGE; 2979 control |= pending_hscb->control & MK_MESSAGE; 2980 ahd_outb(ahd, SCB_CONTROL, control); 2981 } 2982 ahd_set_scbptr(ahd, saved_scbptr); 2983 ahd_restore_modes(ahd, saved_modes); 2984 2985 if (paused == 0) 2986 ahd_unpause(ahd); 2987 } 2988 2989 /**************************** Pathing Information *****************************/ 2990 static void 2991 ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 2992 { 2993 ahd_mode_state saved_modes; 2994 u_int saved_scsiid; 2995 role_t role; 2996 int our_id; 2997 2998 saved_modes = ahd_save_modes(ahd); 2999 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 3000 3001 if (ahd_inb(ahd, SSTAT0) & TARGET) 3002 role = ROLE_TARGET; 3003 else 3004 role = ROLE_INITIATOR; 3005 3006 if (role == ROLE_TARGET 3007 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { 3008 /* We were selected, so pull our id from TARGIDIN */ 3009 our_id = ahd_inb(ahd, TARGIDIN) & OID; 3010 } else if (role == ROLE_TARGET) 3011 our_id = ahd_inb(ahd, TOWNID); 3012 else 3013 our_id = ahd_inb(ahd, IOWNID); 3014 3015 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); 3016 ahd_compile_devinfo(devinfo, 3017 our_id, 3018 SCSIID_TARGET(ahd, saved_scsiid), 3019 ahd_inb(ahd, SAVED_LUN), 3020 SCSIID_CHANNEL(ahd, saved_scsiid), 3021 role); 3022 ahd_restore_modes(ahd, saved_modes); 3023 
} 3024 3025 void 3026 ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 3027 { 3028 printf("%s:%c:%d:%d: ", ahd_name(ahd), 'A', 3029 devinfo->target, devinfo->lun); 3030 } 3031 3032 struct ahd_phase_table_entry* 3033 ahd_lookup_phase_entry(int phase) 3034 { 3035 struct ahd_phase_table_entry *entry; 3036 struct ahd_phase_table_entry *last_entry; 3037 3038 /* 3039 * num_phases doesn't include the default entry which 3040 * will be returned if the phase doesn't match. 3041 */ 3042 last_entry = &ahd_phase_table[num_phases]; 3043 for (entry = ahd_phase_table; entry < last_entry; entry++) { 3044 if (phase == entry->phase) 3045 break; 3046 } 3047 return (entry); 3048 } 3049 3050 void 3051 ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, 3052 u_int lun, char channel, role_t role) 3053 { 3054 devinfo->our_scsiid = our_id; 3055 devinfo->target = target; 3056 devinfo->lun = lun; 3057 devinfo->target_offset = target; 3058 devinfo->channel = channel; 3059 devinfo->role = role; 3060 if (channel == 'B') 3061 devinfo->target_offset += 8; 3062 devinfo->target_mask = (0x01 << devinfo->target_offset); 3063 } 3064 3065 static void 3066 ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 3067 struct scb *scb) 3068 { 3069 role_t role; 3070 int our_id; 3071 3072 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 3073 role = ROLE_INITIATOR; 3074 if ((scb->hscb->control & TARGET_SCB) != 0) 3075 role = ROLE_TARGET; 3076 ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb), 3077 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role); 3078 } 3079 3080 3081 /************************ Message Phase Processing ****************************/ 3082 /* 3083 * When an initiator transaction with the MK_MESSAGE flag either reconnects 3084 * or enters the initial message out phase, we are interrupted. Fill our 3085 * outgoing message buffer with the appropriate message and beging handing 3086 * the message phase(s) manually. 
 */
static void
ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahd->msgout_index = 0;
	ahd->msgout_len = 0;

	if (ahd_currently_packetized(ahd))
		ahd->msg_flags |= MSG_FLAG_PACKETIZED;

	if (ahd->send_msg_perror
	 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
		/* A queued parity-error message takes precedence. */
		ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
		ahd->msgout_len++;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printf("Setting up for Parity Error delivery\n");
#endif
		return;
	} else if (scb == NULL) {
		/* No transaction to speak for; answer with a NO-OP. */
		printf("%s: WARNING. No pending message for "
		       "I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
		ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
		ahd->msgout_len++;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		return;
	}

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && (scb->flags & SCB_PACKETIZED) == 0
	 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		/* Identify (plus optional queue tag) goes out first. */
		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
		ahd->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahd->msgout_buf[ahd->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
			ahd->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
		ahd->msgout_len++;
		ahd_print_path(ahd, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else if ((scb->flags & SCB_ABORT) != 0) {

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
		} else {
			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
		}
		ahd->msgout_len++;
		ahd_print_path(ahd, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahd_build_transfer_msg(ahd, devinfo);
		/*
		 * Clear our selection hardware in advance of potential
		 * PPR IU status change busfree.  We may have an entry in
		 * the waiting Q for this target, and we don't want to go
		 * about selecting while we handle the busfree and blow
		 * it away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else {
		printf("ahd_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
		      "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
		      ahd_inb(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
		      scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahd_outb(ahd, SCB_CONTROL,
		 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahd->msgout_index = 0;
	ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	int dowide;
	int dosync;
	int doppr;
	u_int period;
	u_int ppr_options;
	u_int offset;

	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	ahd_devlimited_syncrate(ahd, tinfo, &period,
				&ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	if (!dowide && !dosync && !doppr) {
		/* Nothing changed; renegotiate toward our goal anyway. */
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.period != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahd->features & AHD_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;

		if (bootverbose) {
			ahd_print_devinfo(ahd, devinfo);
			printf("Ensuring async\n");
		}
	}
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahd_validate_offset(ahd, tinfo, period, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahd_construct_ppr(ahd, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahd_construct_sdtr(ahd, devinfo, period, offset);
		}
	} else {
		ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		   u_int period, u_int offset)
{
	/* An offset of zero means async; advertise the async period code. */
	if (offset == 0)
		period = AHD_ASYNC_XFER_PERIOD;
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR_LEN;
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR;
	ahd->msgout_buf[ahd->msgout_index++] = period;
	ahd->msgout_buf[ahd->msgout_index++] = offset;
	ahd->msgout_len += 5;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		   u_int bus_width)
{
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR_LEN;
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR;
	ahd->msgout_buf[ahd->msgout_index++] = bus_width;
	ahd->msgout_len += 4;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/*
	 * Always request precompensation from
	 * the other target if we are running
	 * at paced syncrates.
	 */
	if (period <= AHD_SYNCRATE_PACED)
		ppr_options |= MSG_EXT_PPR_PCOMP_EN;
	/* An offset of zero means async; advertise the async period code. */
	if (offset == 0)
		period = AHD_ASYNC_XFER_PERIOD;
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR_LEN;
	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR;
	ahd->msgout_buf[ahd->msgout_index++] = period;
	ahd->msgout_buf[ahd->msgout_index++] = 0;
	ahd->msgout_buf[ahd->msgout_index++] = offset;
	ahd->msgout_buf[ahd->msgout_index++] = bus_width;
	ahd->msgout_buf[ahd->msgout_index++] = ppr_options;
	ahd->msgout_len += 8;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahd_name(ahd),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahd_clear_msg_state(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd->send_msg_perror = 0;
	ahd->msg_flags = MSG_FLAG_NONE;
	ahd->msgout_len = 0;
	ahd->msgin_index = 0;
	ahd->msg_type = MSG_TYPE_NONE;
	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahd_outb(ahd, CLRSINT1, CLRATNO);
	}
	ahd_outb(ahd, MSG_OUT, MSG_NOOP);
	ahd_outb(ahd, SEQ_FLAGS2,
		 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Manual message loop handler.
 */
/*
 * Drive the message in/out phases by hand.  Dispatches on the current
 * host message state (ahd->msg_type) and hands bytes to/from the chip
 * one REQ at a time, re-entering the sequencer when the session ends.
 */
static void
ahd_handle_message_phase(struct ahd_softc *ahd)
{
	struct ahd_devinfo devinfo;
	u_int bus_phase;
	int end_session;

	ahd_fetch_devinfo(ahd, &devinfo);
	end_session = FALSE;
	bus_phase = ahd_inb(ahd, LASTPHASE);

	if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
		printf("LQIRETRY for LQIPHASE_OUTPKT\n");
		ahd_outb(ahd, LQCTL2, LQIRETRY);
	}
reswitch:
	switch (ahd->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
			ahd_print_devinfo(ahd, &devinfo);
			printf("INITIATOR_MSG_OUT");
		}
#endif
		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahd_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this messages is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahd_outb(ahd, CLRSINT1, CLRATNO);
				ahd->send_msg_perror = 0;
				ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahd->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		if (ahd->send_msg_perror) {
			ahd_outb(ahd, CLRSINT1, CLRATNO);
			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf(" byte 0x%x\n", ahd->send_msg_perror);
#endif
			/*
			 * If we are notifying the target of a CRC error
			 * during packetized operations, the target is
			 * within its rights to acknowledge our message
			 * with a busfree.
			 */
			if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
			 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
				ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;

			ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
			break;
		}

		msgdone = ahd->msgout_index == ahd->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahd->msgout_index = 0;
			ahd_assert_atn(ahd);
		}

		lastbyte = ahd->msgout_index == (ahd->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahd_outb(ahd, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahd_outb(ahd, CLRSINT1, CLRREQINIT);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahd->msgout_buf[ahd->msgout_index]);
#endif
		ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
			ahd_print_devinfo(ahd, &devinfo);
			printf("INITIATOR_MSG_IN");
		}
#endif
		phasemis = bus_phase != P_MESGIN;
		if (phasemis) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahd_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			ahd->msgin_index = 0;
			if (bus_phase == P_MESGOUT
			 && (ahd->send_msg_perror != 0
			  || (ahd->msgout_len != 0
			   && ahd->msgout_index == 0))) {
				ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahd->msgin_buf[ahd->msgin_index]);
#endif

		message_done = ahd_parse_msg(ahd, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahd->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahd->msgout_len != 0) {
#ifdef AHD_DEBUG
				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
					ahd_print_devinfo(ahd, &devinfo);
					printf("Asserting ATN for response\n");
				}
#endif
				ahd_assert_atn(ahd);
			}
		} else
			ahd->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		/*
		 * By default, the message loop will continue.
		 */
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);

		if (ahd->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0
		 && ahd->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this messages is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahd->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO);
			ahd->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahd_inb(ahd, SCSIDAT);
			ahd_outb(ahd, SXFRCTL0,
				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahd->msgout_index == ahd->msgout_len;
		if (msgdone) {
			ahd_outb(ahd, SXFRCTL0,
				 ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN);
		ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * By default, the message loop will continue.
		 */
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT);
		msgdone = ahd_parse_msg(ahd, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahd->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 * and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahd->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahd->msgout_len != 0) {
				ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO);
				ahd_outb(ahd, SXFRCTL0,
					 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
				ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahd->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahd_outb(ahd, SXFRCTL0,
				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
			printf("%s: Returning to Idle Loop\n",
			       ahd_name(ahd));
			ahd_outb(ahd, LASTPHASE, P_BUSFREE);
			ahd_clear_msg_state(ahd);
			ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
		} else {
			ahd_clear_msg_state(ahd);
			ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP);
		}
	}
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 */
static int
ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/* Walk the outgoing buffer message by message. */
	while (index < ahd->msgout_len) {
		if (ahd->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* end_index is the extended message's last byte. */
			end_index = index + 1 + ahd->msgout_buf[index + 1];
			if (ahd->msgout_buf[index+2] == msgval
			 && type == AHDMSG_EXT) {

				if (full) {
					if (ahd->msgout_index > end_index)
						found = TRUE;
				} else if (ahd->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHDMSG_1B
			 && ahd->msgout_index > index
			 && (ahd->msgout_buf[index] == msgval
			  || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
			   && msgval == MSG_IDENTIFYFLAG)))
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 */
static int
ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	int reject;
	int done;
	int response;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahd->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahd_handle_msg_reject(ahd, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahd->msgin_index < 2)
			break;
		switch (ahd->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			u_int period;
			u_int ppr_options;
			u_int offset;
			u_int saved_offset;

			if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahd->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahd->msgin_buf[4];
			ahd_devlimited_syncrate(ahd, tinfo, &period,
						&ppr_options, devinfo->role);
			ahd_validate_offset(ahd, tinfo, period, &offset,
					    tinfo->curr.width, devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahd->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahd_set_syncrate(ahd, devinfo, period,
					 offset, ppr_options,
					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_sdtr(ahd, devinfo,
						   period, offset);
				ahd->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahd->msgin_buf[3];
			saved_width = bus_width;
			ahd_validate_width(ahd, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printf("(%s:%c:%d:%d): requested %dBit "
					       "transfers.  Rejecting...\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_wdtr(ahd, devinfo, bus_width);
				ahd->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			ahd_set_width(ahd, devinfo, bus_width,
				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				      /*paused*/TRUE);
			/* After a wide message, we are async */
			ahd_set_syncrate(ahd, devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_ACTIVE, /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {

				/* Follow up with a sync negotiation. */
				if (tinfo->goal.offset) {
					ahd->msgout_index = 0;
					ahd->msgout_len = 0;
					ahd_build_transfer_msg(ahd, devinfo);
					ahd->msgout_index = 0;
					response = TRUE;
				}
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			u_int period;
			u_int offset;
			u_int bus_width;
			u_int ppr_options;
			u_int saved_width;
			u_int saved_offset;
			u_int saved_ppr_options;

			if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahd->msgin_buf[3];
			offset = ahd->msgin_buf[5];
			bus_width = ahd->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahd->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period <= 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Transfer options are only available if we
			 * are negotiating wide.
			 */
			if (bus_width == 0)
				ppr_options &= MSG_EXT_PPR_QAS_REQ;

			ahd_validate_width(ahd, tinfo, &bus_width,
					   devinfo->role);
			ahd_devlimited_syncrate(ahd, tinfo, &period,
						&ppr_options, devinfo->role);
			ahd_validate_offset(ahd, tinfo, period, &offset,
					    bus_width, devinfo->role);

			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printf("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printf("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_ppr(ahd, devinfo, period, offset,
						  bus_width, ppr_options);
				ahd->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahd->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			ahd_set_width(ahd, devinfo, bus_width,
				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				      /*paused*/TRUE);
			ahd_set_syncrate(ahd, devinfo, period,
					 offset, ppr_options,
					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
					 /*paused*/TRUE);

			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message.  Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHD_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahd_restart(ahd);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahd_inb(ahd, INITIATOR_TAG);
		ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahd->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahd_queue_lstate_event(ahd, lstate,
						       devinfo->our_scsiid,
						       ahd->msgin_buf[0],
						       /*arg*/tag);
				ahd_send_lstate_events(ahd, lstate);
			}
		}
		ahd_restart(ahd);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_QAS_REQUEST:
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printf("%s: QAS request.  SCSISIGI == 0x%x\n",
			       ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
#endif
		ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
		/* FALLTHROUGH */
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
4185 */ 4186 ahd->msgout_index = 0; 4187 ahd->msgout_len = 1; 4188 ahd->msgout_buf[0] = MSG_MESSAGE_REJECT; 4189 done = MSGLOOP_MSGCOMPLETE; 4190 response = TRUE; 4191 } 4192 4193 if (done != MSGLOOP_IN_PROG && !response) 4194 /* Clear the outgoing message buffer */ 4195 ahd->msgout_len = 0; 4196 4197 return (done); 4198 } 4199 4200 /* 4201 * Process a message reject message. 4202 */ 4203 static int 4204 ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4205 { 4206 /* 4207 * What we care about here is if we had an 4208 * outstanding SDTR or WDTR message for this 4209 * target. If we did, this is a signal that 4210 * the target is refusing negotiation. 4211 */ 4212 struct scb *scb; 4213 struct ahd_initiator_tinfo *tinfo; 4214 struct ahd_tmode_tstate *tstate; 4215 u_int scb_index; 4216 u_int last_msg; 4217 int response = 0; 4218 4219 scb_index = ahd_get_scbptr(ahd); 4220 scb = ahd_lookup_scb(ahd, scb_index); 4221 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, 4222 devinfo->our_scsiid, 4223 devinfo->target, &tstate); 4224 /* Might be necessary */ 4225 last_msg = ahd_inb(ahd, LAST_MSG); 4226 4227 if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 4228 if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE) 4229 && tinfo->goal.period <= AHD_SYNCRATE_PACED) { 4230 /* 4231 * Target may not like our SPI-4 PPR Options. 4232 * Attempt to negotiate 80MHz which will turn 4233 * off these options. 4234 */ 4235 if (bootverbose) { 4236 printf("(%s:%c:%d:%d): PPR Rejected. " 4237 "Trying simple U160 PPR\n", 4238 ahd_name(ahd), devinfo->channel, 4239 devinfo->target, devinfo->lun); 4240 } 4241 tinfo->goal.period = AHD_SYNCRATE_DT; 4242 tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ 4243 | MSG_EXT_PPR_QAS_REQ 4244 | MSG_EXT_PPR_DT_REQ; 4245 } else { 4246 /* 4247 * Target does not support the PPR message. 4248 * Attempt to negotiate SPI-2 style. 4249 */ 4250 if (bootverbose) { 4251 printf("(%s:%c:%d:%d): PPR Rejected. 
" 4252 "Trying WDTR/SDTR\n", 4253 ahd_name(ahd), devinfo->channel, 4254 devinfo->target, devinfo->lun); 4255 } 4256 tinfo->goal.ppr_options = 0; 4257 tinfo->curr.transport_version = 2; 4258 tinfo->goal.transport_version = 2; 4259 } 4260 ahd->msgout_index = 0; 4261 ahd->msgout_len = 0; 4262 ahd_build_transfer_msg(ahd, devinfo); 4263 ahd->msgout_index = 0; 4264 response = 1; 4265 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 4266 4267 /* note 8bit xfers */ 4268 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 4269 "8bit transfers\n", ahd_name(ahd), 4270 devinfo->channel, devinfo->target, devinfo->lun); 4271 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 4272 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 4273 /*paused*/TRUE); 4274 /* 4275 * No need to clear the sync rate. If the target 4276 * did not accept the command, our syncrate is 4277 * unaffected. If the target started the negotiation, 4278 * but rejected our response, we already cleared the 4279 * sync rate before sending our WDTR. 4280 */ 4281 if (tinfo->goal.offset != tinfo->curr.offset) { 4282 4283 /* Start the sync negotiation */ 4284 ahd->msgout_index = 0; 4285 ahd->msgout_len = 0; 4286 ahd_build_transfer_msg(ahd, devinfo); 4287 ahd->msgout_index = 0; 4288 response = 1; 4289 } 4290 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 4291 /* note asynch xfers and clear flag */ 4292 ahd_set_syncrate(ahd, devinfo, /*period*/0, 4293 /*offset*/0, /*ppr_options*/0, 4294 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 4295 /*paused*/TRUE); 4296 printf("(%s:%c:%d:%d): refuses synchronous negotiation. " 4297 "Using asynchronous transfers\n", 4298 ahd_name(ahd), devinfo->channel, 4299 devinfo->target, devinfo->lun); 4300 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 4301 int tag_type; 4302 int mask; 4303 4304 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 4305 4306 if (tag_type == MSG_SIMPLE_TASK) { 4307 printf("(%s:%c:%d:%d): refuses tagged commands. 
"
			       "Performing non-tagged I/O\n", ahd_name(ahd),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE);
			/*
			 * NOTE(review): presumably clears the tag-enable and
			 * tag-type bits in the SCB control byte -- confirm
			 * against the SCB_CONTROL register layout.
			 */
			mask = ~0x23;
		} else {
			printf("(%s:%c:%d:%d): refuses %s tagged commands. "
			       "Performing simple queue tagged I/O only\n",
			       ahd_name(ahd), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC);
			/* Drop back to simple queue tags only. */
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahd_outb(ahd, SCB_CONTROL,
			 ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
		scb->hscb->control &= mask;
		ahd_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
		ahd_assert_atn(ahd);
		ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     SCB_GET_TAG(scb));

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
				   SCB_GET_CHANNEL(ahd, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) {
		/*
		 * Most likely the device believes that we had
		 * previously negotiated packetized.
		 */
		ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
			       |  MSG_FLAG_IU_REQ_CHANGED;

		ahd_force_renegotiation(ahd, devinfo);
		ahd->msgout_index = 0;
		ahd->msgout_len = 0;
		ahd_build_transfer_msg(ahd, devinfo);
		ahd->msgout_index = 0;
		response = 1;
	} else {
		/*
		 * Otherwise, we ignore it.
4363 */ 4364 printf("%s:%c:%d: Message reject for %x -- ignored\n", 4365 ahd_name(ahd), devinfo->channel, devinfo->target, 4366 last_msg); 4367 } 4368 return (response); 4369 } 4370 4371 /* 4372 * Process an ingnore wide residue message. 4373 */ 4374 static void 4375 ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4376 { 4377 u_int scb_index; 4378 struct scb *scb; 4379 4380 scb_index = ahd_get_scbptr(ahd); 4381 scb = ahd_lookup_scb(ahd, scb_index); 4382 /* 4383 * XXX Actually check data direction in the sequencer? 4384 * Perhaps add datadir to some spare bits in the hscb? 4385 */ 4386 if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0 4387 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) { 4388 /* 4389 * Ignore the message if we haven't 4390 * seen an appropriate data phase yet. 4391 */ 4392 } else { 4393 /* 4394 * If the residual occurred on the last 4395 * transfer and the transfer request was 4396 * expected to end on an odd count, do 4397 * nothing. Otherwise, subtract a byte 4398 * and update the residual count accordingly. 4399 */ 4400 uint32_t sgptr; 4401 4402 sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); 4403 if ((sgptr & SG_LIST_NULL) != 0 4404 && ahd_inb(ahd, DATA_COUNT_ODD) == 1) { 4405 /* 4406 * If the residual occurred on the last 4407 * transfer and the transfer request was 4408 * expected to end on an odd count, do 4409 * nothing. 
4410 */ 4411 } else { 4412 uint32_t data_cnt; 4413 uint64_t data_addr; 4414 uint32_t sglen; 4415 4416 /* Pull in the rest of the sgptr */ 4417 sgptr |= 4418 (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 3) << 24) 4419 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 2) << 16) 4420 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 1) << 8); 4421 sgptr &= SG_PTR_MASK; 4422 data_cnt = 4423 (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24) 4424 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+2) << 16) 4425 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+1) << 8) 4426 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT)); 4427 4428 data_addr = (((uint64_t)ahd_inb(ahd, SHADDR + 7)) << 56) 4429 | (((uint64_t)ahd_inb(ahd, SHADDR + 6)) << 48) 4430 | (((uint64_t)ahd_inb(ahd, SHADDR + 5)) << 40) 4431 | (((uint64_t)ahd_inb(ahd, SHADDR + 4)) << 32) 4432 | (ahd_inb(ahd, SHADDR + 3) << 24) 4433 | (ahd_inb(ahd, SHADDR + 2) << 16) 4434 | (ahd_inb(ahd, SHADDR + 1) << 8) 4435 | (ahd_inb(ahd, SHADDR)); 4436 4437 data_cnt += 1; 4438 data_addr -= 1; 4439 4440 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { 4441 struct ahd_dma64_seg *sg; 4442 4443 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 4444 4445 /* 4446 * The residual sg ptr points to the next S/G 4447 * to load so we must go back one. 4448 */ 4449 sg--; 4450 sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 4451 if (sg != scb->sg_list 4452 && sglen < (data_cnt & AHD_SG_LEN_MASK)) { 4453 4454 sg--; 4455 sglen = ahd_le32toh(sg->len); 4456 /* 4457 * Preserve High Address and SG_LIST 4458 * bits while setting the count to 1. 4459 */ 4460 data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); 4461 data_addr = ahd_le64toh(sg->addr) 4462 + (sglen & AHD_SG_LEN_MASK) 4463 - 1; 4464 4465 /* 4466 * Increment sg so it points to the 4467 * "next" sg. 
4468 */ 4469 sg++; 4470 sgptr = ahd_sg_virt_to_bus(ahd, scb, 4471 sg); 4472 } 4473 } else { 4474 struct ahd_dma_seg *sg; 4475 4476 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 4477 4478 /* 4479 * The residual sg ptr points to the next S/G 4480 * to load so we must go back one. 4481 */ 4482 sg--; 4483 sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 4484 if (sg != scb->sg_list 4485 && sglen < (data_cnt & AHD_SG_LEN_MASK)) { 4486 4487 sg--; 4488 sglen = ahd_le32toh(sg->len); 4489 /* 4490 * Preserve High Address and SG_LIST 4491 * bits while setting the count to 1. 4492 */ 4493 data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); 4494 data_addr = ahd_le32toh(sg->addr) 4495 + (sglen & AHD_SG_LEN_MASK) 4496 - 1; 4497 4498 /* 4499 * Increment sg so it points to the 4500 * "next" sg. 4501 */ 4502 sg++; 4503 sgptr = ahd_sg_virt_to_bus(ahd, scb, 4504 sg); 4505 } 4506 } 4507 ahd_outb(ahd, SCB_RESIDUAL_SGPTR + 3, sgptr >> 24); 4508 ahd_outb(ahd, SCB_RESIDUAL_SGPTR + 2, sgptr >> 16); 4509 ahd_outb(ahd, SCB_RESIDUAL_SGPTR + 1, sgptr >> 8); 4510 ahd_outb(ahd, SCB_RESIDUAL_SGPTR, sgptr); 4511 4512 ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24); 4513 ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16); 4514 ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8); 4515 ahd_outb(ahd, SCB_RESIDUAL_DATACNT, data_cnt); 4516 4517 /* 4518 * The FIFO's pointers will be updated if/when the 4519 * sequencer re-enters a data phase. 4520 */ 4521 } 4522 } 4523 } 4524 4525 4526 /* 4527 * Reinitialize the data pointers for the active transfer 4528 * based on its current residual. 
4529 */ 4530 static void 4531 ahd_reinitialize_dataptrs(struct ahd_softc *ahd) 4532 { 4533 struct scb *scb; 4534 ahd_mode_state saved_modes; 4535 u_int scb_index; 4536 u_int wait; 4537 uint32_t sgptr; 4538 uint32_t resid; 4539 uint64_t dataptr; 4540 4541 AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, 4542 AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); 4543 4544 scb_index = ahd_get_scbptr(ahd); 4545 scb = ahd_lookup_scb(ahd, scb_index); 4546 4547 /* 4548 * Release and reacquire the FIFO so we 4549 * have a clean slate. 4550 */ 4551 ahd_outb(ahd, DFFSXFRCTL, CLRCHN); 4552 wait = 1000; 4553 do { 4554 ahd_delay(100); 4555 } while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE)); 4556 if (wait == 0) { 4557 ahd_print_path(ahd, scb); 4558 printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); 4559 ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); 4560 } 4561 saved_modes = ahd_save_modes(ahd); 4562 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4563 ahd_outb(ahd, DFFSTAT, 4564 ahd_inb(ahd, DFFSTAT) 4565 | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0)); 4566 4567 /* 4568 * Determine initial values for data_addr and data_cnt 4569 * for resuming the data phase. 
4570 */ 4571 sgptr = (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 3) << 24) 4572 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 2) << 16) 4573 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 1) << 8) 4574 | ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); 4575 sgptr &= SG_PTR_MASK; 4576 4577 resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) 4578 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) 4579 | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); 4580 4581 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { 4582 struct ahd_dma64_seg *sg; 4583 4584 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 4585 4586 /* The residual sg_ptr always points to the next sg */ 4587 sg--; 4588 4589 dataptr = ahd_le64toh(sg->addr) 4590 + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) 4591 - resid; 4592 ahd_outb(ahd, HADDR + 7, dataptr >> 56); 4593 ahd_outb(ahd, HADDR + 6, dataptr >> 48); 4594 ahd_outb(ahd, HADDR + 5, dataptr >> 40); 4595 ahd_outb(ahd, HADDR + 4, dataptr >> 32); 4596 } else { 4597 struct ahd_dma_seg *sg; 4598 4599 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 4600 4601 /* The residual sg_ptr always points to the next sg */ 4602 sg--; 4603 4604 dataptr = ahd_le32toh(sg->addr) 4605 + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) 4606 - resid; 4607 ahd_outb(ahd, HADDR + 4, 4608 (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); 4609 } 4610 ahd_outb(ahd, HADDR + 3, dataptr >> 24); 4611 ahd_outb(ahd, HADDR + 2, dataptr >> 16); 4612 ahd_outb(ahd, HADDR + 1, dataptr >> 8); 4613 ahd_outb(ahd, HADDR, dataptr); 4614 ahd_outb(ahd, HCNT + 2, resid >> 16); 4615 ahd_outb(ahd, HCNT + 1, resid >> 8); 4616 ahd_outb(ahd, HCNT, resid); 4617 } 4618 4619 /* 4620 * Handle the effects of issuing a bus device reset message. 
4621 */ 4622 static void 4623 ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4624 u_int lun, cam_status status, char *message, 4625 int verbose_level) 4626 { 4627 #ifdef AHD_TARGET_MODE 4628 struct ahd_tmode_tstate* tstate; 4629 #endif 4630 int found; 4631 4632 found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, 4633 lun, SCB_LIST_NULL, devinfo->role, 4634 status); 4635 4636 #ifdef AHD_TARGET_MODE 4637 /* 4638 * Send an immediate notify ccb to all target mord peripheral 4639 * drivers affected by this action. 4640 */ 4641 tstate = ahd->enabled_targets[devinfo->our_scsiid]; 4642 if (tstate != NULL) { 4643 u_int cur_lun; 4644 u_int max_lun; 4645 4646 if (lun != CAM_LUN_WILDCARD) { 4647 cur_lun = 0; 4648 max_lun = AHD_NUM_LUNS - 1; 4649 } else { 4650 cur_lun = lun; 4651 max_lun = lun; 4652 } 4653 for (cur_lun <= max_lun; cur_lun++) { 4654 struct ahd_tmode_lstate* lstate; 4655 4656 lstate = tstate->enabled_luns[cur_lun]; 4657 if (lstate == NULL) 4658 continue; 4659 4660 ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, 4661 MSG_BUS_DEV_RESET, /*arg*/0); 4662 ahd_send_lstate_events(ahd, lstate); 4663 } 4664 } 4665 #endif 4666 4667 /* 4668 * Go back to async/narrow transfers and renegotiate. 4669 */ 4670 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 4671 AHD_TRANS_CUR, /*paused*/TRUE); 4672 ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, 4673 /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); 4674 4675 ahd_send_async(ahd, devinfo->channel, devinfo->target, 4676 lun, AC_SENT_BDR, NULL); 4677 4678 if (message != NULL 4679 && (verbose_level <= bootverbose)) 4680 printf("%s: %s on %c:%d. 
%d SCBs aborted\n", ahd_name(ahd), 4681 message, devinfo->channel, devinfo->target, found); 4682 } 4683 4684 #ifdef AHD_TARGET_MODE 4685 static void 4686 ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4687 struct scb *scb) 4688 { 4689 4690 /* 4691 * To facilitate adding multiple messages together, 4692 * each routine should increment the index and len 4693 * variables instead of setting them explicitly. 4694 */ 4695 ahd->msgout_index = 0; 4696 ahd->msgout_len = 0; 4697 4698 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 4699 ahd_build_transfer_msg(ahd, devinfo); 4700 else 4701 panic("ahd_intr: AWAITING target message with no message"); 4702 4703 ahd->msgout_index = 0; 4704 ahd->msg_type = MSG_TYPE_TARGET_MSGIN; 4705 } 4706 #endif 4707 /**************************** Initialization **********************************/ 4708 static u_int 4709 ahd_sglist_size(struct ahd_softc *ahd) 4710 { 4711 bus_size_t list_size; 4712 4713 list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; 4714 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 4715 list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; 4716 return (list_size); 4717 } 4718 4719 /* 4720 * Calculate the optimum S/G List allocation size. S/G elements used 4721 * for a given transaction must be physically contiguous. Assume the 4722 * OS will allocate full pages to us, so it doesn't make sense to request 4723 * less than a page. 4724 */ 4725 static u_int 4726 ahd_sglist_allocsize(struct ahd_softc *ahd) 4727 { 4728 bus_size_t sg_list_increment; 4729 bus_size_t sg_list_size; 4730 bus_size_t max_list_size; 4731 bus_size_t best_list_size; 4732 4733 /* Start out with the minimum required for AHD_NSEG. */ 4734 sg_list_increment = ahd_sglist_size(ahd); 4735 sg_list_size = sg_list_increment; 4736 4737 /* Get us as close as possible to a page in size. 
*/ 4738 while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) 4739 sg_list_size += sg_list_increment; 4740 4741 /* 4742 * Try to reduce the amount of wastage by allocating 4743 * multiple pages. 4744 */ 4745 best_list_size = sg_list_size; 4746 max_list_size = roundup(sg_list_increment, PAGE_SIZE); 4747 if (max_list_size < 4 * PAGE_SIZE) 4748 max_list_size = 4 * PAGE_SIZE; 4749 if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) 4750 max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); 4751 while ((sg_list_size + sg_list_increment) <= max_list_size 4752 && (sg_list_size % PAGE_SIZE) != 0) { 4753 bus_size_t new_mod; 4754 bus_size_t best_mod; 4755 4756 sg_list_size += sg_list_increment; 4757 new_mod = sg_list_size % PAGE_SIZE; 4758 best_mod = best_list_size % PAGE_SIZE; 4759 if (new_mod > best_mod || new_mod == 0) { 4760 best_list_size = sg_list_size; 4761 } 4762 } 4763 return (best_list_size); 4764 } 4765 4766 /* 4767 * Allocate a controller structure for a new device 4768 * and perform initial initializion. 
4769 */ 4770 struct ahd_softc * 4771 ahd_alloc(void *platform_arg, char *name) 4772 { 4773 struct ahd_softc *ahd; 4774 4775 #ifndef __FreeBSD__ 4776 ahd = malloc(sizeof(*ahd), M_DEVBUF, M_NOWAIT); 4777 if (!ahd) { 4778 printf("aic7xxx: cannot malloc softc!\n"); 4779 free(name, M_DEVBUF); 4780 return NULL; 4781 } 4782 #else 4783 ahd = device_get_softc((device_t)platform_arg); 4784 #endif 4785 memset(ahd, 0, sizeof(*ahd)); 4786 ahd->seep_config = malloc(sizeof(*ahd->seep_config), 4787 M_DEVBUF, M_NOWAIT); 4788 if (ahd->seep_config == NULL) { 4789 #ifndef __FreeBSD__ 4790 free(ahd, M_DEVBUF); 4791 #endif 4792 free(name, M_DEVBUF); 4793 return (NULL); 4794 } 4795 LIST_INIT(&ahd->pending_scbs); 4796 /* We don't know our unit number until the OSM sets it */ 4797 ahd->name = name; 4798 ahd->unit = -1; 4799 ahd->description = NULL; 4800 ahd->bus_description = NULL; 4801 ahd->channel = 'A'; 4802 ahd->chip = AHD_NONE; 4803 ahd->features = AHD_FENONE; 4804 ahd->bugs = AHD_BUGNONE; 4805 ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A 4806 | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; 4807 ahd_timer_init(&ahd->reset_timer); 4808 ahd_timer_init(&ahd->stat_timer); 4809 ahd->int_coalessing_timer = AHD_INT_COALESSING_TIMER_DEFAULT; 4810 ahd->int_coalessing_maxcmds = AHD_INT_COALESSING_MAXCMDS_DEFAULT; 4811 ahd->int_coalessing_mincmds = AHD_INT_COALESSING_MINCMDS_DEFAULT; 4812 ahd->int_coalessing_threshold = AHD_INT_COALESSING_THRESHOLD_DEFAULT; 4813 ahd->int_coalessing_stop_threshold = 4814 AHD_INT_COALESSING_STOP_THRESHOLD_DEFAULT; 4815 4816 if (ahd_platform_alloc(ahd, platform_arg) != 0) { 4817 ahd_free(ahd); 4818 ahd = NULL; 4819 } 4820 #ifdef AHD_DEBUG 4821 if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { 4822 printf("%s: scb size = 0x%x, hscb size = 0x%x\n", 4823 ahd_name(ahd), (u_int)sizeof(struct scb), 4824 (u_int)sizeof(struct hardware_scb)); 4825 } 4826 #endif 4827 return (ahd); 4828 } 4829 4830 int 4831 ahd_softc_init(struct ahd_softc *ahd) 4832 { 4833 4834 ahd->unpause = 
0; 4835 ahd->pause = PAUSE; 4836 return (0); 4837 } 4838 4839 void 4840 ahd_softc_insert(struct ahd_softc *ahd) 4841 { 4842 struct ahd_softc *list_ahd; 4843 4844 #if AHD_PCI_CONFIG > 0 4845 /* 4846 * Second Function PCI devices need to inherit some 4847 * settings from function 0. 4848 */ 4849 if ((ahd->features & AHD_MULTI_FUNC) != 0) { 4850 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { 4851 ahd_dev_softc_t list_pci; 4852 ahd_dev_softc_t pci; 4853 4854 list_pci = list_ahd->dev_softc; 4855 pci = ahd->dev_softc; 4856 if (ahd_get_pci_slot(list_pci) == ahd_get_pci_slot(pci) 4857 && ahd_get_pci_bus(list_pci) == ahd_get_pci_bus(pci)) { 4858 struct ahd_softc *master; 4859 struct ahd_softc *slave; 4860 4861 if (ahd_get_pci_function(list_pci) == 0) { 4862 master = list_ahd; 4863 slave = ahd; 4864 } else { 4865 master = ahd; 4866 slave = list_ahd; 4867 } 4868 slave->flags &= ~AHD_BIOS_ENABLED; 4869 slave->flags |= 4870 master->flags & AHD_BIOS_ENABLED; 4871 break; 4872 } 4873 } 4874 } 4875 #endif 4876 4877 /* 4878 * Insertion sort into our list of softcs. 4879 */ 4880 list_ahd = TAILQ_FIRST(&ahd_tailq); 4881 while (list_ahd != NULL 4882 && ahd_softc_comp(ahd, list_ahd) <= 0) 4883 list_ahd = TAILQ_NEXT(list_ahd, links); 4884 if (list_ahd != NULL) 4885 TAILQ_INSERT_BEFORE(list_ahd, ahd, links); 4886 else 4887 TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links); 4888 ahd->init_level++; 4889 } 4890 4891 /* 4892 * Verify that the passed in softc pointer is for a 4893 * controller that is still configured. 
4894 */ 4895 struct ahd_softc * 4896 ahd_find_softc(struct ahd_softc *ahd) 4897 { 4898 struct ahd_softc *list_ahd; 4899 4900 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { 4901 if (list_ahd == ahd) 4902 return (ahd); 4903 } 4904 return (NULL); 4905 } 4906 4907 void 4908 ahd_set_unit(struct ahd_softc *ahd, int unit) 4909 { 4910 ahd->unit = unit; 4911 } 4912 4913 void 4914 ahd_set_name(struct ahd_softc *ahd, char *name) 4915 { 4916 if (ahd->name != NULL) 4917 free(ahd->name, M_DEVBUF); 4918 ahd->name = name; 4919 } 4920 4921 void 4922 ahd_free(struct ahd_softc *ahd) 4923 { 4924 int i; 4925 4926 switch (ahd->init_level) { 4927 default: 4928 case 5: 4929 ahd_shutdown(ahd); 4930 TAILQ_REMOVE(&ahd_tailq, ahd, links); 4931 /* FALLTHROUGH */ 4932 case 4: 4933 ahd_dmamap_unload(ahd, ahd->shared_data_dmat, 4934 ahd->shared_data_dmamap); 4935 /* FALLTHROUGH */ 4936 case 3: 4937 ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, 4938 ahd->shared_data_dmamap); 4939 ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, 4940 ahd->shared_data_dmamap); 4941 /* FALLTHROUGH */ 4942 case 2: 4943 ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); 4944 case 1: 4945 #ifndef __linux__ 4946 ahd_dma_tag_destroy(ahd, ahd->buffer_dmat); 4947 #endif 4948 break; 4949 case 0: 4950 break; 4951 } 4952 4953 #ifndef __linux__ 4954 ahd_dma_tag_destroy(ahd, ahd->parent_dmat); 4955 #endif 4956 ahd_platform_free(ahd); 4957 ahd_fini_scbdata(ahd); 4958 for (i = 0; i < AHD_NUM_TARGETS; i++) { 4959 struct ahd_tmode_tstate *tstate; 4960 4961 tstate = ahd->enabled_targets[i]; 4962 if (tstate != NULL) { 4963 #if AHD_TARGET_MODE 4964 int j; 4965 4966 for (j = 0; j < AHD_NUM_LUNS; j++) { 4967 struct ahd_tmode_lstate *lstate; 4968 4969 lstate = tstate->enabled_luns[j]; 4970 if (lstate != NULL) { 4971 xpt_free_path(lstate->path); 4972 free(lstate, M_DEVBUF); 4973 } 4974 } 4975 #endif 4976 free(tstate, M_DEVBUF); 4977 } 4978 } 4979 #if AHD_TARGET_MODE 4980 if (ahd->black_hole != NULL) { 4981 
xpt_free_path(ahd->black_hole->path);
		free(ahd->black_hole, M_DEVBUF);
	}
#endif
	if (ahd->name != NULL)
		free(ahd->name, M_DEVBUF);
	if (ahd->seep_config != NULL)
		free(ahd->seep_config, M_DEVBUF);
	if (ahd->saved_stack != NULL)
		free(ahd->saved_stack, M_DEVBUF);
#ifndef __FreeBSD__
	/* On FreeBSD the softc itself is owned by the bus framework. */
	free(ahd, M_DEVBUF);
#endif
	return;
}

/*
 * Quiesce the controller: stop timers and reset the chip.  Registered
 * as a shutdown hook, hence the void * argument.
 */
void
ahd_shutdown(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;

	/*
	 * Stop periodic timer callbacks.
	 */
	ahd_timer_stop(&ahd->reset_timer);
	ahd_timer_stop(&ahd->stat_timer);

	/* This will reset most registers to 0, but not all */
	ahd_reset(ahd);
}

/*
 * Reset the controller and record some information about it
 * that is only available just after a reset.
 */
int
ahd_reset(struct ahd_softc *ahd)
{
	u_int sxfrctl1;
	int wait;
	uint32_t cmd;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);

	/* Saved so PERR/SERR enables can be restored after CHIPRST. */
	cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		uint32_t mod_cmd;

		/*
		 * A4 Razor #632
		 * During the assertion of CHIPRST, the chip
		 * does not disable its parity logic prior to
		 * the start of the reset.  This may cause a
		 * parity error to be detected and thus a
		 * spurious SERR or PERR assertion.  Disable
		 * PERR and SERR responses during the CHIPRST.
5047 */ 5048 mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); 5049 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, 5050 mod_cmd, /*bytes*/2); 5051 } 5052 ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause); 5053 5054 /* 5055 * Ensure that the reset has finished. We delay 1000us 5056 * prior to reading the register to make sure the chip 5057 * has sufficiently completed its reset to handle register 5058 * accesses. 5059 */ 5060 wait = 1000; 5061 do { 5062 ahd_delay(1000); 5063 } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK)); 5064 5065 if (wait == 0) { 5066 printf("%s: WARNING - Failed chip reset! " 5067 "Trying to initialize anyway.\n", ahd_name(ahd)); 5068 } 5069 ahd_outb(ahd, HCNTRL, ahd->pause); 5070 5071 if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { 5072 /* 5073 * Clear any latched PCI error status and restore 5074 * previous SERR and PERR response enables. 5075 */ 5076 ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, 5077 0xFF, /*bytes*/1); 5078 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, 5079 cmd, /*bytes*/2); 5080 } 5081 5082 /* 5083 * Mode should be SCSI after a chip reset, but lets 5084 * set it just to be safe. We touch the MODE_PTR 5085 * register directly so as to bypass the lazy update 5086 * code in ahd_set_modes(). 5087 */ 5088 ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 5089 ahd_outb(ahd, MODE_PTR, 5090 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI)); 5091 5092 /* 5093 * Restore SXFRCTL1. 5094 * 5095 * We must always initialize STPWEN to 1 before we 5096 * restore the saved values. STPWEN is initialized 5097 * to a tri-state condition which can only be cleared 5098 * by turning it on. 
5099 */ 5100 ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); 5101 ahd_outb(ahd, SXFRCTL1, sxfrctl1); 5102 5103 /* Determine chip configuration */ 5104 ahd->features &= ~AHD_WIDE; 5105 if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0) 5106 ahd->features |= AHD_WIDE; 5107 5108 /* 5109 * If a recovery action has forced a chip reset, 5110 * re-initialize the chip to our liking. 5111 */ 5112 if (ahd->init_level > 0) 5113 ahd_chip_init(ahd); 5114 5115 return (0); 5116 } 5117 5118 /* 5119 * Determine the number of SCBs available on the controller 5120 */ 5121 int 5122 ahd_probe_scbs(struct ahd_softc *ahd) { 5123 int i; 5124 5125 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 5126 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 5127 for (i = 0; i < AHD_SCB_MAX; i++) { 5128 int j; 5129 5130 ahd_set_scbptr(ahd, i); 5131 ahd_outw(ahd, SCB_BASE, i); 5132 for (j = 2; j < 64; j++) 5133 ahd_outb(ahd, SCB_BASE+j, 0); 5134 /* Start out life as unallocated (needing an abort) */ 5135 ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE); 5136 if (ahd_inw_scbram(ahd, SCB_BASE) != i) 5137 break; 5138 ahd_set_scbptr(ahd, 0); 5139 if (ahd_inw_scbram(ahd, SCB_BASE) != 0) 5140 break; 5141 } 5142 return (i); 5143 } 5144 5145 static void 5146 ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 5147 { 5148 bus_addr_t *baddr; 5149 5150 baddr = (bus_addr_t *)arg; 5151 *baddr = segs->ds_addr; 5152 } 5153 5154 static void 5155 ahd_initialize_hscbs(struct ahd_softc *ahd) 5156 { 5157 int i; 5158 5159 for (i = 0; i < ahd->scb_data.maxhscbs; i++) { 5160 ahd_set_scbptr(ahd, i); 5161 5162 /* Clear the control byte. 
*/ 5163 ahd_outb(ahd, SCB_CONTROL, 0); 5164 5165 /* Set the next pointer */ 5166 ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL); 5167 } 5168 } 5169 5170 static int 5171 ahd_init_scbdata(struct ahd_softc *ahd) 5172 { 5173 struct scb_data *scb_data; 5174 int i; 5175 5176 scb_data = &ahd->scb_data; 5177 TAILQ_INIT(&scb_data->free_scbs); 5178 for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++) 5179 LIST_INIT(&scb_data->free_scb_lists[i]); 5180 LIST_INIT(&scb_data->any_dev_free_scb_list); 5181 SLIST_INIT(&scb_data->hscb_maps); 5182 SLIST_INIT(&scb_data->sg_maps); 5183 SLIST_INIT(&scb_data->sense_maps); 5184 5185 /* Determine the number of hardware SCBs and initialize them */ 5186 scb_data->maxhscbs = ahd_probe_scbs(ahd); 5187 if (scb_data->maxhscbs == 0) { 5188 printf("%s: No SCB space found\n", ahd_name(ahd)); 5189 return (ENXIO); 5190 } 5191 5192 ahd_initialize_hscbs(ahd); 5193 5194 /* 5195 * Create our DMA tags. These tags define the kinds of device 5196 * accessible memory allocations and memory mappings we will 5197 * need to perform during normal operation. 5198 * 5199 * Unless we need to further restrict the allocation, we rely 5200 * on the restrictions of the parent dmat, hence the common 5201 * use of MAXADDR and MAXSIZE. 5202 */ 5203 5204 /* DMA tag for our hardware scb structures */ 5205 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, 5206 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 5207 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 5208 /*highaddr*/BUS_SPACE_MAXADDR, 5209 /*filter*/NULL, /*filterarg*/NULL, 5210 PAGE_SIZE, /*nsegments*/1, 5211 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 5212 /*flags*/0, &scb_data->hscb_dmat) != 0) { 5213 goto error_exit; 5214 } 5215 5216 scb_data->init_level++; 5217 5218 /* DMA tag for our S/G structures. 
 */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       ahd_sglist_allocsize(ahd), /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
		printf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
		       ahd_sglist_allocsize(ahd));
#endif

	/* init_level tracks how far teardown must unwind in
	 * ahd_fini_scbdata(). */
	scb_data->init_level++;

	/* DMA tag for our sense buffers.  We allocate in page sized chunks */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	ahd_alloc_scbs(ahd);

	if (scb_data->numscbs == 0) {
		printf("%s: ahd_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahd_name(ahd));
		goto error_exit;
	}

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:

	return (ENOMEM);
}

/*
 * Locate the SCB assigned the given tag, searching the pending list,
 * the collision free lists, and finally the generic free list.
 * Returns NULL if no SCB carries the tag.
 */
static struct scb *
ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag)
{
	struct scb *scb;

	/*
	 * Look on the pending list.
	 */
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		if (SCB_GET_TAG(scb) == tag)
			return (scb);
	}

	/*
	 * Then on all of the collision free lists.
5286 */ 5287 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 5288 struct scb *list_scb; 5289 5290 list_scb = scb; 5291 do { 5292 if (SCB_GET_TAG(list_scb) == tag) 5293 return (list_scb); 5294 list_scb = LIST_NEXT(list_scb, collision_links); 5295 } while (list_scb); 5296 } 5297 5298 /* 5299 * And finally on the generic free list. 5300 */ 5301 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { 5302 if (SCB_GET_TAG(scb) == tag) 5303 return (scb); 5304 } 5305 5306 return (NULL); 5307 } 5308 5309 static void 5310 ahd_fini_scbdata(struct ahd_softc *ahd) 5311 { 5312 struct scb_data *scb_data; 5313 5314 scb_data = &ahd->scb_data; 5315 if (scb_data == NULL) 5316 return; 5317 5318 switch (scb_data->init_level) { 5319 default: 5320 case 7: 5321 { 5322 struct map_node *sns_map; 5323 5324 while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) { 5325 SLIST_REMOVE_HEAD(&scb_data->sense_maps, links); 5326 ahd_dmamap_unload(ahd, scb_data->sense_dmat, 5327 sns_map->dmamap); 5328 ahd_dmamem_free(ahd, scb_data->sense_dmat, 5329 sns_map->vaddr, sns_map->dmamap); 5330 free(sns_map, M_DEVBUF); 5331 } 5332 ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); 5333 /* FALLTHROUGH */ 5334 } 5335 case 6: 5336 { 5337 struct map_node *sg_map; 5338 5339 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) { 5340 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 5341 ahd_dmamap_unload(ahd, scb_data->sg_dmat, 5342 sg_map->dmamap); 5343 ahd_dmamem_free(ahd, scb_data->sg_dmat, 5344 sg_map->vaddr, sg_map->dmamap); 5345 free(sg_map, M_DEVBUF); 5346 } 5347 ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); 5348 /* FALLTHROUGH */ 5349 } 5350 case 5: 5351 { 5352 struct map_node *hscb_map; 5353 5354 while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) { 5355 SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links); 5356 ahd_dmamap_unload(ahd, scb_data->hscb_dmat, 5357 hscb_map->dmamap); 5358 ahd_dmamem_free(ahd, scb_data->hscb_dmat, 5359 hscb_map->vaddr, hscb_map->dmamap); 5360 
free(hscb_map, M_DEVBUF); 5361 } 5362 ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat); 5363 /* FALLTHROUGH */ 5364 } 5365 case 4: 5366 case 3: 5367 case 2: 5368 case 1: 5369 case 0: 5370 break; 5371 } 5372 } 5373 5374 /* 5375 * DSP filter Bypass must be enabled until the first selection 5376 * after a change in bus mode (Razor #491 and #493). 5377 */ 5378 static void 5379 ahd_setup_iocell_workaround(struct ahd_softc *ahd) 5380 { 5381 ahd_mode_state saved_modes; 5382 5383 saved_modes = ahd_save_modes(ahd); 5384 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 5385 ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) 5386 | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); 5387 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); 5388 #ifdef AHD_DEBUG 5389 if ((ahd_debug & AHD_SHOW_MISC) != 0) 5390 printf("%s: Setting up iocell workaround\n", ahd_name(ahd)); 5391 #endif 5392 ahd_restore_modes(ahd, saved_modes); 5393 } 5394 5395 static void 5396 ahd_iocell_first_selection(struct ahd_softc *ahd) 5397 { 5398 ahd_mode_state saved_modes; 5399 u_int sblkctl; 5400 5401 saved_modes = ahd_save_modes(ahd); 5402 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 5403 sblkctl = ahd_inb(ahd, SBLKCTL); 5404 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 5405 #ifdef AHD_DEBUG 5406 if ((ahd_debug & AHD_SHOW_MISC) != 0) 5407 printf("%s: iocell first selection\n", ahd_name(ahd)); 5408 #endif 5409 if ((sblkctl & ENAB40) != 0) { 5410 ahd_outb(ahd, DSPDATACTL, 5411 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); 5412 #ifdef AHD_DEBUG 5413 if ((ahd_debug & AHD_SHOW_MISC) != 0) 5414 printf("%s: BYPASS now disabled\n", ahd_name(ahd)); 5415 #endif 5416 } 5417 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); 5418 ahd_outb(ahd, CLRINT, CLRSCSIINT); 5419 ahd_restore_modes(ahd, saved_modes); 5420 } 5421 5422 /*************************** SCB Management ***********************************/ 5423 static void 5424 ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx) 
{
	struct scb_list *free_list;
	struct scb_tailq *free_tailq;
	struct scb *first_scb;

	/* Tag the SCB with the collision column it now belongs to. */
	scb->flags |= SCB_ON_COL_LIST;
	AHD_SET_SCB_COL_IDX(scb, col_idx);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	if (first_scb != NULL) {
		/* Column already non-empty: link behind the current head. */
		LIST_INSERT_AFTER(first_scb, scb, collision_links);
	} else {
		/*
		 * First entry for this column.  The column head is also
		 * published on the free_scbs tailq, so free_scbs carries
		 * exactly one representative per non-empty column.
		 */
		LIST_INSERT_HEAD(free_list, scb, collision_links);
		TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
	}
}

/*
 * Remove an SCB from its collision free list while keeping the
 * free_scbs tailq consistent: when the SCB being removed is the column
 * head and a successor exists, the successor inherits the head's slot
 * on the tailq before the head is unlinked.
 */
static void
ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
{
	struct scb_list *free_list;
	struct scb_tailq *free_tailq;
	struct scb *first_scb;
	u_int col_idx;

	scb->flags &= ~SCB_ON_COL_LIST;
	col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	if (first_scb == scb) {
		struct scb *next_scb;

		/*
		 * Maintain order in the collision free
		 * lists for fairness if this device has
		 * other colliding tags active.
		 */
		next_scb = LIST_NEXT(scb, collision_links);
		if (next_scb != NULL) {
			TAILQ_INSERT_AFTER(free_tailq, scb,
					   next_scb, links.tqe);
		}
		TAILQ_REMOVE(free_tailq, scb, links.tqe);
	}
	LIST_REMOVE(scb, collision_links);
}

/*
 * Get a free scb. If there are none, see if we can allocate a new SCB.
 */
struct scb *
ahd_get_scb(struct ahd_softc *ahd, u_int col_idx)
{
	struct scb *scb;
	int tries;

	tries = 0;
look_again:
	/*
	 * Prefer an SCB parked on a collision column other than the
	 * one requested; its partner belongs to a different column, so
	 * it should be safe for this device to use.
	 */
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
			ahd_rem_col_list(ahd, scb);
			goto found;
		}
	}
	if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) {

		/* Grow the pool once; give up on a second failure. */
		if (tries++ != 0)
			return (NULL);
		ahd_alloc_scbs(ahd);
		goto look_again;
	}
	LIST_REMOVE(scb, links.le);
	if (col_idx != AHD_NEVER_COL_IDX
	 && (scb->col_scb != NULL)
	 && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
		/*
		 * Park our inactive partner SCB on this column's
		 * collision list so later allocations can find it.
		 */
		LIST_REMOVE(scb->col_scb, links.le);
		ahd_add_col_list(ahd, scb->col_scb, col_idx);
	}
found:
	scb->flags |= SCB_ACTIVE;
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
void
ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
{

	/* Clean up for the next user */
	scb->flags = SCB_FLAG_NONE;
	scb->hscb->control = 0;
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;

	if (scb->col_scb == NULL) {

		/*
		 * No collision possible.  Just free normally.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	} else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {

		/*
		 * The SCB we might have collided with is on
		 * a free collision list.  Put both SCBs on
		 * the generic list.
5535 */ 5536 ahd_rem_col_list(ahd, scb->col_scb); 5537 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 5538 scb, links.le); 5539 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 5540 scb->col_scb, links.le); 5541 } else if ((scb->col_scb->flags 5542 & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE 5543 && (scb->col_scb->hscb->control & TAG_ENB) != 0) { 5544 5545 /* 5546 * The SCB we might collide with on the next allocation 5547 * is still active in a non-packetized, tagged, context. 5548 * Put us on the SCB collision list. 5549 */ 5550 ahd_add_col_list(ahd, scb, 5551 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb)); 5552 } else { 5553 /* 5554 * The SCB we might collide with on the next allocation 5555 * is either active in a packetized context, or free. 5556 * Since we can't collide, put this SCB on the generic 5557 * free list. 5558 */ 5559 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 5560 scb, links.le); 5561 } 5562 5563 ahd_platform_scb_free(ahd, scb); 5564 } 5565 5566 void 5567 ahd_alloc_scbs(struct ahd_softc *ahd) 5568 { 5569 struct scb_data *scb_data; 5570 struct scb *next_scb; 5571 struct hardware_scb *hscb; 5572 struct map_node *hscb_map; 5573 struct map_node *sg_map; 5574 struct map_node *sense_map; 5575 uint8_t *segs; 5576 uint8_t *sense_data; 5577 bus_addr_t hscb_busaddr; 5578 bus_addr_t sg_busaddr; 5579 bus_addr_t sense_busaddr; 5580 int newcount; 5581 int i; 5582 5583 scb_data = &ahd->scb_data; 5584 if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) 5585 /* Can't allocate any more */ 5586 return; 5587 5588 if (scb_data->scbs_left != 0) { 5589 int offset; 5590 5591 offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; 5592 hscb_map = SLIST_FIRST(&scb_data->hscb_maps); 5593 hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; 5594 hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); 5595 } else { 5596 hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_NOWAIT); 5597 5598 if (hscb_map == NULL) 5599 return; 5600 5601 /* Allocate the 
next batch of hardware SCBs */ 5602 if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, 5603 (void **)&hscb_map->vaddr, 5604 BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { 5605 free(hscb_map, M_DEVBUF); 5606 return; 5607 } 5608 5609 SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); 5610 5611 ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, 5612 hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, 5613 &hscb_map->physaddr, /*flags*/0); 5614 5615 hscb = (struct hardware_scb *)hscb_map->vaddr; 5616 hscb_busaddr = hscb_map->physaddr; 5617 scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); 5618 } 5619 5620 if (scb_data->sgs_left != 0) { 5621 int offset; 5622 5623 offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd)) 5624 - scb_data->sgs_left) * ahd_sglist_size(ahd); 5625 sg_map = SLIST_FIRST(&scb_data->sg_maps); 5626 segs = sg_map->vaddr + offset; 5627 sg_busaddr = sg_map->physaddr + offset; 5628 } else { 5629 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 5630 5631 if (sg_map == NULL) 5632 return; 5633 5634 /* Allocate the next batch of S/G lists */ 5635 if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, 5636 (void **)&sg_map->vaddr, 5637 BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { 5638 free(sg_map, M_DEVBUF); 5639 return; 5640 } 5641 5642 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 5643 5644 ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, 5645 sg_map->vaddr, ahd_sglist_allocsize(ahd), 5646 ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0); 5647 5648 segs = sg_map->vaddr; 5649 sg_busaddr = sg_map->physaddr; 5650 scb_data->sgs_left = 5651 ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); 5652 #ifdef AHD_DEBUG 5653 if (ahd_debug & AHD_SHOW_MEMORY) 5654 printf("Mapped SG data\n"); 5655 #endif 5656 } 5657 5658 if (scb_data->sense_left != 0) { 5659 int offset; 5660 5661 offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); 5662 sense_map = SLIST_FIRST(&scb_data->sense_maps); 5663 sense_data = sense_map->vaddr + offset; 5664 sense_busaddr = 
sense_map->physaddr + offset; 5665 } else { 5666 sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_NOWAIT); 5667 5668 if (sense_map == NULL) 5669 return; 5670 5671 /* Allocate the next batch of sense buffers */ 5672 if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, 5673 (void **)&sense_map->vaddr, 5674 BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { 5675 free(sense_map, M_DEVBUF); 5676 return; 5677 } 5678 5679 SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); 5680 5681 ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, 5682 sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, 5683 &sense_map->physaddr, /*flags*/0); 5684 5685 sense_data = sense_map->vaddr; 5686 sense_busaddr = sense_map->physaddr; 5687 scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; 5688 #ifdef AHD_DEBUG 5689 if (ahd_debug & AHD_SHOW_MEMORY) 5690 printf("Mapped sense data\n"); 5691 #endif 5692 } 5693 5694 newcount = MIN(scb_data->sense_left, scb_data->scbs_left); 5695 newcount = MIN(newcount, scb_data->sgs_left); 5696 newcount = MIN(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs)); 5697 scb_data->sense_left -= newcount; 5698 scb_data->scbs_left -= newcount; 5699 scb_data->sgs_left -= newcount; 5700 for (i = 0; i < newcount; i++) { 5701 u_int col_tag; 5702 5703 struct scb_platform_data *pdata; 5704 #ifndef __linux__ 5705 int error; 5706 #endif 5707 next_scb = (struct scb *)malloc(sizeof(*next_scb), 5708 M_DEVBUF, M_NOWAIT); 5709 if (next_scb == NULL) 5710 break; 5711 5712 pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), 5713 M_DEVBUF, M_NOWAIT); 5714 if (pdata == NULL) { 5715 free(next_scb, M_DEVBUF); 5716 break; 5717 } 5718 next_scb->platform_data = pdata; 5719 next_scb->hscb_map = hscb_map; 5720 next_scb->sg_map = sg_map; 5721 next_scb->sense_map = sense_map; 5722 next_scb->sg_list = segs; 5723 next_scb->sense_data = sense_data; 5724 next_scb->sense_busaddr = sense_busaddr; 5725 next_scb->hscb = hscb; 5726 hscb->hscb_busaddr = ahd_htole32(hscb_busaddr); 5727 5728 /* 5729 * 
The sequencer always starts with the second entry. 5730 * The first entry is embedded in the scb. 5731 */ 5732 next_scb->sg_list_busaddr = sg_busaddr; 5733 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 5734 next_scb->sg_list_busaddr 5735 += sizeof(struct ahd_dma64_seg); 5736 else 5737 next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); 5738 next_scb->ahd_softc = ahd; 5739 next_scb->flags = SCB_FLAG_NONE; 5740 #ifndef __linux__ 5741 error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0, 5742 &next_scb->dmamap); 5743 if (error != 0) { 5744 free(next_scb, M_DEVBUF); 5745 free(pdata, M_DEVBUF); 5746 break; 5747 } 5748 #endif 5749 next_scb->hscb->tag = ahd_htole16(scb_data->numscbs); 5750 col_tag = scb_data->numscbs ^ 0x100; 5751 next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); 5752 if (next_scb->col_scb != NULL) 5753 next_scb->col_scb->col_scb = next_scb; 5754 ahd_free_scb(ahd, next_scb); 5755 hscb++; 5756 hscb_busaddr += sizeof(*hscb); 5757 segs += ahd_sglist_size(ahd); 5758 sg_busaddr += ahd_sglist_size(ahd); 5759 sense_data += AHD_SENSE_BUFSIZE; 5760 sense_busaddr += AHD_SENSE_BUFSIZE; 5761 scb_data->numscbs++; 5762 } 5763 } 5764 5765 void 5766 ahd_controller_info(struct ahd_softc *ahd, char *buf) 5767 { 5768 const char *speed; 5769 const char *type; 5770 int len; 5771 5772 len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]); 5773 buf += len; 5774 5775 speed = "Ultra320 "; 5776 if ((ahd->features & AHD_WIDE) != 0) { 5777 type = "Wide "; 5778 } else { 5779 type = "Single "; 5780 } 5781 len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ", 5782 speed, type, ahd->channel, ahd->our_id); 5783 buf += len; 5784 5785 sprintf(buf, "%s, %d SCBs", ahd->bus_description, 5786 ahd->scb_data.maxhscbs); 5787 } 5788 5789 static const char *channel_strings[] = { 5790 "Primary Low", 5791 "Primary High", 5792 "Secondary Low", 5793 "Secondary High" 5794 }; 5795 5796 static const char *termstat_strings[] = { 5797 "Terminated Correctly", 5798 "Over 
Terminated", 5799 "Under Terminated", 5800 "Not Configured" 5801 }; 5802 5803 /* 5804 * Start the board, ready for normal operation 5805 */ 5806 int 5807 ahd_init(struct ahd_softc *ahd) 5808 { 5809 uint8_t *base_vaddr; 5810 uint8_t *next_vaddr; 5811 bus_addr_t next_baddr; 5812 size_t driver_data_size; 5813 int i; 5814 int error; 5815 u_int warn_user; 5816 uint8_t current_sensing; 5817 uint8_t fstat; 5818 5819 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 5820 5821 ahd->stack_size = ahd_probe_stack_size(ahd); 5822 ahd->saved_stack = malloc(ahd->stack_size * sizeof(uint16_t), 5823 M_DEVBUF, M_NOWAIT); 5824 if (ahd->saved_stack == NULL) 5825 return (ENOMEM); 5826 5827 /* 5828 * Verify that the compiler hasn't over-agressively 5829 * padded important structures. 5830 */ 5831 if (sizeof(struct hardware_scb) != 64) 5832 panic("Hardware SCB size is incorrect"); 5833 5834 #ifdef AHD_DEBUG 5835 if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0) 5836 ahd->flags |= AHD_SEQUENCER_DEBUG; 5837 #endif 5838 5839 /* 5840 * Default to allowing initiator operations. 5841 */ 5842 ahd->flags |= AHD_INITIATORROLE; 5843 5844 /* 5845 * Only allow target mode features if this unit has them enabled. 5846 */ 5847 if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0) 5848 ahd->features &= ~AHD_TARGETMODE; 5849 5850 #ifndef __linux__ 5851 /* DMA tag for mapping buffers into device visible space. */ 5852 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, 5853 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 5854 /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING 5855 ? 
(bus_addr_t)0x7FFFFFFFFFULL 5856 : BUS_SPACE_MAXADDR_32BIT, 5857 /*highaddr*/BUS_SPACE_MAXADDR, 5858 /*filter*/NULL, /*filterarg*/NULL, 5859 /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE, 5860 /*nsegments*/AHD_NSEG, 5861 /*maxsegsz*/AHD_MAXTRANSFER_SIZE, 5862 /*flags*/BUS_DMA_ALLOCNOW, 5863 &ahd->buffer_dmat) != 0) { 5864 return (ENOMEM); 5865 } 5866 #endif 5867 5868 ahd->init_level++; 5869 5870 /* 5871 * DMA tag for our command fifos and other data in system memory 5872 * the card's sequencer must be able to access. For initiator 5873 * roles, we need to allocate space for the qoutfifo. When providing 5874 * for the target mode role, we must additionally provide space for 5875 * the incoming target command fifo. 5876 */ 5877 driver_data_size = AHD_SCB_MAX * sizeof(uint16_t) 5878 + sizeof(struct hardware_scb); 5879 if ((ahd->features & AHD_TARGETMODE) != 0) 5880 driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd); 5881 if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) 5882 driver_data_size += PKT_OVERRUN_BUFSIZE; 5883 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, 5884 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 5885 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 5886 /*highaddr*/BUS_SPACE_MAXADDR, 5887 /*filter*/NULL, /*filterarg*/NULL, 5888 driver_data_size, 5889 /*nsegments*/1, 5890 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 5891 /*flags*/0, &ahd->shared_data_dmat) != 0) { 5892 return (ENOMEM); 5893 } 5894 5895 ahd->init_level++; 5896 5897 /* Allocation of driver data */ 5898 if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat, 5899 (void **)&base_vaddr, 5900 BUS_DMA_NOWAIT, &ahd->shared_data_dmamap) != 0) { 5901 return (ENOMEM); 5902 } 5903 5904 ahd->init_level++; 5905 5906 /* And permanently map it in */ 5907 ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap, 5908 base_vaddr, driver_data_size, ahd_dmamap_cb, 5909 &ahd->shared_data_busaddr, /*flags*/0); 5910 ahd->qoutfifo = (uint16_t *)base_vaddr; 5911 next_vaddr = (uint8_t 
*)&ahd->qoutfifo[AHD_QOUT_SIZE]; 5912 next_baddr = ahd->shared_data_busaddr + AHD_QOUT_SIZE*sizeof(uint16_t); 5913 if ((ahd->features & AHD_TARGETMODE) != 0) { 5914 ahd->targetcmds = (struct target_cmd *)next_vaddr; 5915 next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); 5916 next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); 5917 } 5918 5919 if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { 5920 ahd->overrun_buf = next_vaddr; 5921 next_vaddr += PKT_OVERRUN_BUFSIZE; 5922 next_baddr += PKT_OVERRUN_BUFSIZE; 5923 } 5924 5925 /* 5926 * We need one SCB to serve as the "next SCB". Since the 5927 * tag identifier in this SCB will never be used, there is 5928 * no point in using a valid HSCB tag from an SCB pulled from 5929 * the standard free pool. So, we allocate this "sentinel" 5930 * specially from the DMA safe memory chunk used for the QOUTFIFO. 5931 */ 5932 ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr; 5933 ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr); 5934 5935 ahd->init_level++; 5936 5937 /* Allocate SCB data now that buffer_dmat is initialized */ 5938 if (ahd_init_scbdata(ahd) != 0) 5939 return (ENOMEM); 5940 5941 if ((ahd->flags & AHD_INITIATORROLE) == 0) 5942 ahd->flags &= ~AHD_RESET_BUS_A; 5943 5944 /* 5945 * Before committing these settings to the chip, give 5946 * the OSM one last chance to modify our configuration. 5947 */ 5948 ahd_platform_init(ahd); 5949 5950 /* Bring up the chip. */ 5951 ahd_chip_init(ahd); 5952 5953 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 5954 5955 if ((ahd->flags & AHD_CURRENT_SENSING) == 0) 5956 goto init_done; 5957 5958 /* 5959 * Verify termination based on current draw and 5960 * warn user if the bus is over/under terminated. 
 */
	error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
				   CURSENSE_ENB);
	if (error != 0) {
		printf("%s: current sensing timeout 1\n", ahd_name(ahd));
		goto init_done;
	}
	/* Poll (up to 20 reads) for the flexport to go non-busy. */
	for (i = 20, fstat = FLX_FSTAT_BUSY;
	     (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
		error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
		if (error != 0) {
			printf("%s: current sensing timeout 2\n",
			       ahd_name(ahd));
			goto init_done;
		}
	}
	if (i == 0) {
		printf("%s: Timedout during current-sensing test\n",
		       ahd_name(ahd));
		goto init_done;
	}

	/* Latch Current Sensing status. */
	error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
	if (error != 0) {
		printf("%s: current sensing timeout 3\n", ahd_name(ahd));
		goto init_done;
	}

	/* Disable current sensing. */
	ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
		printf("%s: current_sensing == 0x%x\n",
		       ahd_name(ahd), current_sensing);
	}
#endif
	warn_user = 0;
	/*
	 * current_sensing packs four per-channel status fields; shift
	 * one FLX_CSTAT field into view on each iteration.
	 */
	for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
		u_int term_stat;

		term_stat = (current_sensing & FLX_CSTAT_MASK);
		switch (term_stat) {
		case FLX_CSTAT_OVER:
		case FLX_CSTAT_UNDER:
			warn_user++;
			/* FALLTHROUGH */
		case FLX_CSTAT_INVALID:
		case FLX_CSTAT_OKAY:
			if (warn_user == 0 && bootverbose == 0)
				break;
			printf("%s: %s Channel %s\n", ahd_name(ahd),
			       channel_strings[i], termstat_strings[term_stat]);
			break;
		}
	}
	if (warn_user) {
		printf("%s: WARNING. Termination is not configured correctly.\n"
		       "%s: WARNING. SCSI bus operations may FAIL.\n",
		       ahd_name(ahd), ahd_name(ahd));
	}
init_done:
	ahd_restart(ahd);
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);
	return (0);
}

/*
 * (Re)initialize chip state after a chip reset.
 */
static void
ahd_chip_init(struct ahd_softc *ahd)
{
	uint32_t busaddr;
	u_int	 sxfrctl1;
	u_int	 scsiseq_template;
	u_int	 wait;
	u_int	 i;
	u_int	 target;

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/*
	 * Take the LED out of diagnostic mode
	 */
	ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON));

	/*
	 * Return HS_MAILBOX to its default value.
	 */
	ahd->hs_mailbox = 0;
	ahd_outb(ahd, HS_MAILBOX, 0);

	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */
	ahd_outb(ahd, IOWNID, ahd->our_id);
	ahd_outb(ahd, TOWNID, ahd->our_id);
	sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0;
	sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0;
	if ((ahd->bugs & AHD_LONG_SETIMO_BUG)
	 && (ahd->seltime != STIMESEL_MIN)) {
		/*
		 * The selection timer duration is twice as long
		 * as it should be.  Halve it by adding "1" to
		 * the user specified setting.
		 */
		sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ;
	} else {
		sxfrctl1 |= ahd->seltime;
	}

	ahd_outb(ahd, SXFRCTL0, DFON);
	/*
	 * NOTE(review): sxfrctl1 already carries the (possibly
	 * bug-adjusted) selection-timer field; OR-ing ahd->seltime in
	 * again re-sets the unadjusted bits and looks like it defeats
	 * the AHD_LONG_SETIMO_BUG workaround above -- confirm intent.
	 */
	ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
	ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);

	/*
	 * Now that termination is set, wait for up
	 * to 500ms for our transceivers to settle.  If
	 * the adapter does not have a cable attached,
	 * the transceivers may never settle, so don't
	 * complain if we fail here.
6081 */ 6082 for (wait = 10000; 6083 (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 6084 wait--) 6085 ahd_delay(100); 6086 6087 /* Clear any false bus resets due to the transceivers settling */ 6088 ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); 6089 ahd_outb(ahd, CLRINT, CLRSCSIINT); 6090 6091 /* Initialize mode specific S/G state. */ 6092 for (i = 0; i < 2; i++) { 6093 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); 6094 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); 6095 ahd_outb(ahd, SG_STATE, 0); 6096 ahd_outb(ahd, CLRSEQINTSRC, 0xFF); 6097 ahd_outb(ahd, SEQIMODE, 6098 ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT 6099 |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); 6100 } 6101 6102 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 6103 ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); 6104 ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); 6105 ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); 6106 ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); 6107 if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { 6108 ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE); 6109 } else { 6110 ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); 6111 } 6112 ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS); 6113 if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) 6114 /* 6115 * Do not issue a target abort when a split completion 6116 * error occurs. Let our PCIX interrupt handler deal 6117 * with it instead. H2A4 Razor #625 6118 */ 6119 ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); 6120 6121 if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0) 6122 ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER); 6123 6124 /* 6125 * Tweak IOCELL settings. 
6126 */ 6127 if ((ahd->flags & AHD_HP_BOARD) != 0) { 6128 for (i = 0; i < NUMDSPS; i++) { 6129 ahd_outb(ahd, DSPSELECT, i); 6130 ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT); 6131 } 6132 #ifdef AHD_DEBUG 6133 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6134 printf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), 6135 WRTBIASCTL_HP_DEFAULT); 6136 #endif 6137 } 6138 ahd_setup_iocell_workaround(ahd); 6139 6140 /* 6141 * Enable LQI Manager interrupts. 6142 */ 6143 ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT 6144 | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI 6145 | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); 6146 ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); 6147 /* 6148 * An interrupt from LQOBUSFREE is made redundant by the 6149 * BUSFREE interrupt. We choose to have the sequencer catch 6150 * LQOPHCHGINPKT errors manually for the command phase at the 6151 * start of a packetized selection case. 6152 ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE|ENLQOPHACHGINPKT); 6153 */ 6154 ahd_outb(ahd, LQOMODE1, 0); 6155 6156 /* 6157 * Setup sequencer interrupt handlers. 6158 */ 6159 ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); 6160 ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr)); 6161 6162 /* 6163 * Setup SCB Offset registers. 
6164 */ 6165 if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { 6166 ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, 6167 pkt_long_lun)); 6168 } else { 6169 ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); 6170 } 6171 ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); 6172 ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute)); 6173 ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); 6174 ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, 6175 shared_data.idata.cdb)); 6176 ahd_outb(ahd, QNEXTPTR, 6177 offsetof(struct hardware_scb, next_hscb_busaddr)); 6178 ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); 6179 ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); 6180 if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { 6181 ahd_outb(ahd, LUNLEN, 6182 sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1); 6183 } else { 6184 ahd_outb(ahd, LUNLEN, sizeof(ahd->next_queued_hscb->lun) - 1); 6185 } 6186 ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); 6187 ahd_outb(ahd, MAXCMD, 0xFF); 6188 ahd_outb(ahd, SCBAUTOPTR, 6189 AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); 6190 6191 /* We haven't been enabled for target mode yet. */ 6192 ahd_outb(ahd, MULTARGID, 0); 6193 ahd_outb(ahd, MULTARGID + 1, 0); 6194 6195 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 6196 /* Initialize the negotiation table. */ 6197 if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) { 6198 /* 6199 * Clear the spare bytes in the neg table to avoid 6200 * spurious parity errors. 
6201 */ 6202 for (target = 0; target < AHD_NUM_TARGETS; target++) { 6203 ahd_outb(ahd, NEGOADDR, target); 6204 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0); 6205 for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++) 6206 ahd_outb(ahd, ANNEXDAT, 0); 6207 } 6208 } 6209 for (target = 0; target < AHD_NUM_TARGETS; target++) { 6210 struct ahd_devinfo devinfo; 6211 struct ahd_initiator_tinfo *tinfo; 6212 struct ahd_tmode_tstate *tstate; 6213 6214 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 6215 target, &tstate); 6216 ahd_compile_devinfo(&devinfo, ahd->our_id, 6217 target, CAM_LUN_WILDCARD, 6218 'A', ROLE_INITIATOR); 6219 ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); 6220 } 6221 6222 ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); 6223 ahd_outb(ahd, CLRINT, CLRSCSIINT); 6224 6225 /* 6226 * Always enable abort on incoming L_Qs if this feature is 6227 * supported. We use this to catch invalid SCB references. 6228 */ 6229 if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) 6230 ahd_outb(ahd, LQCTL1, ABORTPENDING); 6231 else 6232 ahd_outb(ahd, LQCTL1, 0); 6233 6234 /* All of our queues are empty */ 6235 ahd->qoutfifonext = 0; 6236 ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID_LE; 6237 ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID >> 8); 6238 for (i = 0; i < AHD_QOUT_SIZE; i++) 6239 ahd->qoutfifo[i] = 0; 6240 ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); 6241 6242 ahd->qinfifonext = 0; 6243 for (i = 0; i < AHD_QIN_SIZE; i++) 6244 ahd->qinfifo[i] = SCB_LIST_NULL; 6245 6246 if ((ahd->features & AHD_TARGETMODE) != 0) { 6247 /* All target command blocks start out invalid. */ 6248 for (i = 0; i < AHD_TMODE_CMDS; i++) 6249 ahd->targetcmds[i].cmd_valid = 0; 6250 ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); 6251 ahd->tqinfifonext = 1; 6252 ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); 6253 ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); 6254 } 6255 6256 /* Initialize Scratch Ram. 
*/ 6257 ahd_outb(ahd, SEQ_FLAGS, 0); 6258 ahd_outb(ahd, SEQ_FLAGS2, 0); 6259 6260 /* We don't have any waiting selections */ 6261 ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL); 6262 ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL); 6263 for (i = 0; i < AHD_NUM_TARGETS; i++) 6264 ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL); 6265 6266 /* 6267 * Nobody is waiting to be DMAed into the QOUTFIFO. 6268 */ 6269 ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); 6270 ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL); 6271 ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); 6272 6273 /* 6274 * The Freeze Count is 0. 6275 */ 6276 ahd_outw(ahd, QFREEZE_COUNT, 0); 6277 6278 /* 6279 * Tell the sequencer where it can find our arrays in memory. 6280 */ 6281 busaddr = ahd->shared_data_busaddr; 6282 ahd_outb(ahd, SHARED_DATA_ADDR, busaddr & 0xFF); 6283 ahd_outb(ahd, SHARED_DATA_ADDR + 1, (busaddr >> 8) & 0xFF); 6284 ahd_outb(ahd, SHARED_DATA_ADDR + 2, (busaddr >> 16) & 0xFF); 6285 ahd_outb(ahd, SHARED_DATA_ADDR + 3, (busaddr >> 24) & 0xFF); 6286 ahd_outb(ahd, QOUTFIFO_NEXT_ADDR, busaddr & 0xFF); 6287 ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 1, (busaddr >> 8) & 0xFF); 6288 ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 2, (busaddr >> 16) & 0xFF); 6289 ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 3, (busaddr >> 24) & 0xFF); 6290 6291 /* 6292 * Setup the allowed SCSI Sequences based on operational mode. 6293 * If we are a target, we'll enable select in operations once 6294 * we've had a lun enabled. 6295 */ 6296 scsiseq_template = ENAUTOATNP; 6297 if ((ahd->flags & AHD_INITIATORROLE) != 0) 6298 scsiseq_template |= ENRSELI; 6299 ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template); 6300 6301 /* There are no busy SCBs yet. */ 6302 for (target = 0; target < AHD_NUM_TARGETS; target++) { 6303 int lun; 6304 6305 for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++) 6306 ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun)); 6307 } 6308 6309 /* 6310 * Initialize the group code to command length table. 
6311 * Vendor Unique codes are set to 0 so we only capture 6312 * the first byte of the cdb. These can be overridden 6313 * when target mode is enabled. 6314 */ 6315 ahd_outb(ahd, CMDSIZE_TABLE, 5); 6316 ahd_outb(ahd, CMDSIZE_TABLE + 1, 9); 6317 ahd_outb(ahd, CMDSIZE_TABLE + 2, 9); 6318 ahd_outb(ahd, CMDSIZE_TABLE + 3, 0); 6319 ahd_outb(ahd, CMDSIZE_TABLE + 4, 15); 6320 ahd_outb(ahd, CMDSIZE_TABLE + 5, 11); 6321 ahd_outb(ahd, CMDSIZE_TABLE + 6, 0); 6322 ahd_outb(ahd, CMDSIZE_TABLE + 7, 0); 6323 6324 /* Tell the sequencer of our initial queue positions */ 6325 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 6326 ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512); 6327 ahd->qinfifonext = 0; 6328 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); 6329 ahd_set_hescb_qoff(ahd, 0); 6330 ahd_set_snscb_qoff(ahd, 0); 6331 ahd_set_sescb_qoff(ahd, 0); 6332 ahd_set_sdscb_qoff(ahd, 0); 6333 6334 /* 6335 * Tell the sequencer which SCB will be the next one it receives. 6336 */ 6337 busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); 6338 ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF); 6339 ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF); 6340 ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF); 6341 ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF); 6342 6343 /* 6344 * Default to coalessing disabled. 6345 */ 6346 ahd_outw(ahd, INT_COALESSING_CMDCOUNT, 0); 6347 ahd_outw(ahd, CMDS_PENDING, 0); 6348 ahd_update_coalessing_values(ahd, ahd->int_coalessing_timer, 6349 ahd->int_coalessing_maxcmds, 6350 ahd->int_coalessing_mincmds); 6351 ahd_enable_coalessing(ahd, FALSE); 6352 6353 ahd_loadseq(ahd); 6354 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 6355 } 6356 6357 /* 6358 * Setup default device and controller settings. 6359 * This should only be called if our probe has 6360 * determined that no configuration data is available. 
 */
int
ahd_default_config(struct ahd_softc *ahd)
{
	int	targ;

	/* No SEEPROM/VPD data: assume the conventional host adapter ID. */
	ahd->our_id = 7;

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
		printf("%s: unable to allocate ahd_tmode_tstate. "
		       "Failing attach\n", ahd_name(ahd));
		return (ENOMEM);
	}

	/*
	 * Program the most aggressive "user" (maximum allowed) settings
	 * for every possible target; actual negotiation starts from the
	 * conservative "curr"/"goal" values set at the bottom of the loop.
	 */
	for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;
		uint16_t target_mask;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    targ, &tstate);
		/*
		 * We support SPC2 and SPI4.
		 */
		tinfo->user.protocol_version = 4;
		tinfo->user.transport_version = 4;

		target_mask = 0x01 << targ;
		/* Allow disconnection and tagged queuing by default. */
		ahd->user_discenable |= target_mask;
		tstate->discenable |= target_mask;
		ahd->user_tagenable |= target_mask;
#ifdef AHD_FORCE_160
		tinfo->user.period = AHD_SYNCRATE_DT;
#else
		tinfo->user.period = AHD_SYNCRATE_160;
#endif
		tinfo->user.offset = MAX_OFFSET;
		/* Advertise every PPR protocol option we implement. */
		tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
					| MSG_EXT_PPR_WR_FLOW
					| MSG_EXT_PPR_HOLD_MCS
					| MSG_EXT_PPR_IU_REQ
					| MSG_EXT_PPR_QAS_REQ
					| MSG_EXT_PPR_DT_REQ;
		/* Retain Training Information is a chip feature, not universal. */
		if ((ahd->features & AHD_RTI) != 0)
			tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;

		tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;

		/*
		 * Start out Async/Narrow/Untagged and with
		 * conservative protocol support.
		 */
		tinfo->goal.protocol_version = 2;
		tinfo->goal.transport_version = 2;
		tinfo->curr.protocol_version = 2;
		tinfo->curr.transport_version = 2;
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    targ, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		tstate->tagenable &= ~target_mask;
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
	}
	return (0);
}

/*
 * Parse device configuration information.
 * Populate per-target "user" limits from the SEEPROM image in *sc and
 * latch the adapter-wide bios_control/adapter_control flags into ahd->flags.
 */
int
ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
{
	int targ;
	int max_targ;

	max_targ = sc->max_targets & CFMAXTARG;
	ahd->our_id = sc->brtime_id & CFSCSIID;

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
		printf("%s: unable to allocate ahd_tmode_tstate. "
		       "Failing attach\n", ahd_name(ahd));
		return (ENOMEM);
	}

	for (targ = 0; targ < max_targ; targ++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_transinfo *user_tinfo;
		struct ahd_tmode_tstate *tstate;
		uint16_t target_mask;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    targ, &tstate);
		user_tinfo = &tinfo->user;

		/*
		 * We support SPC2 and SPI4.
		 */
		tinfo->user.protocol_version = 4;
		tinfo->user.transport_version = 4;

		target_mask = 0x01 << targ;
		/* Clear defaults, then honor the per-device SEEPROM flags. */
		ahd->user_discenable &= ~target_mask;
		tstate->discenable &= ~target_mask;
		ahd->user_tagenable &= ~target_mask;
		if (sc->device_flags[targ] & CFDISC) {
			tstate->discenable |= target_mask;
			ahd->user_discenable |= target_mask;
			ahd->user_tagenable |= target_mask;
		} else {
			/*
			 * Cannot be packetized without disconnection.
			 */
			sc->device_flags[targ] &= ~CFPACKETIZED;
		}

		user_tinfo->ppr_options = 0;
		/*
		 * CFXFER encodes the sync period; values below CFXFER_ASYNC
		 * select a synchronous rate (lower value => faster rate,
		 * judging by the AHD_PERIOD_10MHz comparison below —
		 * NOTE(review): confirm encoding against aic79xx.h).
		 */
		user_tinfo->period = (sc->device_flags[targ] & CFXFER);
		if (user_tinfo->period < CFXFER_ASYNC) {
			if (user_tinfo->period <= AHD_PERIOD_10MHz)
				user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
			user_tinfo->offset = MAX_OFFSET;
		} else {
			/* Async: zero offset, canonical async period. */
			user_tinfo->offset = 0;
			user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
		}
#ifdef AHD_FORCE_160
		if (user_tinfo->period <= AHD_SYNCRATE_160)
			user_tinfo->period = AHD_SYNCRATE_DT;
#endif

		if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
			user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM
						|  MSG_EXT_PPR_WR_FLOW
						|  MSG_EXT_PPR_HOLD_MCS
						|  MSG_EXT_PPR_IU_REQ;
			if ((ahd->features & AHD_RTI) != 0)
				user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
		}

		if ((sc->device_flags[targ] & CFQAS) != 0)
			user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;

		if ((sc->device_flags[targ] & CFWIDEB) != 0)
			user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
			       user_tinfo->period, user_tinfo->offset,
			       user_tinfo->ppr_options);
#endif
		/*
		 * Start out Async/Narrow/Untagged and with
		 * conservative protocol support.
		 */
		tstate->tagenable &= ~target_mask;
		tinfo->goal.protocol_version = 2;
		tinfo->goal.transport_version = 2;
		tinfo->curr.protocol_version = 2;
		tinfo->curr.transport_version = 2;
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    targ, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
	}

	/* Adapter-wide option bits from the SEEPROM BIOS control word. */
	ahd->flags &= ~AHD_SPCHK_ENB_A;
	if (sc->bios_control & CFSPARITY)
		ahd->flags |= AHD_SPCHK_ENB_A;

	ahd->flags &= ~AHD_RESET_BUS_A;
	if (sc->bios_control & CFRESETB)
		ahd->flags |= AHD_RESET_BUS_A;

	ahd->flags &= ~AHD_EXTENDED_TRANS_A;
	if (sc->bios_control & CFEXTEND)
		ahd->flags |= AHD_EXTENDED_TRANS_A;

	ahd->flags &= ~AHD_BIOS_ENABLED;
	if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
		ahd->flags |= AHD_BIOS_ENABLED;

	ahd->flags &= ~AHD_STPWLEVEL_A;
	if ((sc->adapter_control & CFSTPWLEVEL) != 0)
		ahd->flags |= AHD_STPWLEVEL_A;

	return (0);
}

/*
 * Parse device configuration information.
 */
int
ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
{
	int error;

	/*
	 * ahd_verify_vpd_cksum() returns zero for a bad checksum
	 * (non-zero == valid), hence the inverted-looking test.
	 */
	error = ahd_verify_vpd_cksum(vpd);
	if (error == 0)
		return (EINVAL);
	if ((vpd->bios_flags & VPDBOOTHOST) != 0)
		ahd->flags |= AHD_BOOT_CHANNEL;
	return (0);
}

/*
 * Enable or disable interrupt delivery, keeping the cached
 * pause/unpause HCNTRL images in sync with the hardware setting.
 */
void
ahd_intr_enable(struct ahd_softc *ahd, int enable)
{
	u_int hcntrl;

	hcntrl = ahd_inb(ahd, HCNTRL);
	hcntrl &= ~INTEN;
	ahd->pause &= ~INTEN;
	ahd->unpause &= ~INTEN;
	if (enable) {
		hcntrl |= INTEN;
		ahd->pause |= INTEN;
		ahd->unpause |= INTEN;
	}
	ahd_outb(ahd, HCNTRL, hcntrl);
}

/*
 * Clamp and program the interrupt coalescing parameters.
 * The command-count registers take negated values (the sequencer
 * counts up toward zero).
 *
 * NOTE(review): mincmds is clamped and written to hardware, but never
 * stored to ahd->int_coalessing_mincmds the way maxcmds is — confirm
 * whether the softc copy is intentionally left stale.
 */
void
ahd_update_coalessing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds,
			     u_int mincmds)
{
	if (timer > AHD_TIMER_MAX_US)
		timer = AHD_TIMER_MAX_US;
	ahd->int_coalessing_timer = timer;

	if (maxcmds > AHD_INT_COALESSING_MAXCMDS_MAX)
		maxcmds = AHD_INT_COALESSING_MAXCMDS_MAX;
	if (mincmds > AHD_INT_COALESSING_MINCMDS_MAX)
		mincmds = AHD_INT_COALESSING_MINCMDS_MAX;
	ahd->int_coalessing_maxcmds = maxcmds;
	ahd_outw(ahd, INT_COALESSING_TIMER, timer / AHD_TIMER_US_PER_TICK);
	ahd_outb(ahd, INT_COALESSING_MAXCMDS, -maxcmds);
	ahd_outb(ahd, INT_COALESSING_MINCMDS, -mincmds);
}

/*
 * Toggle interrupt coalescing via the host/sequencer mailbox, then
 * drain the qoutfifo so no completion is stranded by the mode change.
 */
void
ahd_enable_coalessing(struct ahd_softc *ahd, int enable)
{

	ahd->hs_mailbox &= ~ENINT_COALESS;
	if (enable)
		ahd->hs_mailbox |= ENINT_COALESS;
	ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
	ahd_flush_device_writes(ahd);
	ahd_run_qoutfifo(ahd);
}

/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
6639 */ 6640 void 6641 ahd_pause_and_flushwork(struct ahd_softc *ahd) 6642 { 6643 u_int intstat; 6644 u_int maxloops; 6645 u_int qfreeze_cnt; 6646 6647 maxloops = 1000; 6648 ahd->flags |= AHD_ALL_INTERRUPTS; 6649 ahd_pause(ahd); 6650 /* 6651 * Increment the QFreeze Count so that the sequencer 6652 * will not start new selections. We do this only 6653 * until we are safely paused without further selections 6654 * pending. 6655 */ 6656 ahd_outw(ahd, QFREEZE_COUNT, ahd_inw(ahd, QFREEZE_COUNT) + 1); 6657 ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN); 6658 do { 6659 struct scb *waiting_scb; 6660 6661 ahd_unpause(ahd); 6662 ahd_intr(ahd); 6663 ahd_pause(ahd); 6664 ahd_clear_critical_section(ahd); 6665 intstat = ahd_inb(ahd, INTSTAT); 6666 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 6667 if ((ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0) 6668 ahd_outb(ahd, SCSISEQ0, 6669 ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 6670 /* 6671 * In the non-packetized case, the sequencer (for Rev A), 6672 * relies on ENSELO remaining set after SELDO. The hardware 6673 * auto-clears ENSELO in the packetized case. 
6674 */ 6675 waiting_scb = ahd_lookup_scb(ahd, 6676 ahd_inw(ahd, WAITING_TID_HEAD)); 6677 if (waiting_scb != NULL 6678 && (waiting_scb->flags & SCB_PACKETIZED) == 0 6679 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0) 6680 ahd_outb(ahd, SCSISEQ0, 6681 ahd_inb(ahd, SCSISEQ0) | ENSELO); 6682 } while (--maxloops 6683 && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0) 6684 && ((intstat & INT_PEND) != 0 6685 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 6686 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); 6687 6688 if (maxloops == 0) { 6689 printf("Infinite interrupt loop, INTSTAT = %x", 6690 ahd_inb(ahd, INTSTAT)); 6691 } 6692 qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT); 6693 if (qfreeze_cnt == 0) { 6694 printf("%s: ahd_pause_and_flushwork with 0 qfreeze count!\n", 6695 ahd_name(ahd)); 6696 } else { 6697 qfreeze_cnt--; 6698 } 6699 ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt); 6700 if (qfreeze_cnt == 0) 6701 ahd_outb(ahd, SEQ_FLAGS2, 6702 ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN); 6703 6704 ahd_flush_qoutfifo(ahd); 6705 6706 ahd_platform_flushwork(ahd); 6707 ahd->flags &= ~AHD_ALL_INTERRUPTS; 6708 } 6709 6710 int 6711 ahd_suspend(struct ahd_softc *ahd) 6712 { 6713 #if 0 6714 uint8_t *ptr; 6715 int i; 6716 6717 ahd_pause_and_flushwork(ahd); 6718 6719 if (LIST_FIRST(&ahd->pending_scbs) != NULL) 6720 return (EBUSY); 6721 6722 #if AHD_TARGET_MODE 6723 /* 6724 * XXX What about ATIOs that have not yet been serviced? 6725 * Perhaps we should just refuse to be suspended if we 6726 * are acting in a target role. 
6727 */ 6728 if (ahd->pending_device != NULL) 6729 return (EBUSY); 6730 #endif 6731 6732 /* Save volatile registers */ 6733 ahd->suspend_state.channel[0].scsiseq = ahd_inb(ahd, SCSISEQ0); 6734 ahd->suspend_state.channel[0].sxfrctl0 = ahd_inb(ahd, SXFRCTL0); 6735 ahd->suspend_state.channel[0].sxfrctl1 = ahd_inb(ahd, SXFRCTL1); 6736 ahd->suspend_state.channel[0].simode0 = ahd_inb(ahd, SIMODE0); 6737 ahd->suspend_state.channel[0].simode1 = ahd_inb(ahd, SIMODE1); 6738 ahd->suspend_state.channel[0].seltimer = ahd_inb(ahd, SELTIMER); 6739 ahd->suspend_state.channel[0].seqctl = ahd_inb(ahd, SEQCTL0); 6740 ahd->suspend_state.dscommand0 = ahd_inb(ahd, DSCOMMAND0); 6741 ahd->suspend_state.dspcistatus = ahd_inb(ahd, DSPCISTATUS); 6742 6743 if ((ahd->features & AHD_DT) != 0) { 6744 u_int sfunct; 6745 6746 sfunct = ahd_inb(ahd, SFUNCT) & ~ALT_MODE; 6747 ahd_outb(ahd, SFUNCT, sfunct | ALT_MODE); 6748 ahd->suspend_state.optionmode = ahd_inb(ahd, OPTIONMODE); 6749 ahd_outb(ahd, SFUNCT, sfunct); 6750 ahd->suspend_state.crccontrol1 = ahd_inb(ahd, CRCCONTROL1); 6751 } 6752 6753 if ((ahd->features & AHD_MULTI_FUNC) != 0) 6754 ahd->suspend_state.scbbaddr = ahd_inb(ahd, SCBBADDR); 6755 6756 if ((ahd->features & AHD_ULTRA2) != 0) 6757 ahd->suspend_state.dff_thrsh = ahd_inb(ahd, DFF_THRSH); 6758 6759 ptr = ahd->suspend_state.scratch_ram; 6760 for (i = 0; i < 64; i++) 6761 *ptr++ = ahd_inb(ahd, SRAM_BASE + i); 6762 6763 if ((ahd->features & AHD_MORE_SRAM) != 0) { 6764 for (i = 0; i < 16; i++) 6765 *ptr++ = ahd_inb(ahd, TARG_OFFSET + i); 6766 } 6767 6768 ptr = ahd->suspend_state.btt; 6769 for (i = 0;i < AHD_NUM_TARGETS; i++) { 6770 int j; 6771 6772 for (j = 0;j < AHD_NUM_LUNS_NONPKT; j++) { 6773 u_int tcl; 6774 6775 tcl = BUILD_TCL_RAW(i, 'A', j); 6776 *ptr = ahd_find_busy_tcl(ahd, tcl); 6777 } 6778 } 6779 ahd_shutdown(ahd); 6780 #endif 6781 return (0); 6782 } 6783 6784 int 6785 ahd_resume(struct ahd_softc *ahd) 6786 { 6787 #if 0 6788 uint8_t *ptr; 6789 int i; 6790 6791 ahd_reset(ahd); 6792 
6793 ahd_build_free_scb_list(ahd); 6794 6795 /* Restore volatile registers */ 6796 ahd_outb(ahd, SCSISEQ0, ahd->suspend_state.channel[0].scsiseq); 6797 ahd_outb(ahd, SXFRCTL0, ahd->suspend_state.channel[0].sxfrctl0); 6798 ahd_outb(ahd, SXFRCTL1, ahd->suspend_state.channel[0].sxfrctl1); 6799 ahd_outb(ahd, SIMODE0, ahd->suspend_state.channel[0].simode0); 6800 ahd_outb(ahd, SIMODE1, ahd->suspend_state.channel[0].simode1); 6801 ahd_outb(ahd, SELTIMER, ahd->suspend_state.channel[0].seltimer); 6802 ahd_outb(ahd, SEQCTL0, ahd->suspend_state.channel[0].seqctl); 6803 if ((ahd->features & AHD_ULTRA2) != 0) 6804 ahd_outb(ahd, SCSIID_ULTRA2, ahd->our_id); 6805 else 6806 ahd_outb(ahd, SCSIID, ahd->our_id); 6807 6808 ahd_outb(ahd, DSCOMMAND0, ahd->suspend_state.dscommand0); 6809 ahd_outb(ahd, DSPCISTATUS, ahd->suspend_state.dspcistatus); 6810 6811 if ((ahd->features & AHD_DT) != 0) { 6812 u_int sfunct; 6813 6814 sfunct = ahd_inb(ahd, SFUNCT) & ~ALT_MODE; 6815 ahd_outb(ahd, SFUNCT, sfunct | ALT_MODE); 6816 ahd_outb(ahd, OPTIONMODE, ahd->suspend_state.optionmode); 6817 ahd_outb(ahd, SFUNCT, sfunct); 6818 ahd_outb(ahd, CRCCONTROL1, ahd->suspend_state.crccontrol1); 6819 } 6820 6821 if ((ahd->features & AHD_MULTI_FUNC) != 0) 6822 ahd_outb(ahd, SCBBADDR, ahd->suspend_state.scbbaddr); 6823 6824 if ((ahd->features & AHD_ULTRA2) != 0) 6825 ahd_outb(ahd, DFF_THRSH, ahd->suspend_state.dff_thrsh); 6826 6827 ptr = ahd->suspend_state.scratch_ram; 6828 for (i = 0; i < 64; i++) 6829 ahd_outb(ahd, SRAM_BASE + i, *ptr++); 6830 6831 if ((ahd->features & AHD_MORE_SRAM) != 0) { 6832 for (i = 0; i < 16; i++) 6833 ahd_outb(ahd, TARG_OFFSET + i, *ptr++); 6834 } 6835 6836 ptr = ahd->suspend_state.btt; 6837 for (i = 0;i < AHD_NUM_TARGETS; i++) { 6838 int j; 6839 6840 for (j = 0;j < AHD_NUM_LUNS; j++) { 6841 u_int tcl; 6842 6843 tcl = BUILD_TCL(i << 4, j); 6844 ahd_busy_tcl(ahd, tcl, *ptr); 6845 } 6846 } 6847 #endif 6848 return (0); 6849 } 6850 6851 /************************** Busy Target Table 
*********************************/
/*
 * Set SCBPTR to the SCB that contains the busy
 * table entry for TCL.  Return the offset into
 * the SCB that contains the entry for TCL.
 * saved_scbid is dereferenced and set to the
 * scbid that should be restored once manipulation
 * of the TCL entry is complete.
 */
static __inline u_int
ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
{
	u_int target_offset;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	target_offset = TCL_TARGET_OFFSET(tcl);

	/*
	 * Remember the caller's SCBPTR, then point SCBPTR at the SCB
	 * holding this TCL's busy entry.  The high two bits of the
	 * target offset select the SCB along with the LUN.
	 */
	*saved_scbid = ahd_get_scbptr(ahd);
	ahd_set_scbptr(ahd, ((target_offset & 0xC) << 4) | TCL_LUN(tcl));

	/*
	 * Compute the byte offset of the entry within that SCB.
	 * Entries are two bytes wide, hence the shift by one.
	 */
	return (SCB_DISCONNECTED_LISTS + ((target_offset & 0x3) << 1));
}

/*
 * Return the untagged transaction id for a given target/channel lun.
 */
u_int
ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
{
	u_int scbid;
	u_int scb_offset;
	u_int saved_scbptr;

	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
	scbid = ahd_inw_scbram(ahd, scb_offset);
	/* Put SCBPTR back the way we found it. */
	ahd_set_scbptr(ahd, saved_scbptr);
	return (scbid);
}

/*
 * Record scbid as the busy-table entry for the given TCL
 * (the untagged transaction outstanding on that target/lun).
 */
void
ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
{
	u_int scb_offset;
	u_int saved_scbptr;

	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
	ahd_outw(ahd, scb_offset, scbid);
	ahd_set_scbptr(ahd, saved_scbptr);
}

/************************** SCB and SCB queue management **********************/
/*
 * Return non-zero if the SCB matches the target/channel/lun/tag/role
 * description.  Wildcards (ALL_CHANNELS, CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD, SCB_LIST_NULL) match anything in their field.
 */
int
ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahd, scb);
	char chan = SCB_GET_CHANNEL(ahd, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#if AHD_TARGET_MODE
		int group;

		/* Initiator CCBs match on our tag, target CCBs on theirs. */
		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			      && ((tag == SCB_GET_TAG(scb))
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHD_TARGET_MODE */
		match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
#endif /* AHD_TARGET_MODE */
	}

	return match;
}

/*
 * Requeue (with CAM_REQUEUE_REQ) all qinfifo entries bound for the
 * same device as scb, then let the platform layer freeze the devq.
 */
void
ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
{
	int target;
	char channel;
	int lun;

	target = SCB_GET_TARGET(ahd, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahd, scb);

	ahd_search_qinfifo(ahd, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahd_platform_freeze_devq(ahd, scb);
}

/*
 * Append scb to the tail of the qinfifo and notify the sequencer
 * of the new host queue position.
 */
void
ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
{
	struct scb *prev_scb;
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	/* Find the current tail entry (if any) so we can link behind it. */
	prev_scb = NULL;
	if (ahd_qinfifo_count(ahd) != 0) {
		u_int prev_tag;
		u_int prev_pos;

		prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
		prev_tag = ahd->qinfifo[prev_pos];
		prev_scb = ahd_lookup_scb(ahd, prev_tag);
	}
	ahd_qinfifo_requeue(ahd, prev_scb, scb);
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Link scb into the qinfifo after prev_scb (or as the sequencer's
 * next SCB when prev_scb is NULL), maintaining the hardware's
 * next_hscb_busaddr chain.
 */
static void
ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		uint32_t busaddr;

		/* Queue head: hand our busaddr directly to the sequencer. */
		busaddr = ahd_le32toh(scb->hscb->hscb_busaddr);
		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF);
		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF);
		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF);
		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF);
	} else {
		prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
		ahd_sync_scb(ahd, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;
	scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of entries currently in the qinfifo: the distance (with
 * wrap) between the sequencer's consumer index and our producer index.
 */
static int
ahd_qinfifo_count(struct ahd_softc *ahd)
{
	u_int qinpos;
	u_int wrap_qinpos;
	u_int wrap_qinfifonext;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	qinpos = ahd_get_snscb_qoff(ahd);
	wrap_qinpos = AHD_QIN_WRAP(qinpos);
	wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);
	if (wrap_qinfifonext >= wrap_qinpos)
		return (wrap_qinfifonext - wrap_qinpos);
	else
		return (wrap_qinfifonext
			+ NUM_ELEMENTS(ahd->qinfifo) - wrap_qinpos);
}

/*
 * Recompute CMDS_PENDING from the pending_scbs list, excluding
 * qinfifo entries the sequencer has not yet consumed.
 */
void
ahd_reset_cmds_pending(struct ahd_softc *ahd)
{
	struct scb *scb;
	ahd_mode_state saved_modes;
	u_int pending_cmds;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Don't count any commands as outstanding that the
	 * sequencer has already marked for completion.
	 */
	ahd_flush_qoutfifo(ahd);

	pending_cmds = 0;
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		pending_cmds++;
	}
	ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
}

/*
 * Scan the qinfifo and the waiting-for-selection lists for SCBs
 * matching target/channel/lun/tag/role and apply `action` to each
 * match (complete with `status`, remove, print, or just count).
 * Returns the number of matches.
 */
int
ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahd_search_action action)
{
	struct scb *scb;
	struct scb *prev_scb;
	ahd_mode_state saved_modes;
	u_int qinstart;
	u_int qinpos;
	u_int qintail;
	u_int tid_next;
	u_int tid_prev;
	u_int scbid;
	u_int savedscbptr;
	uint32_t busaddr;
	int found;
	int targets;

	/* Must be in CCHAN mode */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Halt any pending SCB DMA.  The sequencer will reinitiate
	 * this dma if the qinfifo is not empty once we unpause.
	 */
	if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
	 == (CCARREN|CCSCBEN|CCSCBDIR)) {
		ahd_outb(ahd, CCSCBCTL,
			 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
		/* Spin until the DMA engine acknowledges the disable. */
		while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
			;
	}
	/* Determine sequencer's position in the qinfifo. */
	qintail = AHD_QIN_WRAP(ahd->qinfifonext);
	qinstart = ahd_get_snscb_qoff(ahd);
	qinpos = AHD_QIN_WRAP(qinstart);
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_PRINT) {
		printf("qinstart = %d qinfifonext = %d\nQINFIFO:",
		       qinstart, ahd->qinfifonext);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahd->qinfifonext = qinstart;
	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF);
	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF);
	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF);
	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF);

	while (qinpos != qintail) {
		scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
		if (scb == NULL) {
			printf("qinpos = %d, SCB index = %d\n",
			       qinpos, ahd->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				/* Only overwrite a still-in-progress status. */
				ostat = ahd_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahd_set_transaction_status(scb,
								   status);
				cstat = ahd_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahd_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahd_done(ahd, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Not re-queued, so it drops out of the fifo. */
				break;
			case SEARCH_PRINT:
				printf(" 0x%x", ahd->qinfifo[qinpos]);
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				ahd_qinfifo_requeue(ahd, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Non-matching entries always go back on the queue. */
			ahd_qinfifo_requeue(ahd, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos = AHD_QIN_WRAP(qinpos+1);
	}

	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);

	if (action == SEARCH_PRINT)
		printf("\nWAITING_TID_QUEUES:\n");

	/*
	 * Search waiting for selection lists.  We traverse the
	 * list of "their ids" waiting for selection and, if
	 * appropriate, traverse the SCBs of each "their id"
	 * looking for matches.
	 */
	savedscbptr = ahd_get_scbptr(ahd);
	tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
	tid_prev = SCB_LIST_NULL;
	targets = 0;
	for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
		u_int tid_head;

		/*
		 * We limit based on the number of SCBs since
		 * MK_MESSAGE SCBs are not in the per-tid lists.
		 */
		targets++;
		if (targets > AHD_SCB_MAX) {
			panic("TID LIST LOOP");
		}
		if (scbid >= ahd->scb_data.numscbs) {
			printf("%s: Waiting TID List inconsistency. "
			       "SCB index == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: SCB = 0x%x Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting TID List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
		/* Lun/tag are wildcarded here; per-SCB matching happens below. */
		if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
			tid_prev = scbid;
			continue;
		}

		/*
		 * We found a list of scbs that needs to be searched.
		 */
		if (action == SEARCH_PRINT)
			printf(" %d ( ", SCB_GET_TARGET(ahd, scb));
		tid_head = scbid;
		found += ahd_search_scb_list(ahd, target, channel,
					     lun, tag, role, status,
					     action, &tid_head,
					     SCB_GET_TARGET(ahd, scb));
		/* Re-link the TID list if its head changed or emptied. */
		if (tid_head != scbid)
			ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
		if (!SCBID_IS_NULL(tid_head))
			tid_prev = tid_head;
		if (action == SEARCH_PRINT)
			printf(")\n");
	}
	ahd_set_scbptr(ahd, savedscbptr);
	ahd_restore_modes(ahd, saved_modes);
	return (found);
}

/*
 * Walk one per-target waiting-for-selection SCB list, applying
 * `action` to each SCB matching the description.  *list_head is
 * updated if the head of the list is removed.  Returns the number
 * of matches found.
 */
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
		    int lun, u_int tag, role_t role, uint32_t status,
		    ahd_search_action action, u_int *list_head, u_int tid)
{
	struct scb *scb;
	u_int scbid;
	u_int next;
	u_int prev;
	int found;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	found = 0;
	prev = SCB_LIST_NULL;
	next = *list_head;
	for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
		if (scbid >= ahd->scb_data.numscbs) {
			printf("%s:SCB List inconsistency. "
			       "SCB == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: SCB = %d Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		next = ahd_inw_scbram(ahd, SCB_NEXT);
		if (ahd_match_scb(ahd, scb, target, channel,
				  lun, SCB_LIST_NULL, role) == 0) {
			prev = scbid;
			continue;
		}
		found++;
		switch (action) {
		case SEARCH_COMPLETE:
		{
			cam_status ostat;
			cam_status cstat;

			ostat = ahd_get_transaction_status(scb);
			if (ostat == CAM_REQ_INPROG)
				ahd_set_transaction_status(scb, status);
			cstat = ahd_get_transaction_status(scb);
			if (cstat != CAM_REQ_CMP)
				ahd_freeze_scb(scb);
			if ((scb->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB in Waiting List\n");
			ahd_done(ahd, scb);
			/* FALLTHROUGH */
		}
		case SEARCH_REMOVE:
			ahd_rem_wscb(ahd, scbid, prev, next, tid);
			if (prev == SCB_LIST_NULL)
				*list_head = next;
			break;
		case SEARCH_PRINT:
			printf("0x%x ", scbid);
			/* FALLTHROUGH */
		case SEARCH_COUNT:
			prev = scbid;
			break;
		}
		if (found > AHD_SCB_MAX)
			panic("SCB LIST LOOP");
	}
	/* Completed/removed SCBs no longer count against CMDS_PENDING. */
	if (action == SEARCH_COMPLETE
	 || action == SEARCH_REMOVE)
		ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found);
	return (found);
}

/*
 * Splice a (possibly emptied or re-headed) TID list back into the
 * chain of waiting TID lists, keeping WAITING_TID_HEAD/TAIL correct.
 */
static void
ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
		    u_int tid_cur, u_int tid_next)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);

	if (SCBID_IS_NULL(tid_cur)) {

		/* Bypass current TID list */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_next);
		}
		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
	} else {

		/* Stitch through tid_cur */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_cur);
		}
		ahd_set_scbptr(ahd, tid_cur);
		ahd_outw(ahd, SCB_NEXT2, tid_next);

		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
	     u_int prev, u_int next, u_int tid)
{
	u_int tail_offset;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	if (!SCBID_IS_NULL(prev)) {
		ahd_set_scbptr(ahd, prev);
		ahd_outw(ahd, SCB_NEXT, next);
	}

	/*
	 * SCBs that had MK_MESSAGE set in them will not
	 * be queued to the per-target lists, so don't
	 * blindly clear the tail pointer.
	 */
	tail_offset = WAITING_SCB_TAILS + (2 * tid);
	if (SCBID_IS_NULL(next)
	 && ahd_inw(ahd, tail_offset) == scbid)
		ahd_outw(ahd, tail_offset, prev);
	ahd_add_scb_to_free_list(ahd, scbid);
	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
{
	/* XXX Need some other mechanism to designate "free". */
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
	 */
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
int
ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct scb *scbp;
	struct scb *scbp_next;
	u_int i, j;
	u_int maxtarget;
	u_int minlun;
	u_int maxlun;
	int found;
	ahd_mode_state saved_modes;

	/* restore this when we're done */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* First, complete any matching SCBs still in the input queue. */
	found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {
		minlun = 0;
		maxlun = AHD_NUM_LUNS_NONPKT;
	} else if (lun >= AHD_NUM_LUNS_NONPKT) {
		/* Out-of-range lun: nothing in the non-packetized table. */
		minlun = maxlun = 0;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL_RAW(i, 'A', j);
				scbid = ahd_find_busy_tcl(ahd, tcl);
				scbp = ahd_lookup_scb(ahd, scbid);
				if (scbp == NULL
				 || ahd_match_scb(ahd, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j));
			}
		}
	}

	/*
	 * Don't abort commands that have already completed,
	 * but haven't quite made it up to the host yet.
	 */
	ahd_flush_qoutfifo(ahd);

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahd->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Fetch the successor first; ahd_done() unlinks scbp. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahd_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahd_set_transaction_status(scbp, status);
			if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahd_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahd_done(ahd, scbp);
			found++;
		}
	}
	ahd_restore_modes(ahd, saved_modes);
	ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
	return found;
}

/*
 * Assert SCSIRSTO to reset the currently selected bus, with ENSCSIRST
 * masked so we do not take an interrupt for our own reset.
 */
static void
ahd_reset_current_bus(struct ahd_softc *ahd)
{
	uint8_t scsiseq;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
	ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
	/* Hold reset asserted for the required settle time. */
	ahd_delay(AHD_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahd_outb(ahd, SCSISEQ0, scsiseq);
	if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
		/*
		 * 2A Razor #474
		 * Certain chip state is not cleared for
		 * SCSI bus resets that we initiate, so
		 * we must reset the chip.
7498 */ 7499 ahd_delay(AHD_BUSRESET_DELAY); 7500 ahd_reset(ahd); 7501 ahd_intr_enable(ahd, /*enable*/TRUE); 7502 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 7503 } 7504 7505 ahd_clear_intstat(ahd); 7506 } 7507 7508 int 7509 ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) 7510 { 7511 struct ahd_devinfo devinfo; 7512 u_int initiator; 7513 u_int target; 7514 u_int max_scsiid; 7515 int found; 7516 u_int fifo; 7517 u_int next_fifo; 7518 7519 ahd->pending_device = NULL; 7520 7521 ahd_compile_devinfo(&devinfo, 7522 CAM_TARGET_WILDCARD, 7523 CAM_TARGET_WILDCARD, 7524 CAM_LUN_WILDCARD, 7525 channel, ROLE_UNKNOWN); 7526 ahd_pause(ahd); 7527 7528 /* Make sure the sequencer is in a safe location. */ 7529 ahd_clear_critical_section(ahd); 7530 7531 #if AHD_TARGET_MODE 7532 if ((ahd->flags & AHD_TARGETROLE) != 0) { 7533 ahd_run_tqinfifo(ahd, /*paused*/TRUE); 7534 } 7535 #endif 7536 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7537 7538 /* 7539 * Disable selections so no automatic hardware 7540 * functions will modify chip state. 7541 */ 7542 ahd_outb(ahd, SCSISEQ0, 0); 7543 ahd_outb(ahd, SCSISEQ1, 0); 7544 7545 /* 7546 * Safely shut down our DMA engines. Always start with 7547 * the FIFO that is not currently active (if any are 7548 * actively connected). 7549 */ 7550 next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; 7551 if (next_fifo > CURRFIFO_1) 7552 /* If disconneced, arbitrarily start with FIFO1. */ 7553 next_fifo = fifo = 0; 7554 do { 7555 next_fifo ^= CURRFIFO_1; 7556 ahd_set_modes(ahd, next_fifo, next_fifo); 7557 ahd_outb(ahd, DFCNTRL, 7558 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN)); 7559 while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) 7560 ahd_delay(10); 7561 /* 7562 * Set CURRFIFO to the now inactive channel. 
7563 */ 7564 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7565 ahd_outb(ahd, DFFSTAT, next_fifo); 7566 } while (next_fifo != fifo); 7567 7568 /* 7569 * Reset the bus if we are initiating this reset 7570 */ 7571 ahd_clear_msg_state(ahd); 7572 ahd_outb(ahd, SIMODE1, 7573 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST|ENBUSFREE)); 7574 7575 if (initiate_reset) 7576 ahd_reset_current_bus(ahd); 7577 7578 ahd_clear_intstat(ahd); 7579 7580 /* 7581 * Clean up all the state information for the 7582 * pending transactions on this bus. 7583 */ 7584 found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel, 7585 CAM_LUN_WILDCARD, SCB_LIST_NULL, 7586 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 7587 7588 /* 7589 * Cleanup anything left in the FIFOs. 7590 */ 7591 ahd_clear_fifo(ahd, 0); 7592 ahd_clear_fifo(ahd, 1); 7593 7594 /* 7595 * Revert to async/narrow transfers until we renegotiate. 7596 */ 7597 max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; 7598 for (target = 0; target <= max_scsiid; target++) { 7599 7600 if (ahd->enabled_targets[target] == NULL) 7601 continue; 7602 for (initiator = 0; initiator <= max_scsiid; initiator++) { 7603 struct ahd_devinfo devinfo; 7604 7605 ahd_compile_devinfo(&devinfo, target, initiator, 7606 CAM_LUN_WILDCARD, 7607 'A', ROLE_UNKNOWN); 7608 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 7609 AHD_TRANS_CUR, /*paused*/TRUE); 7610 ahd_set_syncrate(ahd, &devinfo, /*period*/0, 7611 /*offset*/0, /*ppr_options*/0, 7612 AHD_TRANS_CUR, /*paused*/TRUE); 7613 } 7614 } 7615 7616 #ifdef AHD_TARGET_MODE 7617 max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; 7618 7619 /* 7620 * Send an immediate notify ccb to all target more peripheral 7621 * drivers affected by this action. 
7622 */ 7623 for (target = 0; target <= max_scsiid; target++) { 7624 struct ahd_tmode_tstate* tstate; 7625 u_int lun; 7626 7627 tstate = ahd->enabled_targets[target]; 7628 if (tstate == NULL) 7629 continue; 7630 for (lun = 0; lun < AHD_NUM_LUNS; lun++) { 7631 struct ahd_tmode_lstate* lstate; 7632 7633 lstate = tstate->enabled_luns[lun]; 7634 if (lstate == NULL) 7635 continue; 7636 7637 ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD, 7638 EVENT_TYPE_BUS_RESET, /*arg*/0); 7639 ahd_send_lstate_events(ahd, lstate); 7640 } 7641 } 7642 #endif 7643 /* Notify the XPT that a bus reset occurred */ 7644 ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD, 7645 CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); 7646 ahd_restart(ahd); 7647 /* 7648 * Freeze the SIMQ until our poller can determine that 7649 * the bus reset has really gone away. We set the initial 7650 * timer to 0 to have the check performed as soon as possible 7651 * from the timer context. 7652 */ 7653 if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) { 7654 ahd->flags |= AHD_RESET_POLL_ACTIVE; 7655 ahd_freeze_simq(ahd); 7656 ahd_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd); 7657 } 7658 return (found); 7659 } 7660 7661 7662 #define AHD_RESET_POLL_US 1000 7663 static void 7664 ahd_reset_poll(void *arg) 7665 { 7666 struct ahd_softc *ahd; 7667 u_int scsiseq1; 7668 u_long l; 7669 u_long s; 7670 7671 ahd_list_lock(&l); 7672 ahd = ahd_find_softc((struct ahd_softc *)arg); 7673 if (ahd == NULL) { 7674 printf("ahd_reset_poll: Instance %p no longer exists\n", arg); 7675 ahd_list_unlock(&l); 7676 return; 7677 } 7678 ahd_lock(ahd, &s); 7679 ahd_pause(ahd); 7680 ahd_update_modes(ahd); 7681 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7682 ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); 7683 if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) { 7684 ahd_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US, 7685 ahd_reset_poll, ahd); 7686 ahd_unpause(ahd); 7687 ahd_unlock(ahd, &s); 7688 ahd_list_unlock(&l); 7689 return; 7690 } 7691 
7692 /* Reset is now low. Complete chip reinitialization. */ 7693 ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST); 7694 scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); 7695 ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP)); 7696 ahd_unpause(ahd); 7697 ahd->flags &= ~AHD_RESET_POLL_ACTIVE; 7698 ahd_unlock(ahd, &s); 7699 ahd_release_simq(ahd); 7700 ahd_list_unlock(&l); 7701 } 7702 7703 /**************************** Statistics Processing ***************************/ 7704 static void 7705 ahd_stat_timer(void *arg) 7706 { 7707 struct ahd_softc *ahd; 7708 u_long l; 7709 u_long s; 7710 int enint_coal; 7711 7712 ahd_list_lock(&l); 7713 ahd = ahd_find_softc((struct ahd_softc *)arg); 7714 if (ahd == NULL) { 7715 printf("ahd_stat_timer: Instance %p no longer exists\n", arg); 7716 ahd_list_unlock(&l); 7717 return; 7718 } 7719 ahd_lock(ahd, &s); 7720 7721 enint_coal = ahd->hs_mailbox & ENINT_COALESS; 7722 if (ahd->cmdcmplt_total > ahd->int_coalessing_threshold) 7723 enint_coal |= ENINT_COALESS; 7724 else if (ahd->cmdcmplt_total < ahd->int_coalessing_stop_threshold) 7725 enint_coal &= ~ENINT_COALESS; 7726 7727 if (enint_coal != (ahd->hs_mailbox & ENINT_COALESS)) { 7728 ahd_enable_coalessing(ahd, enint_coal); 7729 #ifdef AHD_DEBUG 7730 if ((ahd_debug & AHD_SHOW_INT_COALESSING) != 0) 7731 printf("%s: Interrupt coalessing " 7732 "now %sabled. Cmds %d\n", 7733 ahd_name(ahd), 7734 (enint_coal & ENINT_COALESS) ? 
"en" : "dis", 7735 ahd->cmdcmplt_total); 7736 #endif 7737 } 7738 7739 ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); 7740 ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; 7741 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; 7742 ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, 7743 ahd_stat_timer, ahd); 7744 ahd_unlock(ahd, &s); 7745 ahd_list_unlock(&l); 7746 } 7747 7748 /****************************** Status Processing *****************************/ 7749 void 7750 ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb) 7751 { 7752 if (scb->hscb->shared_data.istatus.scsi_status != 0) { 7753 ahd_handle_scsi_status(ahd, scb); 7754 } else { 7755 ahd_calc_residual(ahd, scb); 7756 ahd_done(ahd, scb); 7757 } 7758 } 7759 7760 void 7761 ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) 7762 { 7763 struct hardware_scb *hscb; 7764 u_int qfreeze_cnt; 7765 7766 /* 7767 * The sequencer freezes its select-out queue 7768 * anytime a SCSI status error occurs. We must 7769 * handle the error and decrement the QFREEZE count 7770 * to allow the sequencer to continue. 7771 */ 7772 hscb = scb->hscb; 7773 7774 /* Freeze the queue until the client sees the error. */ 7775 ahd_freeze_devq(ahd, scb); 7776 ahd_freeze_scb(scb); 7777 qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT); 7778 if (qfreeze_cnt == 0) { 7779 printf("%s: Bad status with 0 qfreeze count!\n", ahd_name(ahd)); 7780 } else { 7781 qfreeze_cnt--; 7782 ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt); 7783 } 7784 if (qfreeze_cnt == 0) 7785 ahd_outb(ahd, SEQ_FLAGS2, 7786 ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN); 7787 7788 /* Don't want to clobber the original sense code */ 7789 if ((scb->flags & SCB_SENSE) != 0) { 7790 /* 7791 * Clear the SCB_SENSE Flag and perform 7792 * a normal command completion. 
7793 */ 7794 scb->flags &= ~SCB_SENSE; 7795 ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 7796 ahd_done(ahd, scb); 7797 return; 7798 } 7799 ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 7800 ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status); 7801 switch (hscb->shared_data.istatus.scsi_status) { 7802 case STATUS_PKT_SENSE: 7803 { 7804 struct scsi_status_iu_header *siu; 7805 7806 ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD); 7807 siu = (struct scsi_status_iu_header *)scb->sense_data; 7808 ahd_set_scsi_status(scb, siu->status); 7809 #ifdef AHD_DEBUG 7810 if ((ahd_debug & AHD_SHOW_SENSE) != 0) { 7811 ahd_print_path(ahd, scb); 7812 printf("SCB 0x%x Received PKT Status of 0x%x\n", 7813 SCB_GET_TAG(scb), siu->status); 7814 printf("\tflags = 0x%x, sense len = 0x%x, " 7815 "pktfail = 0x%x\n", 7816 siu->flags, scsi_4btoul(siu->sense_length), 7817 scsi_4btoul(siu->pkt_failures_length)); 7818 } 7819 #endif 7820 if ((siu->flags & SIU_RSPVALID) != 0) { 7821 ahd_print_path(ahd, scb); 7822 if (scsi_4btoul(siu->pkt_failures_length) < 4) { 7823 printf("Unable to parse pkt_failures\n"); 7824 } else { 7825 7826 switch (SIU_PKTFAIL_CODE(siu)) { 7827 case SIU_PFC_NONE: 7828 printf("No packet failure found\n"); 7829 break; 7830 case SIU_PFC_CIU_FIELDS_INVALID: 7831 printf("Invalid Command IU Field\n"); 7832 break; 7833 case SIU_PFC_TMF_NOT_SUPPORTED: 7834 printf("TMF not supportd\n"); 7835 break; 7836 case SIU_PFC_TMF_FAILED: 7837 printf("TMF failed\n"); 7838 break; 7839 case SIU_PFC_INVALID_TYPE_CODE: 7840 printf("Invalid L_Q Type code\n"); 7841 break; 7842 case SIU_PFC_ILLEGAL_REQUEST: 7843 printf("Illegal request\n"); 7844 default: 7845 break; 7846 } 7847 } 7848 if (siu->status == SCSI_STATUS_OK) 7849 ahd_set_transaction_status(scb, 7850 CAM_REQ_CMP_ERR); 7851 } 7852 if ((siu->flags & SIU_SNSVALID) != 0) { 7853 scb->flags |= SCB_PKT_SENSE; 7854 #ifdef AHD_DEBUG 7855 if ((ahd_debug & AHD_SHOW_SENSE) != 0) 7856 printf("Sense data available\n"); 7857 
#endif 7858 } 7859 ahd_done(ahd, scb); 7860 break; 7861 } 7862 case SCSI_STATUS_CMD_TERMINATED: 7863 case SCSI_STATUS_CHECK_COND: 7864 { 7865 struct ahd_devinfo devinfo; 7866 struct ahd_dma_seg *sg; 7867 struct scsi_sense *sc; 7868 struct ahd_initiator_tinfo *targ_info; 7869 struct ahd_tmode_tstate *tstate; 7870 struct ahd_transinfo *tinfo; 7871 #ifdef AHD_DEBUG 7872 if (ahd_debug & AHD_SHOW_SENSE) { 7873 ahd_print_path(ahd, scb); 7874 printf("SCB %d: requests Check Status\n", 7875 SCB_GET_TAG(scb)); 7876 } 7877 #endif 7878 7879 if (ahd_perform_autosense(scb) == 0) 7880 break; 7881 7882 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), 7883 SCB_GET_TARGET(ahd, scb), 7884 SCB_GET_LUN(scb), 7885 SCB_GET_CHANNEL(ahd, scb), 7886 ROLE_INITIATOR); 7887 targ_info = ahd_fetch_transinfo(ahd, 7888 devinfo.channel, 7889 devinfo.our_scsiid, 7890 devinfo.target, 7891 &tstate); 7892 tinfo = &targ_info->curr; 7893 sg = scb->sg_list; 7894 sc = (struct scsi_sense *)hscb->shared_data.idata.cdb; 7895 /* 7896 * Save off the residual if there is one. 7897 */ 7898 ahd_update_residual(ahd, scb); 7899 #ifdef AHD_DEBUG 7900 if (ahd_debug & AHD_SHOW_SENSE) { 7901 ahd_print_path(ahd, scb); 7902 printf("Sending Sense\n"); 7903 } 7904 #endif 7905 scb->sg_count = 0; 7906 sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb), 7907 ahd_get_sense_bufsize(ahd, scb), 7908 /*last*/TRUE); 7909 sc->opcode = REQUEST_SENSE; 7910 sc->byte2 = 0; 7911 if (tinfo->protocol_version <= SCSI_REV_2 7912 && SCB_GET_LUN(scb) < 8) 7913 sc->byte2 = SCB_GET_LUN(scb) << 5; 7914 sc->unused[0] = 0; 7915 sc->unused[1] = 0; 7916 sc->length = ahd_get_sense_bufsize(ahd, scb); 7917 sc->control = 0; 7918 7919 /* 7920 * We can't allow the target to disconnect. 7921 * This will be an untagged transaction and 7922 * having the target disconnect will make this 7923 * transaction indestinguishable from outstanding 7924 * tagged transactions. 
7925 */ 7926 hscb->control = 0; 7927 7928 /* 7929 * This request sense could be because the 7930 * the device lost power or in some other 7931 * way has lost our transfer negotiations. 7932 * Renegotiate if appropriate. Unit attention 7933 * errors will be reported before any data 7934 * phases occur. 7935 */ 7936 if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) { 7937 ahd_update_neg_request(ahd, &devinfo, 7938 tstate, targ_info, 7939 AHD_NEG_IF_NON_ASYNC); 7940 } 7941 if (tstate->auto_negotiate & devinfo.target_mask) { 7942 hscb->control |= MK_MESSAGE; 7943 scb->flags &= 7944 ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET); 7945 scb->flags |= SCB_AUTO_NEGOTIATE; 7946 } 7947 hscb->cdb_len = sizeof(*sc); 7948 ahd_setup_data_scb(ahd, scb); 7949 scb->flags |= SCB_SENSE; 7950 ahd_queue_scb(ahd, scb); 7951 /* 7952 * Ensure we have enough time to actually 7953 * retrieve the sense. 7954 */ 7955 ahd_scb_timer_reset(scb, 5 * 1000000); 7956 break; 7957 } 7958 case SCSI_STATUS_OK: 7959 printf("%s: Interrupted for staus of 0???\n", 7960 ahd_name(ahd)); 7961 /* FALLTHROUGH */ 7962 default: 7963 ahd_done(ahd, scb); 7964 break; 7965 } 7966 } 7967 7968 /* 7969 * Calculate the residual for a just completed SCB. 7970 */ 7971 void 7972 ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb) 7973 { 7974 struct hardware_scb *hscb; 7975 struct initiator_status *spkt; 7976 uint32_t sgptr; 7977 uint32_t resid_sgptr; 7978 uint32_t resid; 7979 7980 /* 7981 * 5 cases. 7982 * 1) No residual. 7983 * SG_STATUS_VALID clear in sgptr. 7984 * 2) Transferless command 7985 * 3) Never performed any transfers. 7986 * sgptr has SG_FULL_RESID set. 7987 * 4) No residual but target did not 7988 * save data pointers after the 7989 * last transfer, so sgptr was 7990 * never updated. 7991 * 5) We have a partial residual. 7992 * Use residual_sgptr to determine 7993 * where we are. 
7994 */ 7995 7996 hscb = scb->hscb; 7997 sgptr = ahd_le32toh(hscb->sgptr); 7998 if ((sgptr & SG_STATUS_VALID) == 0) 7999 /* Case 1 */ 8000 return; 8001 sgptr &= ~SG_STATUS_VALID; 8002 8003 if ((sgptr & SG_LIST_NULL) != 0) 8004 /* Case 2 */ 8005 return; 8006 8007 /* 8008 * Residual fields are the same in both 8009 * target and initiator status packets, 8010 * so we can always use the initiator fields 8011 * regardless of the role for this SCB. 8012 */ 8013 spkt = &hscb->shared_data.istatus; 8014 resid_sgptr = ahd_le32toh(spkt->residual_sgptr); 8015 if ((sgptr & SG_FULL_RESID) != 0) { 8016 /* Case 3 */ 8017 resid = ahd_get_transfer_length(scb); 8018 } else if ((resid_sgptr & SG_LIST_NULL) != 0) { 8019 /* Case 4 */ 8020 return; 8021 } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { 8022 ahd_print_path(ahd, scb); 8023 printf("data overrun detected Tag == 0x%x.\n", 8024 SCB_GET_TAG(scb)); 8025 ahd_freeze_devq(ahd, scb); 8026 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); 8027 ahd_freeze_scb(scb); 8028 return; 8029 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { 8030 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); 8031 /* NOTREACHED */ 8032 } else { 8033 struct ahd_dma_seg *sg; 8034 8035 /* 8036 * Remainder of the SG where the transfer 8037 * stopped. 8038 */ 8039 resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; 8040 sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); 8041 8042 /* The residual sg_ptr always points to the next sg */ 8043 sg--; 8044 8045 /* 8046 * Add up the contents of all residual 8047 * SG segments that are after the SG where 8048 * the transfer stopped. 
8049 */ 8050 while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { 8051 sg++; 8052 resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 8053 } 8054 } 8055 if ((scb->flags & SCB_SENSE) == 0) 8056 ahd_set_residual(scb, resid); 8057 else 8058 ahd_set_sense_residual(scb, resid); 8059 8060 #ifdef AHD_DEBUG 8061 if ((ahd_debug & AHD_SHOW_MISC) != 0) { 8062 ahd_print_path(ahd, scb); 8063 printf("Handled %sResidual of %d bytes\n", 8064 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 8065 } 8066 #endif 8067 } 8068 8069 /******************************* Target Mode **********************************/ 8070 #ifdef AHD_TARGET_MODE 8071 /* 8072 * Add a target mode event to this lun's queue 8073 */ 8074 static void 8075 ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, 8076 u_int initiator_id, u_int event_type, u_int event_arg) 8077 { 8078 struct ahd_tmode_event *event; 8079 int pending; 8080 8081 xpt_freeze_devq(lstate->path, /*count*/1); 8082 if (lstate->event_w_idx >= lstate->event_r_idx) 8083 pending = lstate->event_w_idx - lstate->event_r_idx; 8084 else 8085 pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 8086 - (lstate->event_r_idx - lstate->event_w_idx); 8087 8088 if (event_type == EVENT_TYPE_BUS_RESET 8089 || event_type == MSG_BUS_DEV_RESET) { 8090 /* 8091 * Any earlier events are irrelevant, so reset our buffer. 8092 * This has the effect of allowing us to deal with reset 8093 * floods (an external device holding down the reset line) 8094 * without losing the event that is really interesting. 
8095 */ 8096 lstate->event_r_idx = 0; 8097 lstate->event_w_idx = 0; 8098 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 8099 } 8100 8101 if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { 8102 xpt_print_path(lstate->path); 8103 printf("immediate event %x:%x lost\n", 8104 lstate->event_buffer[lstate->event_r_idx].event_type, 8105 lstate->event_buffer[lstate->event_r_idx].event_arg); 8106 lstate->event_r_idx++; 8107 if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) 8108 lstate->event_r_idx = 0; 8109 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 8110 } 8111 8112 event = &lstate->event_buffer[lstate->event_w_idx]; 8113 event->initiator_id = initiator_id; 8114 event->event_type = event_type; 8115 event->event_arg = event_arg; 8116 lstate->event_w_idx++; 8117 if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) 8118 lstate->event_w_idx = 0; 8119 } 8120 8121 /* 8122 * Send any target mode events queued up waiting 8123 * for immediate notify resources. 8124 */ 8125 void 8126 ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate) 8127 { 8128 struct ccb_hdr *ccbh; 8129 struct ccb_immed_notify *inot; 8130 8131 while (lstate->event_r_idx != lstate->event_w_idx 8132 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 8133 struct ahd_tmode_event *event; 8134 8135 event = &lstate->event_buffer[lstate->event_r_idx]; 8136 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 8137 inot = (struct ccb_immed_notify *)ccbh; 8138 switch (event->event_type) { 8139 case EVENT_TYPE_BUS_RESET: 8140 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 8141 break; 8142 default: 8143 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 8144 inot->message_args[0] = event->event_type; 8145 inot->message_args[1] = event->event_arg; 8146 break; 8147 } 8148 inot->initiator_id = event->initiator_id; 8149 inot->sense_len = 0; 8150 xpt_done((union ccb *)inot); 8151 lstate->event_r_idx++; 8152 if (lstate->event_r_idx == 
AHD_TMODE_EVENT_BUFFER_SIZE) 8153 lstate->event_r_idx = 0; 8154 } 8155 } 8156 #endif 8157 8158 /******************** Sequencer Program Patching/Download *********************/ 8159 8160 #ifdef AHD_DUMP_SEQ 8161 void 8162 ahd_dumpseq(struct ahd_softc* ahd) 8163 { 8164 int i; 8165 int max_prog; 8166 8167 max_prog = 2048; 8168 8169 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 8170 ahd_outb(ahd, PRGMCNT, 0); 8171 ahd_outb(ahd, PRGMCNT+1, 0); 8172 for (i = 0; i < max_prog; i++) { 8173 uint8_t ins_bytes[4]; 8174 8175 ahd_insb(ahd, SEQRAM, ins_bytes, 4); 8176 printf("0x%08x\n", ins_bytes[0] << 24 8177 | ins_bytes[1] << 16 8178 | ins_bytes[2] << 8 8179 | ins_bytes[3]); 8180 } 8181 } 8182 #endif 8183 8184 static void 8185 ahd_loadseq(struct ahd_softc *ahd) 8186 { 8187 struct cs cs_table[num_critical_sections]; 8188 u_int begin_set[num_critical_sections]; 8189 u_int end_set[num_critical_sections]; 8190 struct patch *cur_patch; 8191 u_int cs_count; 8192 u_int cur_cs; 8193 u_int i; 8194 int downloaded; 8195 u_int skip_addr; 8196 u_int sg_prefetch_cnt; 8197 u_int sg_prefetch_cnt_limit; 8198 u_int sg_prefetch_align; 8199 u_int sg_size; 8200 uint8_t download_consts[DOWNLOAD_CONST_COUNT]; 8201 8202 if (bootverbose) 8203 printf("%s: Downloading Sequencer Program...", 8204 ahd_name(ahd)); 8205 8206 #if DOWNLOAD_CONST_COUNT != 7 8207 #error "Download Const Mismatch" 8208 #endif 8209 /* 8210 * Start out with 0 critical sections 8211 * that apply to this firmware load. 8212 */ 8213 cs_count = 0; 8214 cur_cs = 0; 8215 memset(begin_set, 0, sizeof(begin_set)); 8216 memset(end_set, 0, sizeof(end_set)); 8217 8218 /* 8219 * Setup downloadable constant table. 8220 * 8221 * The computation for the S/G prefetch variables is 8222 * a bit complicated. We would like to always fetch 8223 * in terms of cachelined sized increments. 
However, 8224 * if the cacheline is not an even multiple of the 8225 * SG element size or is larger than our SG RAM, using 8226 * just the cache size might leave us with only a portion 8227 * of an SG element at the tail of a prefetch. If the 8228 * cacheline is larger than our S/G prefetch buffer less 8229 * the size of an SG element, we may round down to a cacheline 8230 * that doesn't contain any or all of the S/G of interest 8231 * within the bounds of our S/G ram. Provide variables to 8232 * the sequencer that will allow it to handle these edge 8233 * cases. 8234 */ 8235 /* Start by aligning to the nearest cacheline. */ 8236 sg_prefetch_align = ahd->pci_cachesize; 8237 if (sg_prefetch_align == 0) 8238 sg_prefetch_align = 8; 8239 /* Round down to the nearest power of 2. */ 8240 while (powerof2(sg_prefetch_align) == 0) 8241 sg_prefetch_align--; 8242 /* 8243 * If the cacheline boundary is greater than half our prefetch RAM 8244 * we risk not being able to fetch even a single complete S/G 8245 * segment if we align to that boundary. 8246 */ 8247 if (sg_prefetch_align > CCSGADDR_MAX/2) 8248 sg_prefetch_align = CCSGADDR_MAX/2; 8249 /* Start by fetching a single cacheline. */ 8250 sg_prefetch_cnt = sg_prefetch_align; 8251 /* 8252 * Increment the prefetch count by cachelines until 8253 * at least one S/G element will fit. 8254 */ 8255 sg_size = sizeof(struct ahd_dma_seg); 8256 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 8257 sg_size = sizeof(struct ahd_dma64_seg); 8258 while (sg_prefetch_cnt < sg_size) 8259 sg_prefetch_cnt += sg_prefetch_align; 8260 /* 8261 * If the cacheline is not an even multiple of 8262 * the S/G size, we may only get a partial S/G when 8263 * we align. Add a cacheline if this is the case. 
8264 */ 8265 if ((sg_prefetch_align % sg_size) != 0 8266 && (sg_prefetch_cnt < CCSGADDR_MAX)) 8267 sg_prefetch_cnt += sg_prefetch_align; 8268 /* 8269 * Lastly, compute a value that the sequencer can use 8270 * to determine if the remainder of the CCSGRAM buffer 8271 * has a full S/G element in it. 8272 */ 8273 sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1); 8274 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; 8275 download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit; 8276 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1); 8277 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1); 8278 download_consts[SG_SIZEOF] = sg_size; 8279 download_consts[PKT_OVERRUN_BUFOFFSET] = 8280 (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256; 8281 download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN; 8282 if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) 8283 download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_FULL_LUN; 8284 cur_patch = patches; 8285 downloaded = 0; 8286 skip_addr = 0; 8287 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 8288 ahd_outb(ahd, PRGMCNT, 0); 8289 ahd_outb(ahd, PRGMCNT+1, 0); 8290 8291 for (i = 0; i < sizeof(seqprog)/4; i++) { 8292 if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) { 8293 /* 8294 * Don't download this instruction as it 8295 * is in a patch that was removed. 8296 */ 8297 continue; 8298 } 8299 /* 8300 * Move through the CS table until we find a CS 8301 * that might apply to this instruction. 
8302 */ 8303 for (; cur_cs < num_critical_sections; cur_cs++) { 8304 if (critical_sections[cur_cs].end <= i) { 8305 if (begin_set[cs_count] == TRUE 8306 && end_set[cs_count] == FALSE) { 8307 cs_table[cs_count].end = downloaded; 8308 end_set[cs_count] = TRUE; 8309 cs_count++; 8310 } 8311 continue; 8312 } 8313 if (critical_sections[cur_cs].begin <= i 8314 && begin_set[cs_count] == FALSE) { 8315 cs_table[cs_count].begin = downloaded; 8316 begin_set[cs_count] = TRUE; 8317 } 8318 break; 8319 } 8320 ahd_download_instr(ahd, i, download_consts); 8321 downloaded++; 8322 } 8323 8324 ahd->num_critical_sections = cs_count; 8325 if (cs_count != 0) { 8326 8327 cs_count *= sizeof(struct cs); 8328 ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); 8329 if (ahd->critical_sections == NULL) 8330 panic("ahd_loadseq: Could not malloc"); 8331 memcpy(ahd->critical_sections, cs_table, cs_count); 8332 } 8333 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); 8334 8335 if (bootverbose) { 8336 printf(" %d instructions downloaded\n", downloaded); 8337 printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", 8338 ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags); 8339 } 8340 } 8341 8342 static int 8343 ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch, 8344 u_int start_instr, u_int *skip_addr) 8345 { 8346 struct patch *cur_patch; 8347 struct patch *last_patch; 8348 u_int num_patches; 8349 8350 num_patches = sizeof(patches)/sizeof(struct patch); 8351 last_patch = &patches[num_patches]; 8352 cur_patch = *start_patch; 8353 8354 while (cur_patch < last_patch && start_instr == cur_patch->begin) { 8355 8356 if (cur_patch->patch_func(ahd) == 0) { 8357 8358 /* Start rejecting code */ 8359 *skip_addr = start_instr + cur_patch->skip_instr; 8360 cur_patch += cur_patch->skip_patch; 8361 } else { 8362 /* Accepted this patch. Advance to the next 8363 * one and wait for our intruction pointer to 8364 * hit this point. 
8365 */ 8366 cur_patch++; 8367 } 8368 } 8369 8370 *start_patch = cur_patch; 8371 if (start_instr < *skip_addr) 8372 /* Still skipping */ 8373 return (0); 8374 8375 return (1); 8376 } 8377 8378 static u_int 8379 ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) 8380 { 8381 struct patch *cur_patch; 8382 int address_offset; 8383 u_int skip_addr; 8384 u_int i; 8385 8386 address_offset = 0; 8387 cur_patch = patches; 8388 skip_addr = 0; 8389 8390 for (i = 0; i < address;) { 8391 8392 ahd_check_patch(ahd, &cur_patch, i, &skip_addr); 8393 8394 if (skip_addr > i) { 8395 int end_addr; 8396 8397 end_addr = MIN(address, skip_addr); 8398 address_offset += end_addr - i; 8399 i = skip_addr; 8400 } else { 8401 i++; 8402 } 8403 } 8404 return (address - address_offset); 8405 } 8406 8407 static void 8408 ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) 8409 { 8410 union ins_formats instr; 8411 struct ins_format1 *fmt1_ins; 8412 struct ins_format3 *fmt3_ins; 8413 u_int opcode; 8414 8415 /* 8416 * The firmware is always compiled into a little endian format. 
8417 */ 8418 instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); 8419 8420 fmt1_ins = &instr.format1; 8421 fmt3_ins = NULL; 8422 8423 /* Pull the opcode */ 8424 opcode = instr.format1.opcode; 8425 switch (opcode) { 8426 case AIC_OP_JMP: 8427 case AIC_OP_JC: 8428 case AIC_OP_JNC: 8429 case AIC_OP_CALL: 8430 case AIC_OP_JNE: 8431 case AIC_OP_JNZ: 8432 case AIC_OP_JE: 8433 case AIC_OP_JZ: 8434 { 8435 fmt3_ins = &instr.format3; 8436 fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); 8437 /* FALLTHROUGH */ 8438 } 8439 case AIC_OP_OR: 8440 case AIC_OP_AND: 8441 case AIC_OP_XOR: 8442 case AIC_OP_ADD: 8443 case AIC_OP_ADC: 8444 case AIC_OP_BMOV: 8445 if (fmt1_ins->parity != 0) { 8446 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 8447 } 8448 fmt1_ins->parity = 0; 8449 /* FALLTHROUGH */ 8450 case AIC_OP_ROL: 8451 { 8452 int i, count; 8453 8454 /* Calculate odd parity for the instruction */ 8455 for (i = 0, count = 0; i < 31; i++) { 8456 uint32_t mask; 8457 8458 mask = 0x01 << i; 8459 if ((instr.integer & mask) != 0) 8460 count++; 8461 } 8462 if ((count & 0x01) == 0) 8463 instr.format1.parity = 1; 8464 8465 /* The sequencer is a little endian cpu */ 8466 instr.integer = ahd_htole32(instr.integer); 8467 ahd_outsb(ahd, SEQRAM, instr.bytes, 4); 8468 break; 8469 } 8470 default: 8471 panic("Unknown opcode encountered in seq program"); 8472 break; 8473 } 8474 } 8475 8476 static int 8477 ahd_probe_stack_size(struct ahd_softc *ahd) 8478 { 8479 int last_probe; 8480 8481 last_probe = 0; 8482 while (1) { 8483 int i; 8484 8485 /* 8486 * We avoid using 0 as a pattern to avoid 8487 * confusion if the stack implementation 8488 * "back-fills" with zeros when "poping' 8489 * entries. 
8490 */ 8491 for (i = 1; i <= last_probe+1; i++) { 8492 ahd_outb(ahd, STACK, i & 0xFF); 8493 ahd_outb(ahd, STACK, (i >> 8) & 0xFF); 8494 } 8495 8496 /* Verify */ 8497 for (i = last_probe+1; i > 0; i--) { 8498 u_int stack_entry; 8499 8500 stack_entry = ahd_inb(ahd, STACK) 8501 |(ahd_inb(ahd, STACK) << 8); 8502 if (stack_entry != i) 8503 goto sized; 8504 } 8505 last_probe++; 8506 } 8507 sized: 8508 return (last_probe); 8509 } 8510 8511 void 8512 ahd_dump_all_cards_state() 8513 { 8514 struct ahd_softc *list_ahd; 8515 8516 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { 8517 ahd_dump_card_state(list_ahd); 8518 } 8519 } 8520 8521 int 8522 ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, 8523 const char *name, u_int address, u_int value, 8524 u_int *cur_column, u_int wrap_point) 8525 { 8526 int printed; 8527 u_int printed_mask; 8528 8529 if (cur_column != NULL && *cur_column >= wrap_point) { 8530 printf("\n"); 8531 *cur_column = 0; 8532 } 8533 printed = printf("%s[0x%x]", name, value); 8534 if (table == NULL) { 8535 printed += printf(" "); 8536 *cur_column += printed; 8537 return (printed); 8538 } 8539 printed_mask = 0; 8540 while (printed_mask != 0xFF) { 8541 int entry; 8542 8543 for (entry = 0; entry < num_entries; entry++) { 8544 if (((value & table[entry].mask) 8545 != table[entry].value) 8546 || ((printed_mask & table[entry].mask) 8547 == table[entry].mask)) 8548 continue; 8549 8550 printed += printf("%s%s", 8551 printed_mask == 0 ? 
":(" : "|", 8552 table[entry].name); 8553 printed_mask |= table[entry].mask; 8554 8555 break; 8556 } 8557 if (entry >= num_entries) 8558 break; 8559 } 8560 if (printed_mask != 0) 8561 printed += printf(") "); 8562 else 8563 printed += printf(" "); 8564 if (cur_column != NULL) 8565 *cur_column += printed; 8566 return (printed); 8567 } 8568 8569 void 8570 ahd_dump_card_state(struct ahd_softc *ahd) 8571 { 8572 struct scb *scb; 8573 ahd_mode_state saved_modes; 8574 u_int dffstat; 8575 int paused; 8576 u_int scb_index; 8577 u_int saved_scb_index; 8578 u_int cur_col; 8579 int i; 8580 8581 if (ahd_is_paused(ahd)) { 8582 paused = 1; 8583 } else { 8584 paused = 0; 8585 ahd_pause(ahd); 8586 } 8587 saved_modes = ahd_save_modes(ahd); 8588 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8589 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 8590 "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", 8591 ahd_name(ahd), 8592 ahd_inb(ahd, CURADDR) | (ahd_inb(ahd, CURADDR+1) << 8), 8593 ahd_build_mode_state(ahd, ahd->saved_src_mode, 8594 ahd->saved_dst_mode)); 8595 if (paused) 8596 printf("Card was paused\n"); 8597 /* 8598 * Mode independent registers. 
8599 */ 8600 cur_col = 0; 8601 ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50); 8602 ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50); 8603 ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50); 8604 ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50); 8605 ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50); 8606 ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50); 8607 ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50); 8608 ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50); 8609 ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50); 8610 ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50); 8611 ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50); 8612 ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50); 8613 ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50); 8614 ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50); 8615 ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50); 8616 ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50); 8617 ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50); 8618 ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50); 8619 ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50); 8620 ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50); 8621 ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50); 8622 ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50); 8623 ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50); 8624 ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50); 8625 ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50); 8626 ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50); 8627 ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50); 8628 printf("\n"); 8629 printf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x " 8630 "CURRSCB 0x%x NEXTSCB 0x%x\n", 8631 ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING), 8632 ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), 8633 ahd_inw(ahd, NEXTSCB)); 8634 cur_col = 
0; 8635 /* QINFIFO */ 8636 ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, 8637 CAM_LUN_WILDCARD, SCB_LIST_NULL, 8638 ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); 8639 saved_scb_index = ahd_get_scbptr(ahd); 8640 printf("Pending list:"); 8641 i = 0; 8642 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 8643 if (i++ > AHD_SCB_MAX) 8644 break; 8645 cur_col = printf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb), 8646 ahd_inb(ahd, SCB_FIFO_USE_COUNT)); 8647 ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); 8648 ahd_scb_control_print(ahd_inb(ahd, SCB_CONTROL), &cur_col, 60); 8649 ahd_scb_scsiid_print(ahd_inb(ahd, SCB_SCSIID), &cur_col, 60); 8650 } 8651 printf("\nTotal %d\n", i); 8652 8653 printf("Kernel Free SCB list: "); 8654 i = 0; 8655 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 8656 struct scb *list_scb; 8657 8658 list_scb = scb; 8659 do { 8660 printf("%d ", SCB_GET_TAG(list_scb)); 8661 list_scb = LIST_NEXT(list_scb, collision_links); 8662 } while (list_scb && i++ < AHD_SCB_MAX); 8663 } 8664 8665 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { 8666 if (i++ > AHD_SCB_MAX) 8667 break; 8668 printf("%d ", SCB_GET_TAG(scb)); 8669 } 8670 printf("\n"); 8671 8672 printf("Sequencer Complete DMA-inprog list: "); 8673 scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); 8674 i = 0; 8675 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 8676 ahd_set_scbptr(ahd, scb_index); 8677 printf("%d ", scb_index); 8678 scb_index = ahd_inw(ahd, SCB_NEXT_COMPLETE); 8679 } 8680 printf("\n"); 8681 8682 printf("Sequencer Complete list: "); 8683 scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); 8684 i = 0; 8685 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 8686 ahd_set_scbptr(ahd, scb_index); 8687 printf("%d ", scb_index); 8688 scb_index = ahd_inw(ahd, SCB_NEXT_COMPLETE); 8689 } 8690 printf("\n"); 8691 8692 8693 printf("Sequencer DMA-Up and Complete list: "); 8694 scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); 8695 i = 0; 8696 while 
(!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 8697 ahd_set_scbptr(ahd, scb_index); 8698 printf("%d ", scb_index); 8699 scb_index = ahd_inw(ahd, SCB_NEXT_COMPLETE); 8700 } 8701 printf("\n"); 8702 ahd_set_scbptr(ahd, saved_scb_index); 8703 dffstat = ahd_inb(ahd, DFFSTAT); 8704 for (i = 0; i < 2; i++) { 8705 #ifdef AHD_DEBUG 8706 struct scb *fifo_scb; 8707 #endif 8708 u_int fifo_scbptr; 8709 8710 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); 8711 fifo_scbptr = ahd_get_scbptr(ahd); 8712 printf("\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n", 8713 ahd_name(ahd), i, 8714 (dffstat & (FIFO0FREE << i)) ? "Free" : "Active", 8715 ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr); 8716 cur_col = 0; 8717 ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50); 8718 ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50); 8719 ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50); 8720 ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50); 8721 ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW), 8722 &cur_col, 50); 8723 ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50); 8724 ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50); 8725 ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50); 8726 ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50); 8727 if (cur_col > 50) { 8728 printf("\n"); 8729 cur_col = 0; 8730 } 8731 cur_col += printf("SHADDR = 0x%x%x, SHCNT = 0x%x ", 8732 ahd_inl(ahd, SHADDR+4), 8733 ahd_inl(ahd, SHADDR), 8734 (ahd_inb(ahd, SHCNT) 8735 | (ahd_inb(ahd, SHCNT + 1) << 8) 8736 | (ahd_inb(ahd, SHCNT + 2) << 16))); 8737 if (cur_col > 50) { 8738 printf("\n"); 8739 cur_col = 0; 8740 } 8741 cur_col += printf("HADDR = 0x%x%x, HCNT = 0x%x ", 8742 ahd_inl(ahd, HADDR+4), 8743 ahd_inl(ahd, HADDR), 8744 (ahd_inb(ahd, HCNT) 8745 | (ahd_inb(ahd, HCNT + 1) << 8) 8746 | (ahd_inb(ahd, HCNT + 2) << 16))); 8747 ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50); 8748 #ifdef AHD_DEBUG 8749 if ((ahd_debug & AHD_SHOW_SG) != 0) { 8750 fifo_scb 
= ahd_lookup_scb(ahd, fifo_scbptr); 8751 if (fifo_scb != NULL) 8752 ahd_dump_sglist(fifo_scb); 8753 } 8754 #endif 8755 } 8756 printf("\nLQIN: "); 8757 for (i = 0; i < 20; i++) 8758 printf("0x%x ", ahd_inb(ahd, LQIN + i)); 8759 printf("\n"); 8760 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 8761 printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", 8762 ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), 8763 ahd_inb(ahd, OPTIONMODE)); 8764 printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", 8765 ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), 8766 ahd_inb(ahd, MAXCMDCNT)); 8767 ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50); 8768 printf("\n"); 8769 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 8770 cur_col = 0; 8771 ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50); 8772 printf("\n"); 8773 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); 8774 printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", 8775 ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), 8776 ahd_inw(ahd, DINDEX)); 8777 printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", 8778 ahd_name(ahd), ahd_get_scbptr(ahd), ahd_inw(ahd, SCB_NEXT), 8779 ahd_inw(ahd, SCB_NEXT2)); 8780 printf("CDB %x %x %x %x %x %x\n", 8781 ahd_inb(ahd, SCB_CDB_STORE), 8782 ahd_inb(ahd, SCB_CDB_STORE+1), 8783 ahd_inb(ahd, SCB_CDB_STORE+2), 8784 ahd_inb(ahd, SCB_CDB_STORE+3), 8785 ahd_inb(ahd, SCB_CDB_STORE+4), 8786 ahd_inb(ahd, SCB_CDB_STORE+5)); 8787 printf("STACK:"); 8788 for (i = 0; i < ahd->stack_size; i++) { 8789 ahd->saved_stack[i] = 8790 ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8); 8791 printf(" 0x%x", ahd->saved_stack[i]); 8792 } 8793 for (i = ahd->stack_size-1; i >= 0; i--) { 8794 ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF); 8795 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); 8796 } 8797 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 8798 ahd_platform_dump_card_state(ahd); 8799 
ahd_restore_modes(ahd, saved_modes); 8800 if (paused == 0) 8801 ahd_unpause(ahd); 8802 } 8803 8804 void 8805 ahd_dump_scbs(struct ahd_softc *ahd) 8806 { 8807 ahd_mode_state saved_modes; 8808 u_int saved_scb_index; 8809 int i; 8810 8811 saved_modes = ahd_save_modes(ahd); 8812 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8813 saved_scb_index = ahd_get_scbptr(ahd); 8814 for (i = 0; i < AHD_SCB_MAX; i++) { 8815 ahd_set_scbptr(ahd, i); 8816 printf("%3d", i); 8817 printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n", 8818 ahd_inb(ahd, SCB_CONTROL), 8819 ahd_inb(ahd, SCB_SCSIID), ahd_inw(ahd, SCB_NEXT), 8820 ahd_inw(ahd, SCB_NEXT2), ahd_inl(ahd, SCB_SGPTR), 8821 ahd_inl(ahd, SCB_RESIDUAL_SGPTR)); 8822 } 8823 printf("\n"); 8824 ahd_set_scbptr(ahd, saved_scb_index); 8825 ahd_restore_modes(ahd, saved_modes); 8826 } 8827 8828 /**************************** Flexport Logic **********************************/ 8829 /* 8830 * Read count 16bit words from 16bit word address start_addr from the 8831 * SEEPROM attached to the controller, into buf, using the controller's 8832 * SEEPROM reading state machine. Optionally treat the data as a byte 8833 * stream in terms of byte order. 8834 */ 8835 int 8836 ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, 8837 u_int start_addr, u_int count, int bytestream) 8838 { 8839 u_int cur_addr; 8840 u_int end_addr; 8841 int error; 8842 8843 /* 8844 * If we never make it through the loop even once, 8845 * we were passed invalid arguments. 
8846 */ 8847 error = EINVAL; 8848 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8849 end_addr = start_addr + count; 8850 for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { 8851 8852 ahd_outb(ahd, SEEADR, cur_addr); 8853 ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART); 8854 8855 error = ahd_wait_seeprom(ahd); 8856 if (error) 8857 break; 8858 if (bytestream != 0) { 8859 uint8_t *bytestream_ptr; 8860 8861 bytestream_ptr = (uint8_t *)buf; 8862 *bytestream_ptr++ = ahd_inb(ahd, SEEDAT); 8863 *bytestream_ptr = ahd_inb(ahd, SEEDAT+1); 8864 } else { 8865 /* 8866 * ahd_inw() already handles machine byte order. 8867 */ 8868 *buf = ahd_inw(ahd, SEEDAT); 8869 } 8870 buf++; 8871 } 8872 return (error); 8873 } 8874 8875 /* 8876 * Write count 16bit words from buf, into SEEPROM attache to the 8877 * controller starting at 16bit word address start_addr, using the 8878 * controller's SEEPROM writing state machine. 8879 */ 8880 int 8881 ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, 8882 u_int start_addr, u_int count) 8883 { 8884 u_int cur_addr; 8885 u_int end_addr; 8886 int error; 8887 int retval; 8888 8889 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8890 error = ENOENT; 8891 8892 /* Place the chip into write-enable mode */ 8893 ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR); 8894 ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART); 8895 error = ahd_wait_seeprom(ahd); 8896 if (error) 8897 return (error); 8898 8899 /* 8900 * Write the data. If we don't get throught the loop at 8901 * least once, the arguments were invalid. 8902 */ 8903 retval = EINVAL; 8904 end_addr = start_addr + count; 8905 for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { 8906 ahd_outw(ahd, SEEDAT, *buf++); 8907 ahd_outb(ahd, SEEADR, cur_addr); 8908 ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART); 8909 8910 retval = ahd_wait_seeprom(ahd); 8911 if (retval) 8912 break; 8913 } 8914 8915 /* 8916 * Disable writes. 
8917 */ 8918 ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR); 8919 ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART); 8920 error = ahd_wait_seeprom(ahd); 8921 if (error) 8922 return (error); 8923 return (retval); 8924 } 8925 8926 /* 8927 * Wait ~100us for the serial eeprom to satisfy our request. 8928 */ 8929 int 8930 ahd_wait_seeprom(struct ahd_softc *ahd) 8931 { 8932 int cnt; 8933 8934 cnt = 20; 8935 while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt) 8936 ahd_delay(5); 8937 8938 if (cnt == 0) 8939 return (ETIMEDOUT); 8940 return (0); 8941 } 8942 8943 /* 8944 * Validate the two checksums in the per_channel 8945 * vital product data struct. 8946 */ 8947 int 8948 ahd_verify_vpd_cksum(struct vpd_config *vpd) 8949 { 8950 int i; 8951 int maxaddr; 8952 uint32_t checksum; 8953 uint8_t *vpdarray; 8954 8955 vpdarray = (uint8_t *)vpd; 8956 maxaddr = offsetof(struct vpd_config, vpd_checksum); 8957 checksum = 0; 8958 for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++) 8959 checksum = checksum + vpdarray[i]; 8960 if (checksum == 0 8961 || (-checksum & 0xFF) != vpd->vpd_checksum) 8962 return (0); 8963 8964 checksum = 0; 8965 maxaddr = offsetof(struct vpd_config, checksum); 8966 for (i = offsetof(struct vpd_config, default_target_flags); 8967 i < maxaddr; i++) 8968 checksum = checksum + vpdarray[i]; 8969 if (checksum == 0 8970 || (-checksum & 0xFF) != vpd->checksum) 8971 return (0); 8972 return (1); 8973 } 8974 8975 int 8976 ahd_verify_cksum(struct seeprom_config *sc) 8977 { 8978 int i; 8979 int maxaddr; 8980 uint32_t checksum; 8981 uint16_t *scarray; 8982 8983 maxaddr = (sizeof(*sc)/2) - 1; 8984 checksum = 0; 8985 scarray = (uint16_t *)sc; 8986 8987 for (i = 0; i < maxaddr; i++) 8988 checksum = checksum + scarray[i]; 8989 if (checksum == 0 8990 || (checksum & 0xFFFF) != sc->checksum) { 8991 return (0); 8992 } else { 8993 return (1); 8994 } 8995 } 8996 8997 int 8998 ahd_acquire_seeprom(struct ahd_softc *ahd) 8999 { 9000 /* 9001 * We should be able to 
determine the SEEPROM type 9002 * from the flexport logic, but unfortunately not 9003 * all implementations have this logic and there is 9004 * no programatic method for determining if the logic 9005 * is present. 9006 */ 9007 return (1); 9008 #if 0 9009 uint8_t seetype; 9010 int error; 9011 9012 error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype); 9013 if (error != 0 9014 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE)) 9015 return (0); 9016 return (1); 9017 #endif 9018 } 9019 9020 void 9021 ahd_release_seeprom(struct ahd_softc *ahd) 9022 { 9023 /* Currently a no-op */ 9024 } 9025 9026 int 9027 ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value) 9028 { 9029 int error; 9030 9031 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 9032 if (addr > 7) 9033 panic("ahd_write_flexport: address out of range"); 9034 ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); 9035 error = ahd_wait_flexport(ahd); 9036 if (error != 0) 9037 return (error); 9038 ahd_outb(ahd, BRDDAT, value); 9039 ahd_flush_device_writes(ahd); 9040 ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3)); 9041 ahd_flush_device_writes(ahd); 9042 ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); 9043 ahd_flush_device_writes(ahd); 9044 ahd_outb(ahd, BRDCTL, 0); 9045 ahd_flush_device_writes(ahd); 9046 return (0); 9047 } 9048 9049 int 9050 ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value) 9051 { 9052 int error; 9053 9054 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 9055 if (addr > 7) 9056 panic("ahd_read_flexport: address out of range"); 9057 ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3)); 9058 error = ahd_wait_flexport(ahd); 9059 if (error != 0) 9060 return (error); 9061 *value = ahd_inb(ahd, BRDDAT); 9062 ahd_outb(ahd, BRDCTL, 0); 9063 ahd_flush_device_writes(ahd); 9064 return (0); 9065 } 9066 9067 /* 9068 * Wait at most 2 seconds for flexport arbitration to succeed. 
 */
int
ahd_wait_flexport(struct ahd_softc *ahd)
{
	int cnt;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	/* 2s total at 5us per poll; returns ETIMEDOUT when exhausted. */
	cnt = 1000000 * 2 / 5;
	while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
		ahd_delay(5);

	if (cnt == 0)
		return (ETIMEDOUT);
	return (0);
}

/************************* Target Mode ****************************************/
#ifdef AHD_TARGET_MODE
/*
 * Translate the target/lun addressed by ccb into our tstate/lstate
 * bookkeeping structures.  Wildcard target+lun selects the "black hole"
 * lstate that absorbs requests to unattached luns.  When
 * notfound_failure is set, a missing lstate is reported as
 * CAM_PATH_INVALID instead of success with *lstate == NULL.
 */
cam_status
ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
		    struct ahd_tmode_tstate **tstate,
		    struct ahd_tmode_lstate **lstate,
		    int notfound_failure)
{

	if ((ahd->features & AHD_TARGETMODE) == 0)
		return (CAM_REQ_INVALID);

	/*
	 * Handle the 'black hole' device that sucks up
	 * requests to unattached luns on enabled targets.
	 */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*tstate = NULL;
		*lstate = ahd->black_hole;
	} else {
		u_int max_id;

		/* Wide controllers address ids 0-15, narrow 0-7. */
		max_id = (ahd->features & AHD_WIDE) ? 15 : 7;
		if (ccb->ccb_h.target_id > max_id)
			return (CAM_TID_INVALID);

		if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
			return (CAM_LUN_INVALID);

		*tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
		*lstate = NULL;
		if (*tstate != NULL)
			*lstate =
			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
	}

	if (notfound_failure != 0 && *lstate == NULL)
		return (CAM_PATH_INVALID);

	return (CAM_REQ_CMP);
}

/*
 * Service an XPT_EN_LUN CCB: enable or disable target-mode handling
 * for a lun.  The entire body is compiled out behind NOT_YET; the code
 * appears carried over from the aic7xxx driver and is not yet wired up
 * for this chip family.
 */
void
ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
{
#if NOT_YET
	struct	   ahd_tmode_tstate *tstate;
	struct	   ahd_tmode_lstate *lstate;
	struct	   ccb_en_lun *cel;
	cam_status status;
	u_int	   target;
	u_int	   lun;
	u_int	   target_mask;
	u_long	   s;
	char	   channel;

	status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
				     /*notfound_failure*/FALSE);

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if ((ahd->features & AHD_MULTIROLE) != 0) {
		u_int	   our_id;

		our_id = ahd->our_id;
		if (ccb->ccb_h.target_id != our_id) {
			if ((ahd->features & AHD_MULTI_TID) != 0
			 && (ahd->flags & AHD_INITIATORROLE) != 0) {
				/*
				 * Only allow additional targets if
				 * the initiator role is disabled.
				 * The hardware cannot handle a re-select-in
				 * on the initiator id during a re-select-out
				 * on a different target id.
				 */
				status = CAM_TID_INVALID;
			} else if ((ahd->flags & AHD_INITIATORROLE) != 0
				|| ahd->enabled_luns > 0) {
				/*
				 * Only allow our target id to change
				 * if the initiator role is not configured
				 * and there are no enabled luns which
				 * are attached to the currently registered
				 * scsi id.
9173 */ 9174 status = CAM_TID_INVALID; 9175 } 9176 } 9177 } 9178 9179 if (status != CAM_REQ_CMP) { 9180 ccb->ccb_h.status = status; 9181 return; 9182 } 9183 9184 /* 9185 * We now have an id that is valid. 9186 * If we aren't in target mode, switch modes. 9187 */ 9188 if ((ahd->flags & AHD_TARGETROLE) == 0 9189 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 9190 u_long s; 9191 9192 printf("Configuring Target Mode\n"); 9193 ahd_lock(ahd, &s); 9194 if (LIST_FIRST(&ahd->pending_scbs) != NULL) { 9195 ccb->ccb_h.status = CAM_BUSY; 9196 ahd_unlock(ahd, &s); 9197 return; 9198 } 9199 ahd->flags |= AHD_TARGETROLE; 9200 if ((ahd->features & AHD_MULTIROLE) == 0) 9201 ahd->flags &= ~AHD_INITIATORROLE; 9202 ahd_pause(ahd); 9203 ahd_loadseq(ahd); 9204 ahd_unlock(ahd, &s); 9205 } 9206 cel = &ccb->cel; 9207 target = ccb->ccb_h.target_id; 9208 lun = ccb->ccb_h.target_lun; 9209 channel = SIM_CHANNEL(ahd, sim); 9210 target_mask = 0x01 << target; 9211 if (channel == 'B') 9212 target_mask <<= 8; 9213 9214 if (cel->enable != 0) { 9215 u_int scsiseq1; 9216 9217 /* Are we already enabled?? */ 9218 if (lstate != NULL) { 9219 xpt_print_path(ccb->ccb_h.path); 9220 printf("Lun already enabled\n"); 9221 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 9222 return; 9223 } 9224 9225 if (cel->grp6_len != 0 9226 || cel->grp7_len != 0) { 9227 /* 9228 * Don't (yet?) support vendor 9229 * specific commands. 9230 */ 9231 ccb->ccb_h.status = CAM_REQ_INVALID; 9232 printf("Non-zero Group Codes\n"); 9233 return; 9234 } 9235 9236 /* 9237 * Seems to be okay. 9238 * Setup our data structures. 
9239 */ 9240 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 9241 tstate = ahd_alloc_tstate(ahd, target, channel); 9242 if (tstate == NULL) { 9243 xpt_print_path(ccb->ccb_h.path); 9244 printf("Couldn't allocate tstate\n"); 9245 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 9246 return; 9247 } 9248 } 9249 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 9250 if (lstate == NULL) { 9251 xpt_print_path(ccb->ccb_h.path); 9252 printf("Couldn't allocate lstate\n"); 9253 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 9254 return; 9255 } 9256 memset(lstate, 0, sizeof(*lstate)); 9257 status = xpt_create_path(&lstate->path, /*periph*/NULL, 9258 xpt_path_path_id(ccb->ccb_h.path), 9259 xpt_path_target_id(ccb->ccb_h.path), 9260 xpt_path_lun_id(ccb->ccb_h.path)); 9261 if (status != CAM_REQ_CMP) { 9262 free(lstate, M_DEVBUF); 9263 xpt_print_path(ccb->ccb_h.path); 9264 printf("Couldn't allocate path\n"); 9265 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 9266 return; 9267 } 9268 SLIST_INIT(&lstate->accept_tios); 9269 SLIST_INIT(&lstate->immed_notifies); 9270 ahd_lock(ahd, &s); 9271 ahd_pause(ahd); 9272 if (target != CAM_TARGET_WILDCARD) { 9273 tstate->enabled_luns[lun] = lstate; 9274 ahd->enabled_luns++; 9275 9276 if ((ahd->features & AHD_MULTI_TID) != 0) { 9277 u_int targid_mask; 9278 9279 targid_mask = ahd_inb(ahd, TARGID) 9280 | (ahd_inb(ahd, TARGID + 1) << 8); 9281 9282 targid_mask |= target_mask; 9283 ahd_outb(ahd, TARGID, targid_mask); 9284 ahd_outb(ahd, TARGID+1, (targid_mask >> 8)); 9285 9286 ahd_update_scsiid(ahd, targid_mask); 9287 } else { 9288 u_int our_id; 9289 char channel; 9290 9291 channel = SIM_CHANNEL(ahd, sim); 9292 our_id = SIM_SCSI_ID(ahd, sim); 9293 9294 /* 9295 * This can only happen if selections 9296 * are not enabled 9297 */ 9298 if (target != our_id) { 9299 u_int sblkctl; 9300 char cur_channel; 9301 int swap; 9302 9303 sblkctl = ahd_inb(ahd, SBLKCTL); 9304 cur_channel = (sblkctl & SELBUSB) 9305 ? 
'B' : 'A'; 9306 if ((ahd->features & AHD_TWIN) == 0) 9307 cur_channel = 'A'; 9308 swap = cur_channel != channel; 9309 ahd->our_id = target; 9310 9311 if (swap) 9312 ahd_outb(ahd, SBLKCTL, 9313 sblkctl ^ SELBUSB); 9314 9315 ahd_outb(ahd, SCSIID, target); 9316 9317 if (swap) 9318 ahd_outb(ahd, SBLKCTL, sblkctl); 9319 } 9320 } 9321 } else 9322 ahd->black_hole = lstate; 9323 /* Allow select-in operations */ 9324 if (ahd->black_hole != NULL && ahd->enabled_luns > 0) { 9325 scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); 9326 scsiseq1 |= ENSELI; 9327 ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); 9328 scsiseq1 = ahd_inb(ahd, SCSISEQ1); 9329 scsiseq1 |= ENSELI; 9330 ahd_outb(ahd, SCSISEQ1, scsiseq1); 9331 } 9332 ahd_unpause(ahd); 9333 ahd_unlock(ahd, &s); 9334 ccb->ccb_h.status = CAM_REQ_CMP; 9335 xpt_print_path(ccb->ccb_h.path); 9336 printf("Lun now enabled for target mode\n"); 9337 } else { 9338 struct scb *scb; 9339 int i, empty; 9340 9341 if (lstate == NULL) { 9342 ccb->ccb_h.status = CAM_LUN_INVALID; 9343 return; 9344 } 9345 9346 ahd_lock(ahd, &s); 9347 9348 ccb->ccb_h.status = CAM_REQ_CMP; 9349 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 9350 struct ccb_hdr *ccbh; 9351 9352 ccbh = &scb->io_ctx->ccb_h; 9353 if (ccbh->func_code == XPT_CONT_TARGET_IO 9354 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 9355 printf("CTIO pending\n"); 9356 ccb->ccb_h.status = CAM_REQ_INVALID; 9357 ahd_unlock(ahd, &s); 9358 return; 9359 } 9360 } 9361 9362 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 9363 printf("ATIOs pending\n"); 9364 ccb->ccb_h.status = CAM_REQ_INVALID; 9365 } 9366 9367 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 9368 printf("INOTs pending\n"); 9369 ccb->ccb_h.status = CAM_REQ_INVALID; 9370 } 9371 9372 if (ccb->ccb_h.status != CAM_REQ_CMP) { 9373 ahd_unlock(ahd, &s); 9374 return; 9375 } 9376 9377 xpt_print_path(ccb->ccb_h.path); 9378 printf("Target mode disabled\n"); 9379 xpt_free_path(lstate->path); 9380 free(lstate, M_DEVBUF); 9381 9382 ahd_pause(ahd); 
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahd->enabled_luns--;
			/* Any luns still enabled on this target? */
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahd_free_tstate(ahd, target, channel,
						/*force*/FALSE);
				if (ahd->features & AHD_MULTI_TID) {
					u_int targid_mask;

					/*
					 * Stop responding to selections for
					 * this id.
					 */
					targid_mask = ahd_inb(ahd, TARGID)
						    | (ahd_inb(ahd, TARGID + 1)
						       << 8);

					targid_mask &= ~target_mask;
					ahd_outb(ahd, TARGID, targid_mask);
					ahd_outb(ahd, TARGID+1,
						 (targid_mask >> 8));
					ahd_update_scsiid(ahd, targid_mask);
				}
			}
		} else {

			ahd->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahd->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq1;

			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);

			if ((ahd->features & AHD_MULTIROLE) == 0) {
				/* Revert to pure initiator operation. */
				printf("Configuring Initiator Mode\n");
				ahd->flags &= ~AHD_TARGETROLE;
				ahd->flags |= AHD_INITIATORROLE;
				ahd_pause(ahd);
				ahd_loadseq(ahd);
			}
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
	}
#endif
}

/*
 * Keep the OID field of SCSIID consistent with the TARGID selection
 * mask so we never respond to selections on an id that is not enabled.
 * Compiled out behind NOT_YET; note it references AHD_ULTRA2/
 * SCSIID_ULTRA2, which look carried over from the aic7xxx driver --
 * verify before enabling.
 */
static void
ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
{
#if NOT_YET
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahd->features & AHD_MULTI_TID) == 0)
		panic("ahd_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahd->features & AHD_ULTRA2) != 0)
		scsiid = ahd_inb(ahd, SCSIID_ULTRA2);
	else
		scsiid = ahd_inb(ahd, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			our_id = ahd->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahd->features & AHD_ULTRA2) != 0)
		ahd_outb(ahd, SCSIID_ULTRA2, scsiid);
	else
		ahd_outb(ahd, SCSIID, scsiid);
#endif
}

/*
 * Drain the target-mode incoming command queue, handing each valid
 * entry to ahd_handle_target_cmd().  Stops early if a command cannot
 * be accepted (no ATIO available) so the entry is retried later.
 */
void
ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
{
	struct target_cmd *cmd;

	ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahd_handle_target_cmd(ahd, cmd) != 0)
			break;

		cmd->cmd_valid = 0;
		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
				ahd->shared_data_dmamap,
				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
				sizeof(struct target_cmd),
				BUS_DMASYNC_PREREAD);
		ahd->tqinfifonext++;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			u_int hs_mailbox;

			hs_mailbox = ahd_inb(ahd, HS_MAILBOX);
			hs_mailbox &= ~HOST_TQINPOS;
			hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS;
			ahd_outb(ahd, HS_MAILBOX, hs_mailbox);
		}
	}
}

/*
 * Convert one incoming target_cmd into an ATIO for the peripheral
 * driver that has this lun enabled.  Returns 0 when the command was
 * consumed, or 1 when no ATIO is available and the queue must stall
 * (AHD_TQINFIFO_BLOCKED is set in that case).
 */
static int
ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
{
	struct	 ahd_tmode_tstate *tstate;
	struct	 ahd_tmode_lstate *lstate;
	struct	 ccb_accept_tio *atio;
	uint8_t *byte;
	int	 initiator;
	int	 target;
	int	 lun;

	initiator = SCSIID_TARGET(ahd, cmd->scsiid);
	target = SCSIID_OUR_ID(cmd->scsiid);
	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahd->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL)
		lstate = tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	if (lstate == NULL)
		lstate = ahd->black_hole;

	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		ahd->flags |= AHD_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		return (1);
	} else
		ahd->flags &= ~AHD_TQINFIFO_BLOCKED;
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_TQIN) != 0)
		printf("Incoming command from %d for %d:%d%s\n",
		       initiator, target, lun,
		       lstate == ahd->black_hole ? "(Black Holed)" : "");
#endif
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahd->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whomever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
	} else {
		atio->ccb_h.flags = 0;
	}
	byte++;

	/* Okay.  Now determine the cdb size based on the command code */
	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
		/*
		 * We weren't allowed to disconnect.
		 * We're hanging on the bus until a
		 * continue target I/O comes in response
		 * to this accept tio.
		 */
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_TQIN) != 0)
			printf("Received Immediate Command %d:%d:%d - %p\n",
			       initiator, target, lun, ahd->pending_device);
#endif
		ahd->pending_device = lstate;
		ahd_freeze_ccb((union ccb *)atio);
		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
	}
	xpt_done((union ccb*)atio);
	return (0);
}

#endif