1 /* 2 * Core routines and tables shareable across OS platforms. 3 * 4 * Copyright (c) 1994-2002 Justin T. Gibbs. 5 * Copyright (c) 2000-2002 Adaptec Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * substantially similar to the "NO WARRANTY" disclaimer below 16 * ("Disclaimer") and any redistribution must be conditioned upon 17 * including a substantially similar Disclaimer requirement for further 18 * binary redistribution. 19 * 3. Neither the names of the above-listed copyright holders nor the names 20 * of any contributors may be used to endorse or promote products derived 21 * from this software without specific prior written permission. 22 * 23 * Alternatively, this software may be distributed under the terms of the 24 * GNU General Public License ("GPL") version 2 as published by the Free 25 * Software Foundation. 26 * 27 * NO WARRANTY 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 38 * POSSIBILITY OF SUCH DAMAGES. 39 * 40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ 41 */ 42 43 #ifdef __linux__ 44 #include "aic7xxx_osm.h" 45 #include "aic7xxx_inline.h" 46 #include "aicasm/aicasm_insformat.h" 47 #else 48 #include <sys/cdefs.h> 49 __FBSDID("$FreeBSD$"); 50 #include <dev/aic7xxx/aic7xxx_osm.h> 51 #include <dev/aic7xxx/aic7xxx_inline.h> 52 #include <dev/aic7xxx/aicasm/aicasm_insformat.h> 53 #endif 54 55 /****************************** Softc Data ************************************/ 56 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq); 57 58 /***************************** Lookup Tables **********************************/ 59 char *ahc_chip_names[] = 60 { 61 "NONE", 62 "aic7770", 63 "aic7850", 64 "aic7855", 65 "aic7859", 66 "aic7860", 67 "aic7870", 68 "aic7880", 69 "aic7895", 70 "aic7895C", 71 "aic7890/91", 72 "aic7896/97", 73 "aic7892", 74 "aic7899" 75 }; 76 static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names); 77 78 /* 79 * Hardware error codes. 
 */
/* Maps a bit in the hardware ERROR register to a printable message. */
struct ahc_hard_error_entry {
	uint8_t errno;
	char *errmesg;
};

static struct ahc_hard_error_entry ahc_hard_errors[] = {
	{ ILLHADDR,	"Illegal Host Access" },
	{ ILLSADDR,	"Illegal Sequencer Address referrenced" },
	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
	{ SQPARERR,	"Sequencer Parity Error" },
	{ DPARERR,	"Data-path Parity Error" },
	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT,	"PCI Error detected" },
	{ CIOPARERR,	"CIOBUS Parity Error" },
};
static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);

/*
 * Maps a SCSI bus phase to the message to queue when a parity error
 * is seen in that phase, plus a printable phase description.
 */
static struct ahc_phase_table_entry ahc_phase_table[] =
{
	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
	{ 0,		MSG_NOOP,		"in unknown phase"	}
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last element (the "unknown phase" catch-all entry)
 * from the count.
 */
static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;

/*
 * Valid SCSIRATE values.  (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the scsixfer reg.
 */
static struct ahc_syncrate ahc_syncrates[] =
{
      /* ultra2    fast/ultra  period     rate */
	{ 0x42,      0x000,      9,     "80.0" },
	{ 0x03,      0x000,     10,     "40.0" },
	{ 0x04,      0x000,     11,     "33.0" },
	{ 0x05,      0x100,     12,     "20.0" },
	{ 0x06,      0x110,     15,     "16.0" },
	{ 0x07,      0x120,     18,     "13.4" },
	{ 0x08,      0x000,     25,     "10.0" },
	{ 0x19,      0x010,     31,      "8.0" },
	{ 0x1a,      0x020,     37,     "6.67" },
	{ 0x1b,      0x030,     43,      "5.7" },
	{ 0x1c,      0x040,     50,      "5.0" },
	{ 0x00,      0x050,     56,      "4.4" },
	{ 0x00,      0x060,     62,      "4.0" },
	{ 0x00,      0x070,     68,      "3.6" },
	{ 0x00,      0x000,      0,      NULL   }
};

/* Our Sequencer Program */
#include "aic7xxx_seq.h"

/**************************** Function Declarations ***************************/
static void		ahc_force_renegotiation(struct ahc_softc *ahc,
						struct ahc_devinfo *devinfo);
static struct ahc_tmode_tstate*
			ahc_alloc_tstate(struct ahc_softc *ahc,
					 u_int scsi_id, char channel);
#ifdef AHC_TARGET_MODE
static void		ahc_free_tstate(struct ahc_softc *ahc,
					u_int scsi_id, char channel, int force);
#endif
static struct ahc_syncrate*
			ahc_devlimited_syncrate(struct ahc_softc *ahc,
						struct ahc_initiator_tinfo *,
						u_int *period,
						u_int *ppr_options,
						role_t role);
static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
					  struct ahc_devinfo *devinfo);
static void		ahc_scb_devinfo(struct ahc_softc *ahc,
					struct ahc_devinfo *devinfo,
					struct scb *scb);
static void		ahc_assert_atn(struct ahc_softc *ahc);
static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
						   struct ahc_devinfo *devinfo,
						   struct scb *scb);
static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
					       struct ahc_devinfo *devinfo);
static void		ahc_construct_sdtr(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   u_int period, u_int offset);
static void		ahc_construct_wdtr(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo, 179 u_int bus_width); 180 static void ahc_construct_ppr(struct ahc_softc *ahc, 181 struct ahc_devinfo *devinfo, 182 u_int period, u_int offset, 183 u_int bus_width, u_int ppr_options); 184 static void ahc_clear_msg_state(struct ahc_softc *ahc); 185 static void ahc_handle_proto_violation(struct ahc_softc *ahc); 186 static void ahc_handle_message_phase(struct ahc_softc *ahc); 187 typedef enum { 188 AHCMSG_1B, 189 AHCMSG_2B, 190 AHCMSG_EXT 191 } ahc_msgtype; 192 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 193 u_int msgval, int full); 194 static int ahc_parse_msg(struct ahc_softc *ahc, 195 struct ahc_devinfo *devinfo); 196 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 197 struct ahc_devinfo *devinfo); 198 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 199 struct ahc_devinfo *devinfo); 200 static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); 201 static void ahc_handle_devreset(struct ahc_softc *ahc, 202 struct ahc_devinfo *devinfo, 203 cam_status status, char *message, 204 int verbose_level); 205 #ifdef AHC_TARGET_MODE 206 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 207 struct ahc_devinfo *devinfo, 208 struct scb *scb); 209 #endif 210 211 static bus_dmamap_callback_t ahc_dmamap_cb; 212 static void ahc_build_free_scb_list(struct ahc_softc *ahc); 213 static int ahc_init_scbdata(struct ahc_softc *ahc); 214 static void ahc_fini_scbdata(struct ahc_softc *ahc); 215 static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 216 struct scb *prev_scb, 217 struct scb *scb); 218 static int ahc_qinfifo_count(struct ahc_softc *ahc); 219 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 220 u_int prev, u_int scbptr); 221 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 222 static u_int ahc_rem_wscb(struct ahc_softc *ahc, 223 u_int scbpos, u_int prev); 224 static void ahc_reset_current_bus(struct ahc_softc *ahc); 225 #ifdef AHC_DUMP_SEQ 226 static void 
ahc_dumpseq(struct ahc_softc *ahc); 227 #endif 228 static int ahc_loadseq(struct ahc_softc *ahc); 229 static int ahc_check_patch(struct ahc_softc *ahc, 230 struct patch **start_patch, 231 u_int start_instr, u_int *skip_addr); 232 static void ahc_download_instr(struct ahc_softc *ahc, 233 u_int instrptr, uint8_t *dconsts); 234 static int ahc_other_scb_timeout(struct ahc_softc *ahc, 235 struct scb *scb, 236 struct scb *other_scb); 237 #ifdef AHC_TARGET_MODE 238 static void ahc_queue_lstate_event(struct ahc_softc *ahc, 239 struct ahc_tmode_lstate *lstate, 240 u_int initiator_id, 241 u_int event_type, 242 u_int event_arg); 243 static void ahc_update_scsiid(struct ahc_softc *ahc, 244 u_int targid_mask); 245 static int ahc_handle_target_cmd(struct ahc_softc *ahc, 246 struct target_cmd *cmd); 247 #endif 248 /************************* Sequencer Execution Control ************************/ 249 /* 250 * Restart the sequencer program from address zero 251 */ 252 void 253 ahc_restart(struct ahc_softc *ahc) 254 { 255 256 ahc_pause(ahc); 257 258 /* No more pending messages. */ 259 ahc_clear_msg_state(ahc); 260 261 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ 262 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ 263 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 264 ahc_outb(ahc, LASTPHASE, P_BUSFREE); 265 ahc_outb(ahc, SAVED_SCSIID, 0xFF); 266 ahc_outb(ahc, SAVED_LUN, 0xFF); 267 268 /* 269 * Ensure that the sequencer's idea of TQINPOS 270 * matches our own. The sequencer increments TQINPOS 271 * only after it sees a DMA complete and a reset could 272 * occur before the increment leaving the kernel to believe 273 * the command arrived but the sequencer to not. 
274 */ 275 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 276 277 /* Always allow reselection */ 278 ahc_outb(ahc, SCSISEQ, 279 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); 280 if ((ahc->features & AHC_CMD_CHAN) != 0) { 281 /* Ensure that no DMA operations are in progress */ 282 ahc_outb(ahc, CCSCBCNT, 0); 283 ahc_outb(ahc, CCSGCTL, 0); 284 ahc_outb(ahc, CCSCBCTL, 0); 285 } 286 /* 287 * If we were in the process of DMA'ing SCB data into 288 * an SCB, replace that SCB on the free list. This prevents 289 * an SCB leak. 290 */ 291 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { 292 ahc_add_curscb_to_free_list(ahc); 293 ahc_outb(ahc, SEQ_FLAGS2, 294 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 295 } 296 297 /* 298 * Clear any pending sequencer interrupt. It is no 299 * longer relevant since we're resetting the Program 300 * Counter. 301 */ 302 ahc_outb(ahc, CLRINT, CLRSEQINT); 303 304 ahc_outb(ahc, MWI_RESIDUAL, 0); 305 ahc_outb(ahc, SEQCTL, ahc->seqctl); 306 ahc_outb(ahc, SEQADDR0, 0); 307 ahc_outb(ahc, SEQADDR1, 0); 308 309 ahc_unpause(ahc); 310 } 311 312 /************************* Input/Output Queues ********************************/ 313 void 314 ahc_run_qoutfifo(struct ahc_softc *ahc) 315 { 316 struct scb *scb; 317 u_int scb_index; 318 319 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 320 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 321 322 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 323 if ((ahc->qoutfifonext & 0x03) == 0x03) { 324 u_int modnext; 325 326 /* 327 * Clear 32bits of QOUTFIFO at a time 328 * so that we don't clobber an incoming 329 * byte DMA to the array on architectures 330 * that only support 32bit load and store 331 * operations. 
332 */ 333 modnext = ahc->qoutfifonext & ~0x3; 334 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 335 aic_dmamap_sync(ahc, ahc->shared_data_dmat, 336 ahc->shared_data_dmamap, 337 /*offset*/modnext, /*len*/4, 338 BUS_DMASYNC_PREREAD); 339 } 340 ahc->qoutfifonext++; 341 342 scb = ahc_lookup_scb(ahc, scb_index); 343 if (scb == NULL) { 344 printf("%s: WARNING no command for scb %d " 345 "(cmdcmplt)\nQOUTPOS = %d\n", 346 ahc_name(ahc), scb_index, 347 (ahc->qoutfifonext - 1) & 0xFF); 348 continue; 349 } 350 351 /* 352 * Save off the residual 353 * if there is one. 354 */ 355 ahc_update_residual(ahc, scb); 356 ahc_done(ahc, scb); 357 } 358 } 359 360 void 361 ahc_run_untagged_queues(struct ahc_softc *ahc) 362 { 363 int i; 364 365 for (i = 0; i < 16; i++) 366 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 367 } 368 369 void 370 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 371 { 372 struct scb *scb; 373 374 if (ahc->untagged_queue_lock != 0) 375 return; 376 377 if ((scb = TAILQ_FIRST(queue)) != NULL 378 && (scb->flags & SCB_ACTIVE) == 0) { 379 scb->flags |= SCB_ACTIVE; 380 aic_scb_timer_start(scb); 381 ahc_queue_scb(ahc, scb); 382 } 383 } 384 385 /************************* Interrupt Handling *********************************/ 386 void 387 ahc_handle_brkadrint(struct ahc_softc *ahc) 388 { 389 /* 390 * We upset the sequencer :-( 391 * Lookup the error message 392 */ 393 int i; 394 int error; 395 396 error = ahc_inb(ahc, ERROR); 397 for (i = 0; error != 1 && i < num_errors; i++) 398 error >>= 1; 399 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 400 ahc_name(ahc), ahc_hard_errors[i].errmesg, 401 ahc_inb(ahc, SEQADDR0) | 402 (ahc_inb(ahc, SEQADDR1) << 8)); 403 404 ahc_dump_card_state(ahc); 405 406 /* Tell everyone that this HBA is no longer available */ 407 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 408 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 409 CAM_NO_HBA); 410 411 /* Disable all interrupt sources by resetting the 
controller */ 412 ahc_shutdown(ahc); 413 } 414 415 void 416 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 417 { 418 struct scb *scb; 419 struct ahc_devinfo devinfo; 420 421 ahc_fetch_devinfo(ahc, &devinfo); 422 423 /* 424 * Clear the upper byte that holds SEQINT status 425 * codes and clear the SEQINT bit. We will unpause 426 * the sequencer, if appropriate, after servicing 427 * the request. 428 */ 429 ahc_outb(ahc, CLRINT, CLRSEQINT); 430 switch (intstat & SEQINT_MASK) { 431 case BAD_STATUS: 432 { 433 u_int scb_index; 434 struct hardware_scb *hscb; 435 436 /* 437 * Set the default return value to 0 (don't 438 * send sense). The sense code will change 439 * this if needed. 440 */ 441 ahc_outb(ahc, RETURN_1, 0); 442 443 /* 444 * The sequencer will notify us when a command 445 * has an error that would be of interest to 446 * the kernel. This allows us to leave the sequencer 447 * running in the common case of command completes 448 * without error. The sequencer will already have 449 * dma'd the SCB back up to us, so we can reference 450 * the in kernel copy directly. 451 */ 452 scb_index = ahc_inb(ahc, SCB_TAG); 453 scb = ahc_lookup_scb(ahc, scb_index); 454 if (scb == NULL) { 455 ahc_print_devinfo(ahc, &devinfo); 456 printf("ahc_intr - referenced scb " 457 "not valid during seqint 0x%x scb(%d)\n", 458 intstat, scb_index); 459 ahc_dump_card_state(ahc); 460 panic("for safety"); 461 goto unpause; 462 } 463 464 hscb = scb->hscb; 465 466 /* Don't want to clobber the original sense code */ 467 if ((scb->flags & SCB_SENSE) != 0) { 468 /* 469 * Clear the SCB_SENSE Flag and have 470 * the sequencer do a normal command 471 * complete. 472 */ 473 scb->flags &= ~SCB_SENSE; 474 aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 475 break; 476 } 477 aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 478 /* Freeze the queue until the client sees the error. 
*/ 479 ahc_freeze_devq(ahc, scb); 480 aic_freeze_scb(scb); 481 aic_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 482 switch (hscb->shared_data.status.scsi_status) { 483 case SCSI_STATUS_OK: 484 printf("%s: Interrupted for staus of 0???\n", 485 ahc_name(ahc)); 486 break; 487 case SCSI_STATUS_CMD_TERMINATED: 488 case SCSI_STATUS_CHECK_COND: 489 { 490 struct ahc_dma_seg *sg; 491 struct scsi_sense *sc; 492 struct ahc_initiator_tinfo *targ_info; 493 struct ahc_tmode_tstate *tstate; 494 struct ahc_transinfo *tinfo; 495 #ifdef AHC_DEBUG 496 if (ahc_debug & AHC_SHOW_SENSE) { 497 ahc_print_path(ahc, scb); 498 printf("SCB %d: requests Check Status\n", 499 scb->hscb->tag); 500 } 501 #endif 502 503 if (aic_perform_autosense(scb) == 0) 504 break; 505 506 targ_info = ahc_fetch_transinfo(ahc, 507 devinfo.channel, 508 devinfo.our_scsiid, 509 devinfo.target, 510 &tstate); 511 tinfo = &targ_info->curr; 512 sg = scb->sg_list; 513 sc = (struct scsi_sense *)(&hscb->shared_data.cdb); 514 /* 515 * Save off the residual if there is one. 516 */ 517 ahc_update_residual(ahc, scb); 518 #ifdef AHC_DEBUG 519 if (ahc_debug & AHC_SHOW_SENSE) { 520 ahc_print_path(ahc, scb); 521 printf("Sending Sense\n"); 522 } 523 #endif 524 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 525 sg->len = aic_get_sense_bufsize(ahc, scb); 526 sg->len |= AHC_DMA_LAST_SEG; 527 528 /* Fixup byte order */ 529 sg->addr = aic_htole32(sg->addr); 530 sg->len = aic_htole32(sg->len); 531 532 sc->opcode = REQUEST_SENSE; 533 sc->byte2 = 0; 534 if (tinfo->protocol_version <= SCSI_REV_2 535 && SCB_GET_LUN(scb) < 8) 536 sc->byte2 = SCB_GET_LUN(scb) << 5; 537 sc->unused[0] = 0; 538 sc->unused[1] = 0; 539 sc->length = sg->len; 540 sc->control = 0; 541 542 /* 543 * We can't allow the target to disconnect. 544 * This will be an untagged transaction and 545 * having the target disconnect will make this 546 * transaction indestinguishable from outstanding 547 * tagged transactions. 
548 */ 549 hscb->control = 0; 550 551 /* 552 * This request sense could be because the 553 * the device lost power or in some other 554 * way has lost our transfer negotiations. 555 * Renegotiate if appropriate. Unit attention 556 * errors will be reported before any data 557 * phases occur. 558 */ 559 if (aic_get_residual(scb) 560 == aic_get_transfer_length(scb)) { 561 ahc_update_neg_request(ahc, &devinfo, 562 tstate, targ_info, 563 AHC_NEG_IF_NON_ASYNC); 564 } 565 if (tstate->auto_negotiate & devinfo.target_mask) { 566 hscb->control |= MK_MESSAGE; 567 scb->flags &= ~SCB_NEGOTIATE; 568 scb->flags |= SCB_AUTO_NEGOTIATE; 569 } 570 hscb->cdb_len = sizeof(*sc); 571 hscb->dataptr = sg->addr; 572 hscb->datacnt = sg->len; 573 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; 574 hscb->sgptr = aic_htole32(hscb->sgptr); 575 scb->sg_count = 1; 576 scb->flags |= SCB_SENSE; 577 ahc_qinfifo_requeue_tail(ahc, scb); 578 ahc_outb(ahc, RETURN_1, SEND_SENSE); 579 /* 580 * Ensure we have enough time to actually 581 * retrieve the sense. 
582 */ 583 aic_scb_timer_reset(scb, 5 * 1000000); 584 break; 585 } 586 default: 587 break; 588 } 589 break; 590 } 591 case NO_MATCH: 592 { 593 /* Ensure we don't leave the selection hardware on */ 594 ahc_outb(ahc, SCSISEQ, 595 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 596 597 printf("%s:%c:%d: no active SCB for reconnecting " 598 "target - issuing BUS DEVICE RESET\n", 599 ahc_name(ahc), devinfo.channel, devinfo.target); 600 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 601 "ARG_1 == 0x%x ACCUM = 0x%x\n", 602 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 603 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 604 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 605 "SINDEX == 0x%x\n", 606 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 607 ahc_index_busy_tcl(ahc, 608 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 609 ahc_inb(ahc, SAVED_LUN))), 610 ahc_inb(ahc, SINDEX)); 611 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 612 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 613 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 614 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 615 ahc_inb(ahc, SCB_CONTROL)); 616 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 617 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 618 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 619 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 620 ahc_dump_card_state(ahc); 621 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 622 ahc->msgout_len = 1; 623 ahc->msgout_index = 0; 624 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 625 ahc_outb(ahc, MSG_OUT, HOST_MSG); 626 ahc_assert_atn(ahc); 627 break; 628 } 629 case SEND_REJECT: 630 { 631 u_int rejbyte = ahc_inb(ahc, ACCUM); 632 printf("%s:%c:%d: Warning - unknown message received from " 633 "target (0x%x). 
Rejecting\n", 634 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 635 break; 636 } 637 case PROTO_VIOLATION: 638 { 639 ahc_handle_proto_violation(ahc); 640 break; 641 } 642 case IGN_WIDE_RES: 643 ahc_handle_ign_wide_residue(ahc, &devinfo); 644 break; 645 case PDATA_REINIT: 646 ahc_reinitialize_dataptrs(ahc); 647 break; 648 case BAD_PHASE: 649 { 650 u_int lastphase; 651 652 lastphase = ahc_inb(ahc, LASTPHASE); 653 printf("%s:%c:%d: unknown scsi bus phase %x, " 654 "lastphase = 0x%x. Attempting to continue\n", 655 ahc_name(ahc), devinfo.channel, devinfo.target, 656 lastphase, ahc_inb(ahc, SCSISIGI)); 657 break; 658 } 659 case MISSED_BUSFREE: 660 { 661 u_int lastphase; 662 663 lastphase = ahc_inb(ahc, LASTPHASE); 664 printf("%s:%c:%d: Missed busfree. " 665 "Lastphase = 0x%x, Curphase = 0x%x\n", 666 ahc_name(ahc), devinfo.channel, devinfo.target, 667 lastphase, ahc_inb(ahc, SCSISIGI)); 668 ahc_restart(ahc); 669 return; 670 } 671 case HOST_MSG_LOOP: 672 { 673 /* 674 * The sequencer has encountered a message phase 675 * that requires host assistance for completion. 676 * While handling the message phase(s), we will be 677 * notified by the sequencer after each byte is 678 * transfered so we can track bus phase changes. 679 * 680 * If this is the first time we've seen a HOST_MSG_LOOP 681 * interrupt, initialize the state of the host message 682 * loop. 683 */ 684 if (ahc->msg_type == MSG_TYPE_NONE) { 685 struct scb *scb; 686 u_int scb_index; 687 u_int bus_phase; 688 689 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 690 if (bus_phase != P_MESGIN 691 && bus_phase != P_MESGOUT) { 692 printf("ahc_intr: HOST_MSG_LOOP bad " 693 "phase 0x%x\n", 694 bus_phase); 695 /* 696 * Probably transitioned to bus free before 697 * we got here. Just punt the message. 
698 */ 699 ahc_clear_intstat(ahc); 700 ahc_restart(ahc); 701 return; 702 } 703 704 scb_index = ahc_inb(ahc, SCB_TAG); 705 scb = ahc_lookup_scb(ahc, scb_index); 706 if (devinfo.role == ROLE_INITIATOR) { 707 if (scb == NULL) 708 panic("HOST_MSG_LOOP with " 709 "invalid SCB %x\n", scb_index); 710 711 if (bus_phase == P_MESGOUT) 712 ahc_setup_initiator_msgout(ahc, 713 &devinfo, 714 scb); 715 else { 716 ahc->msg_type = 717 MSG_TYPE_INITIATOR_MSGIN; 718 ahc->msgin_index = 0; 719 } 720 } 721 #ifdef AHC_TARGET_MODE 722 else { 723 if (bus_phase == P_MESGOUT) { 724 ahc->msg_type = 725 MSG_TYPE_TARGET_MSGOUT; 726 ahc->msgin_index = 0; 727 } 728 else 729 ahc_setup_target_msgin(ahc, 730 &devinfo, 731 scb); 732 } 733 #endif 734 } 735 736 ahc_handle_message_phase(ahc); 737 break; 738 } 739 case PERR_DETECTED: 740 { 741 /* 742 * If we've cleared the parity error interrupt 743 * but the sequencer still believes that SCSIPERR 744 * is true, it must be that the parity error is 745 * for the currently presented byte on the bus, 746 * and we are not in a phase (data-in) where we will 747 * eventually ack this byte. Ack the byte and 748 * throw it away in the hope that the target will 749 * take us to message out to deliver the appropriate 750 * error message. 751 */ 752 if ((intstat & SCSIINT) == 0 753 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 754 755 if ((ahc->features & AHC_DT) == 0) { 756 u_int curphase; 757 758 /* 759 * The hardware will only let you ack bytes 760 * if the expected phase in SCSISIGO matches 761 * the current phase. Make sure this is 762 * currently the case. 763 */ 764 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 765 ahc_outb(ahc, LASTPHASE, curphase); 766 ahc_outb(ahc, SCSISIGO, curphase); 767 } 768 if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { 769 int wait; 770 771 /* 772 * In a data phase. Faster to bitbucket 773 * the data than to individually ack each 774 * byte. This is also the only strategy 775 * that will work with AUTOACK enabled. 
776 */ 777 ahc_outb(ahc, SXFRCTL1, 778 ahc_inb(ahc, SXFRCTL1) | BITBUCKET); 779 wait = 5000; 780 while (--wait != 0) { 781 if ((ahc_inb(ahc, SCSISIGI) 782 & (CDI|MSGI)) != 0) 783 break; 784 aic_delay(100); 785 } 786 ahc_outb(ahc, SXFRCTL1, 787 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 788 if (wait == 0) { 789 struct scb *scb; 790 u_int scb_index; 791 792 ahc_print_devinfo(ahc, &devinfo); 793 printf("Unable to clear parity error. " 794 "Resetting bus.\n"); 795 scb_index = ahc_inb(ahc, SCB_TAG); 796 scb = ahc_lookup_scb(ahc, scb_index); 797 if (scb != NULL) 798 aic_set_transaction_status(scb, 799 CAM_UNCOR_PARITY); 800 ahc_reset_channel(ahc, devinfo.channel, 801 /*init reset*/TRUE); 802 } 803 } else { 804 ahc_inb(ahc, SCSIDATL); 805 } 806 } 807 break; 808 } 809 case DATA_OVERRUN: 810 { 811 /* 812 * When the sequencer detects an overrun, it 813 * places the controller in "BITBUCKET" mode 814 * and allows the target to complete its transfer. 815 * Unfortunately, none of the counters get updated 816 * when the controller is in this mode, so we have 817 * no way of knowing how large the overrun was. 818 */ 819 u_int scbindex = ahc_inb(ahc, SCB_TAG); 820 u_int lastphase = ahc_inb(ahc, LASTPHASE); 821 u_int i; 822 823 scb = ahc_lookup_scb(ahc, scbindex); 824 for (i = 0; i < num_phases; i++) { 825 if (lastphase == ahc_phase_table[i].phase) 826 break; 827 } 828 ahc_print_path(ahc, scb); 829 printf("data overrun detected %s." 830 " Tag == 0x%x.\n", 831 ahc_phase_table[i].phasemsg, 832 scb->hscb->tag); 833 ahc_print_path(ahc, scb); 834 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 835 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? 
"Have" : "Haven't", 836 aic_get_transfer_length(scb), scb->sg_count); 837 if (scb->sg_count > 0) { 838 for (i = 0; i < scb->sg_count; i++) { 839 840 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 841 i, 842 (aic_le32toh(scb->sg_list[i].len) >> 24 843 & SG_HIGH_ADDR_BITS), 844 aic_le32toh(scb->sg_list[i].addr), 845 aic_le32toh(scb->sg_list[i].len) 846 & AHC_SG_LEN_MASK); 847 } 848 } 849 /* 850 * Set this and it will take effect when the 851 * target does a command complete. 852 */ 853 ahc_freeze_devq(ahc, scb); 854 if ((scb->flags & SCB_SENSE) == 0) { 855 aic_set_transaction_status(scb, CAM_DATA_RUN_ERR); 856 } else { 857 scb->flags &= ~SCB_SENSE; 858 aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 859 } 860 aic_freeze_scb(scb); 861 862 if ((ahc->features & AHC_ULTRA2) != 0) { 863 /* 864 * Clear the channel in case we return 865 * to data phase later. 866 */ 867 ahc_outb(ahc, SXFRCTL0, 868 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 869 ahc_outb(ahc, SXFRCTL0, 870 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 871 } 872 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 873 u_int dscommand1; 874 875 /* Ensure HHADDR is 0 for future DMA operations. */ 876 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 877 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 878 ahc_outb(ahc, HADDR, 0); 879 ahc_outb(ahc, DSCOMMAND1, dscommand1); 880 } 881 break; 882 } 883 case MKMSG_FAILED: 884 { 885 u_int scbindex; 886 887 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 888 ahc_name(ahc), devinfo.channel, devinfo.target, 889 devinfo.lun); 890 scbindex = ahc_inb(ahc, SCB_TAG); 891 scb = ahc_lookup_scb(ahc, scbindex); 892 if (scb != NULL 893 && (scb->flags & SCB_RECOVERY_SCB) != 0) 894 /* 895 * Ensure that we didn't put a second instance of this 896 * SCB into the QINFIFO. 
897 */ 898 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 899 SCB_GET_CHANNEL(ahc, scb), 900 SCB_GET_LUN(scb), scb->hscb->tag, 901 ROLE_INITIATOR, /*status*/0, 902 SEARCH_REMOVE); 903 break; 904 } 905 case NO_FREE_SCB: 906 { 907 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 908 ahc_dump_card_state(ahc); 909 panic("for safety"); 910 break; 911 } 912 case SCB_MISMATCH: 913 { 914 u_int scbptr; 915 916 scbptr = ahc_inb(ahc, SCBPTR); 917 printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", 918 scbptr, ahc_inb(ahc, ARG_1), 919 ahc->scb_data->hscbs[scbptr].tag); 920 ahc_dump_card_state(ahc); 921 panic("for saftey"); 922 break; 923 } 924 case OUT_OF_RANGE: 925 { 926 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 927 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 928 "ARG_1 == 0x%x ACCUM = 0x%x\n", 929 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 930 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 931 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 932 "SINDEX == 0x%x\n, A == 0x%x\n", 933 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 934 ahc_index_busy_tcl(ahc, 935 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 936 ahc_inb(ahc, SAVED_LUN))), 937 ahc_inb(ahc, SINDEX), 938 ahc_inb(ahc, ACCUM)); 939 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 940 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 941 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 942 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 943 ahc_inb(ahc, SCB_CONTROL)); 944 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 945 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 946 ahc_dump_card_state(ahc); 947 panic("for safety"); 948 break; 949 } 950 default: 951 printf("ahc_intr: seqint, " 952 "intstat == 0x%x, scsisigi = 0x%x\n", 953 intstat, ahc_inb(ahc, SCSISIGI)); 954 break; 955 } 956 unpause: 957 /* 958 * The sequencer is paused immediately on 959 * a SEQINT, so we should restart it when 960 * we're done. 
961 */ 962 ahc_unpause(ahc); 963 } 964 965 void 966 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 967 { 968 u_int scb_index; 969 u_int status0; 970 u_int status; 971 struct scb *scb; 972 char cur_channel; 973 char intr_channel; 974 975 if ((ahc->features & AHC_TWIN) != 0 976 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 977 cur_channel = 'B'; 978 else 979 cur_channel = 'A'; 980 intr_channel = cur_channel; 981 982 if ((ahc->features & AHC_ULTRA2) != 0) 983 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 984 else 985 status0 = 0; 986 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 987 if (status == 0 && status0 == 0) { 988 if ((ahc->features & AHC_TWIN) != 0) { 989 /* Try the other channel */ 990 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 991 status = ahc_inb(ahc, SSTAT1) 992 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 993 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 994 } 995 if (status == 0) { 996 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 997 ahc_outb(ahc, CLRINT, CLRSCSIINT); 998 ahc_unpause(ahc); 999 return; 1000 } 1001 } 1002 1003 /* Make sure the sequencer is in a safe location. */ 1004 ahc_clear_critical_section(ahc); 1005 1006 scb_index = ahc_inb(ahc, SCB_TAG); 1007 scb = ahc_lookup_scb(ahc, scb_index); 1008 if (scb != NULL 1009 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 1010 scb = NULL; 1011 1012 if ((ahc->features & AHC_ULTRA2) != 0 1013 && (status0 & IOERR) != 0) { 1014 int now_lvd; 1015 1016 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 1017 printf("%s: Transceiver State Has Changed to %s mode\n", 1018 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 1019 ahc_outb(ahc, CLRSINT0, CLRIOERR); 1020 /* 1021 * When transitioning to SE mode, the reset line 1022 * glitches, triggering an arbitration bug in some 1023 * Ultra2 controllers. This bug is cleared when we 1024 * assert the reset line. 
Since a reset glitch has 1025 * already occurred with this transition and a 1026 * transceiver state change is handled just like 1027 * a bus reset anyway, asserting the reset line 1028 * ourselves is safe. 1029 */ 1030 ahc_reset_channel(ahc, intr_channel, 1031 /*Initiate Reset*/now_lvd == 0); 1032 } else if ((status & SCSIRSTI) != 0) { 1033 printf("%s: Someone reset channel %c\n", 1034 ahc_name(ahc), intr_channel); 1035 if (intr_channel != cur_channel) 1036 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1037 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1038 } else if ((status & SCSIPERR) != 0) { 1039 /* 1040 * Determine the bus phase and queue an appropriate message. 1041 * SCSIPERR is latched true as soon as a parity error 1042 * occurs. If the sequencer acked the transfer that 1043 * caused the parity error and the currently presented 1044 * transfer on the bus has correct parity, SCSIPERR will 1045 * be cleared by CLRSCSIPERR. Use this to determine if 1046 * we should look at the last phase the sequencer recorded, 1047 * or the current phase presented on the bus. 1048 */ 1049 struct ahc_devinfo devinfo; 1050 u_int mesg_out; 1051 u_int curphase; 1052 u_int errorphase; 1053 u_int lastphase; 1054 u_int scsirate; 1055 u_int i; 1056 u_int sstat2; 1057 int silent; 1058 1059 lastphase = ahc_inb(ahc, LASTPHASE); 1060 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1061 sstat2 = ahc_inb(ahc, SSTAT2); 1062 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1063 /* 1064 * For all phases save DATA, the sequencer won't 1065 * automatically ack a byte that has a parity error 1066 * in it. So the only way that the current phase 1067 * could be 'data-in' is if the parity error is for 1068 * an already acked byte in the data phase. During 1069 * synchronous data-in transfers, we may actually 1070 * ack bytes before latching the current phase in 1071 * LASTPHASE, leading to the discrepancy between 1072 * curphase and lastphase. 
1073 */ 1074 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1075 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1076 errorphase = curphase; 1077 else 1078 errorphase = lastphase; 1079 1080 for (i = 0; i < num_phases; i++) { 1081 if (errorphase == ahc_phase_table[i].phase) 1082 break; 1083 } 1084 mesg_out = ahc_phase_table[i].mesg_out; 1085 silent = FALSE; 1086 if (scb != NULL) { 1087 if (SCB_IS_SILENT(scb)) 1088 silent = TRUE; 1089 else 1090 ahc_print_path(ahc, scb); 1091 scb->flags |= SCB_TRANSMISSION_ERROR; 1092 } else 1093 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1094 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1095 scsirate = ahc_inb(ahc, SCSIRATE); 1096 if (silent == FALSE) { 1097 printf("parity error detected %s. " 1098 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1099 ahc_phase_table[i].phasemsg, 1100 ahc_inw(ahc, SEQADDR0), 1101 scsirate); 1102 if ((ahc->features & AHC_DT) != 0) { 1103 if ((sstat2 & CRCVALERR) != 0) 1104 printf("\tCRC Value Mismatch\n"); 1105 if ((sstat2 & CRCENDERR) != 0) 1106 printf("\tNo terminal CRC packet " 1107 "recevied\n"); 1108 if ((sstat2 & CRCREQERR) != 0) 1109 printf("\tIllegal CRC packet " 1110 "request\n"); 1111 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1112 printf("\tUnexpected %sDT Data Phase\n", 1113 (scsirate & SINGLE_EDGE) 1114 ? "" : "non-"); 1115 } 1116 } 1117 1118 if ((ahc->features & AHC_DT) != 0 1119 && (sstat2 & DUAL_EDGE_ERR) != 0) { 1120 /* 1121 * This error applies regardless of 1122 * data direction, so ignore the value 1123 * in the phase table. 1124 */ 1125 mesg_out = MSG_INITIATOR_DET_ERR; 1126 } 1127 1128 /* 1129 * We've set the hardware to assert ATN if we 1130 * get a parity error on "in" phases, so all we 1131 * need to do is stuff the message buffer with 1132 * the appropriate message. "In" phases have set 1133 * mesg_out to something other than MSG_NOP. 
1134 */ 1135 if (mesg_out != MSG_NOOP) { 1136 if (ahc->msg_type != MSG_TYPE_NONE) 1137 ahc->send_msg_perror = TRUE; 1138 else 1139 ahc_outb(ahc, MSG_OUT, mesg_out); 1140 } 1141 /* 1142 * Force a renegotiation with this target just in 1143 * case we are out of sync for some external reason 1144 * unknown (or unreported) by the target. 1145 */ 1146 ahc_fetch_devinfo(ahc, &devinfo); 1147 ahc_force_renegotiation(ahc, &devinfo); 1148 1149 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1150 ahc_unpause(ahc); 1151 } else if ((status & SELTO) != 0) { 1152 u_int scbptr; 1153 1154 /* Stop the selection */ 1155 ahc_outb(ahc, SCSISEQ, 0); 1156 1157 /* No more pending messages */ 1158 ahc_clear_msg_state(ahc); 1159 1160 /* Clear interrupt state */ 1161 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1162 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1163 1164 /* 1165 * Although the driver does not care about the 1166 * 'Selection in Progress' status bit, the busy 1167 * LED does. SELINGO is only cleared by a sucessfull 1168 * selection, so we must manually clear it to insure 1169 * the LED turns off just incase no future successful 1170 * selections occur (e.g. no devices on the bus). 
1171 */ 1172 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1173 1174 scbptr = ahc_inb(ahc, WAITING_SCBH); 1175 ahc_outb(ahc, SCBPTR, scbptr); 1176 scb_index = ahc_inb(ahc, SCB_TAG); 1177 1178 scb = ahc_lookup_scb(ahc, scb_index); 1179 if (scb == NULL) { 1180 printf("%s: ahc_intr - referenced scb not " 1181 "valid during SELTO scb(%d, %d)\n", 1182 ahc_name(ahc), scbptr, scb_index); 1183 ahc_dump_card_state(ahc); 1184 } else { 1185 struct ahc_devinfo devinfo; 1186 #ifdef AHC_DEBUG 1187 if ((ahc_debug & AHC_SHOW_SELTO) != 0) { 1188 ahc_print_path(ahc, scb); 1189 printf("Saw Selection Timeout for SCB 0x%x\n", 1190 scb_index); 1191 } 1192 #endif 1193 ahc_scb_devinfo(ahc, &devinfo, scb); 1194 aic_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1195 ahc_freeze_devq(ahc, scb); 1196 1197 /* 1198 * Cancel any pending transactions on the device 1199 * now that it seems to be missing. This will 1200 * also revert us to async/narrow transfers until 1201 * we can renegotiate with the device. 1202 */ 1203 ahc_handle_devreset(ahc, &devinfo, 1204 CAM_SEL_TIMEOUT, 1205 "Selection Timeout", 1206 /*verbose_level*/1); 1207 } 1208 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1209 ahc_restart(ahc); 1210 } else if ((status & BUSFREE) != 0 1211 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1212 struct ahc_devinfo devinfo; 1213 u_int lastphase; 1214 u_int saved_scsiid; 1215 u_int saved_lun; 1216 u_int target; 1217 u_int initiator_role_id; 1218 char channel; 1219 int printerror; 1220 1221 /* 1222 * Clear our selection hardware as soon as possible. 1223 * We may have an entry in the waiting Q for this target, 1224 * that is affected by this busfree and we don't want to 1225 * go about selecting the target while we handle the event. 1226 */ 1227 ahc_outb(ahc, SCSISEQ, 1228 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1229 1230 /* 1231 * Disable busfree interrupts and clear the busfree 1232 * interrupt status. 
We do this here so that several 1233 * bus transactions occur prior to clearing the SCSIINT 1234 * latch. It can take a bit for the clearing to take effect. 1235 */ 1236 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1237 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1238 1239 /* 1240 * Look at what phase we were last in. 1241 * If its message out, chances are pretty good 1242 * that the busfree was in response to one of 1243 * our abort requests. 1244 */ 1245 lastphase = ahc_inb(ahc, LASTPHASE); 1246 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1247 saved_lun = ahc_inb(ahc, SAVED_LUN); 1248 target = SCSIID_TARGET(ahc, saved_scsiid); 1249 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1250 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1251 ahc_compile_devinfo(&devinfo, initiator_role_id, 1252 target, saved_lun, channel, ROLE_INITIATOR); 1253 printerror = 1; 1254 1255 if (lastphase == P_MESGOUT) { 1256 u_int tag; 1257 1258 tag = SCB_LIST_NULL; 1259 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1260 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1261 if (ahc->msgout_buf[ahc->msgout_index - 1] 1262 == MSG_ABORT_TAG) 1263 tag = scb->hscb->tag; 1264 ahc_print_path(ahc, scb); 1265 printf("SCB %d - Abort%s Completed.\n", 1266 scb->hscb->tag, tag == SCB_LIST_NULL ? 1267 "" : " Tag"); 1268 ahc_abort_scbs(ahc, target, channel, 1269 saved_lun, tag, 1270 ROLE_INITIATOR, 1271 CAM_REQ_ABORTED); 1272 printerror = 0; 1273 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1274 MSG_BUS_DEV_RESET, TRUE)) { 1275 #ifdef __FreeBSD__ 1276 /* 1277 * Don't mark the user's request for this BDR 1278 * as completing with CAM_BDR_SENT. CAM3 1279 * specifies CAM_REQ_CMP. 
1280 */ 1281 if (scb != NULL 1282 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1283 && ahc_match_scb(ahc, scb, target, channel, 1284 CAM_LUN_WILDCARD, 1285 SCB_LIST_NULL, 1286 ROLE_INITIATOR)) { 1287 aic_set_transaction_status(scb, CAM_REQ_CMP); 1288 } 1289 #endif 1290 ahc_compile_devinfo(&devinfo, 1291 initiator_role_id, 1292 target, 1293 CAM_LUN_WILDCARD, 1294 channel, 1295 ROLE_INITIATOR); 1296 ahc_handle_devreset(ahc, &devinfo, 1297 CAM_BDR_SENT, 1298 "Bus Device Reset", 1299 /*verbose_level*/0); 1300 printerror = 0; 1301 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1302 MSG_EXT_PPR, FALSE)) { 1303 struct ahc_initiator_tinfo *tinfo; 1304 struct ahc_tmode_tstate *tstate; 1305 1306 /* 1307 * PPR Rejected. Try non-ppr negotiation 1308 * and retry command. 1309 */ 1310 tinfo = ahc_fetch_transinfo(ahc, 1311 devinfo.channel, 1312 devinfo.our_scsiid, 1313 devinfo.target, 1314 &tstate); 1315 tinfo->curr.transport_version = 2; 1316 tinfo->goal.transport_version = 2; 1317 tinfo->goal.ppr_options = 0; 1318 ahc_qinfifo_requeue_tail(ahc, scb); 1319 printerror = 0; 1320 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1321 MSG_EXT_WDTR, FALSE)) { 1322 /* 1323 * Negotiation Rejected. Go-narrow and 1324 * retry command. 1325 */ 1326 ahc_set_width(ahc, &devinfo, 1327 MSG_EXT_WDTR_BUS_8_BIT, 1328 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1329 /*paused*/TRUE); 1330 ahc_qinfifo_requeue_tail(ahc, scb); 1331 printerror = 0; 1332 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1333 MSG_EXT_SDTR, FALSE)) { 1334 /* 1335 * Negotiation Rejected. Go-async and 1336 * retry command. 
1337 */ 1338 ahc_set_syncrate(ahc, &devinfo, 1339 /*syncrate*/NULL, 1340 /*period*/0, /*offset*/0, 1341 /*ppr_options*/0, 1342 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1343 /*paused*/TRUE); 1344 ahc_qinfifo_requeue_tail(ahc, scb); 1345 printerror = 0; 1346 } 1347 } 1348 if (printerror != 0) { 1349 u_int i; 1350 1351 if (scb != NULL) { 1352 u_int tag; 1353 1354 if ((scb->hscb->control & TAG_ENB) != 0) 1355 tag = scb->hscb->tag; 1356 else 1357 tag = SCB_LIST_NULL; 1358 ahc_print_path(ahc, scb); 1359 ahc_abort_scbs(ahc, target, channel, 1360 SCB_GET_LUN(scb), tag, 1361 ROLE_INITIATOR, 1362 CAM_UNEXP_BUSFREE); 1363 } else { 1364 /* 1365 * We had not fully identified this connection, 1366 * so we cannot abort anything. 1367 */ 1368 printf("%s: ", ahc_name(ahc)); 1369 } 1370 for (i = 0; i < num_phases; i++) { 1371 if (lastphase == ahc_phase_table[i].phase) 1372 break; 1373 } 1374 if (lastphase != P_BUSFREE) { 1375 /* 1376 * Renegotiate with this device at the 1377 * next oportunity just in case this busfree 1378 * is due to a negotiation mismatch with the 1379 * device. 1380 */ 1381 ahc_force_renegotiation(ahc, &devinfo); 1382 } 1383 printf("Unexpected busfree %s\n" 1384 "SEQADDR == 0x%x\n", 1385 ahc_phase_table[i].phasemsg, 1386 ahc_inb(ahc, SEQADDR0) 1387 | (ahc_inb(ahc, SEQADDR1) << 8)); 1388 } 1389 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1390 ahc_restart(ahc); 1391 } else { 1392 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1393 ahc_name(ahc), status); 1394 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1395 } 1396 } 1397 1398 /* 1399 * Force renegotiation to occur the next time we initiate 1400 * a command to the current device. 
 */
static void
ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;

	targ_info = ahc_fetch_transinfo(ahc,
					devinfo->channel,
					devinfo->our_scsiid,
					devinfo->target,
					&tstate);
	/* Flag the target for renegotiation if its goal is non-async. */
	ahc_update_neg_request(ahc, devinfo, tstate,
			       targ_info, AHC_NEG_IF_NON_ASYNC);
}

/* Upper bound on single-step iterations before declaring the sequencer hung. */
#define AHC_MAX_STEPS 2000
/*
 * Single-step the sequencer until its program counter is outside all
 * firmware critical sections, so the host may safely modify chip state.
 */
void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
	int	stepping;
	int	steps;
	u_int	simode0;
	u_int	simode1;

	if (ahc->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	simode0 = 0;
	simode1 = 0;
	for (;;) {
		struct	cs *cs;
		u_int	seqaddr;
		u_int	i;

		seqaddr = ahc_inb(ahc, SEQADDR0)
			| (ahc_inb(ahc, SEQADDR1) << 8);

		/*
		 * Seqaddr represents the next instruction to execute,
		 * so we are really executing the instruction just
		 * before it.
		 */
		cs = ahc->critical_sections;
		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		if (i == ahc->num_critical_sections)
			break;	/* Not inside any critical section. */

		if (steps > AHC_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n",
			       ahc_name(ahc));
			ahc_dump_card_state(ahc);
			panic("critical section loop");
		}

		steps++;
		if (stepping == FALSE) {

			/*
			 * Disable all interrupt sources so that the
			 * sequencer will not be stuck by a pausing
			 * interrupt condition while we attempt to
			 * leave a critical section.
			 */
			simode0 = ahc_inb(ahc, SIMODE0);
			ahc_outb(ahc, SIMODE0, 0);
			simode1 = ahc_inb(ahc, SIMODE1);
			if ((ahc->features & AHC_DT) != 0)
				/*
				 * On DT class controllers, we
				 * use the enhanced busfree logic.
				 * Unfortunately we cannot re-enable
				 * busfree detection within the
				 * current connection, so we must
				 * leave it on while single stepping.
				 */
				ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
			else
				ahc_outb(ahc, SIMODE1, 0);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
			stepping = TRUE;
		}
		if ((ahc->features & AHC_DT) != 0) {
			ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
		}
		/* Unpause for one instruction, then wait for the re-pause. */
		ahc_outb(ahc, HCNTRL, ahc->unpause);
		while (!ahc_is_paused(ahc))
			aic_delay(200);
	}
	if (stepping) {
		/* Restore interrupt masks and normal (non-STEP) operation. */
		ahc_outb(ahc, SIMODE0, simode0);
		ahc_outb(ahc, SIMODE1, simode1);
		ahc_outb(ahc, SEQCTL, ahc->seqctl);
	}
}

/*
 * Clear any pending interrupt status.
 */
void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
	ahc_flush_device_writes(ahc);
}

/**************************** Debugging Routines ******************************/
#ifdef AHC_DEBUG
/* Bitmask of AHC_SHOW_* options enabled at compile time. */
uint32_t ahc_debug = AHC_DEBUG_OPTS;
#endif

/*
 * Dump the contents of an SCB (host copy) to the console for debugging.
 */
void
ahc_print_scb(struct scb *scb)
{
	int i;

	struct hardware_scb *hscb = scb->hscb;

	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
	       (void *)scb,
	       hscb->control,
	       hscb->scsiid,
	       hscb->lun,
	       hscb->cdb_len);
	printf("Shared Data: ");
	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
		printf("%#02x", hscb->shared_data.cdb[i]);
	printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
	       aic_le32toh(hscb->dataptr),
	       aic_le32toh(hscb->datacnt),
	       aic_le32toh(hscb->sgptr),
	       hscb->tag);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			/*
			 * The extra address bits are packed into the top
			 * byte of each segment's 'len' field.
			 */
			printf("sg[%d] - Addr 0x%x%x : Length %d\n",
			       i,
			       (aic_le32toh(scb->sg_list[i].len) >> 24
				& SG_HIGH_ADDR_BITS),
			       aic_le32toh(scb->sg_list[i].addr),
			       aic_le32toh(scb->sg_list[i].len));
		}
	}
}

/************************* Transfer Negotiation *******************************/
/*
 * Allocate per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static struct ahc_tmode_tstate *
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
{
	struct ahc_tmode_tstate *master_tstate;
	struct ahc_tmode_tstate *tstate;
	int i;

	/* Channel B ids live in the upper half of enabled_targets[]. */
	master_tstate = ahc->enabled_targets[ahc->our_id];
	if (channel == 'B') {
		scsi_id += 8;
		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
	}
	if (ahc->enabled_targets[scsi_id] != NULL
	 && ahc->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahc_alloc_tstate - Target already allocated",
		      ahc_name(ahc));
	tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate),
						   M_DEVBUF, M_NOWAIT);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
	 */
	if (master_tstate != NULL) {
		memcpy(tstate, master_tstate, sizeof(*tstate));
		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
		tstate->ultraenb = 0;
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			/* Zeroed curr/goal == async, narrow. */
			memset(&tstate->transinfo[i].curr, 0,
			       sizeof(tstate->transinfo[i].curr));
			memset(&tstate->transinfo[i].goal, 0,
			       sizeof(tstate->transinfo[i].goal));
		}
	} else
		memset(tstate, 0, sizeof(*tstate));
	ahc->enabled_targets[scsi_id] = tstate;
	return (tstate);
}

#ifdef AHC_TARGET_MODE
/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct ahc_tmode_tstate *tstate;

	/*
	 * Don't clean up our "master" tstate.
	 * It has our default user settings.
	 */
	if (((channel == 'B' && scsi_id == ahc->our_id_b)
	  || (channel == 'A' && scsi_id == ahc->our_id))
	 && force == FALSE)
		return;

	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	ahc->enabled_targets[scsi_id] = NULL;
}
#endif

/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest syncrate to the input period limited
 * by the capabilities of the bus connectivity of and sync settings for
 * the target.
 */
struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
			struct ahc_initiator_tinfo *tinfo,
			u_int *period, u_int *ppr_options, role_t role)
{
	struct	ahc_transinfo *transinfo;
	u_int	maxsync;

	if ((ahc->features & AHC_ULTRA2) != 0) {
		/* LVD bus with no expander active allows DT. */
		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
			maxsync = AHC_SYNCRATE_DT;
		} else {
			maxsync = AHC_SYNCRATE_ULTRA;
			/* Can't do DT on an SE bus */
			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		}
	} else if ((ahc->features & AHC_ULTRA) != 0) {
		maxsync = AHC_SYNCRATE_ULTRA;
	} else {
		maxsync = AHC_SYNCRATE_FAST;
	}
	/*
	 * Never allow a value higher than our current goal
	 * period otherwise we may allow a target initiated
	 * negotiation to go above the limit as set by the
	 * user.  In the case of an initiator initiated
	 * sync negotiation, we limit based on the user
	 * setting.  This allows the system to still accept
	 * incoming negotiations even if target initiated
	 * negotiation is not performed.
	 */
	if (role == ROLE_TARGET)
		transinfo = &tinfo->user;
	else
		transinfo = &tinfo->goal;
	*ppr_options &= transinfo->ppr_options;
	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
		/*
		 * NOTE(review): assumes a larger syncrate-table index means
		 * a slower rate, so MAX() clamps narrow buses below DT
		 * speeds — verify against the ahc_syncrates ordering.
		 */
		maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	if (transinfo->period == 0) {
		/* Goal is async: report async and no PPR options. */
		*period = 0;
		*ppr_options = 0;
		return (NULL);
	}
	*period = MAX(*period, transinfo->period);
	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	if ((ahc->features & AHC_DT) == 0)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	/* Skip all DT only entries if DT is not available */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA2)
		maxsync = AHC_SYNCRATE_ULTRA2;

	/* Scan from the fastest permitted entry toward slower rates. */
	for (syncrate = &ahc_syncrates[maxsync];
	     syncrate->rate != NULL;
	     syncrate++) {

		/*
		 * The Ultra2 table doesn't go as low
		 * as for the Fast/Ultra cards.
		 */
		if ((ahc->features & AHC_ULTRA2) != 0
		 && (syncrate->sxfr_u2 == 0))
			break;

		if (*period <= syncrate->period) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (syncrate == &ahc_syncrates[maxsync])
				*period = syncrate->period;

			/*
			 * At some speeds, we only support
			 * ST transfers.
			 */
			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
			break;
		}
	}

	if ((*period == 0)
	 || (syncrate->rate == NULL)
	 || ((ahc->features & AHC_ULTRA2) != 0
	  && (syncrate->sxfr_u2 == 0))) {
		/* Use asynchronous transfers. */
		*period = 0;
		syncrate = NULL;
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	return (syncrate);
}

/*
 * Convert from an entry in our syncrate table to the SCSI equivalent
 * sync "period" factor.
 */
u_int
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	/* Mask down to the rate bits relevant for this chip family. */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsirate &= SXFR_ULTRA2;
	else
		scsirate &= SXFR;

	syncrate = &ahc_syncrates[maxsync];
	while (syncrate->rate != NULL) {

		if ((ahc->features & AHC_ULTRA2) != 0) {
			if (syncrate->sxfr_u2 == 0)
				break;
			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
				return (syncrate->period);
		} else if (scsirate == (syncrate->sxfr & SXFR)) {
			return (syncrate->period);
		}
		syncrate++;
	}
	return (0); /* async */
}

/*
 * Truncate the given synchronous offset to a value the
 * current adapter type and syncrate are capable of.
 */
void
ahc_validate_offset(struct ahc_softc *ahc,
		    struct ahc_initiator_tinfo *tinfo,
		    struct ahc_syncrate *syncrate,
		    u_int *offset, int wide, role_t role)
{
	u_int maxoffset;

	/* Limit offset to what we can do */
	if (syncrate == NULL) {
		/* Async: offset must be zero. */
		maxoffset = 0;
	} else if ((ahc->features & AHC_ULTRA2) != 0) {
		maxoffset = MAX_OFFSET_ULTRA2;
	} else {
		if (wide)
			maxoffset = MAX_OFFSET_16BIT;
		else
			maxoffset = MAX_OFFSET_8BIT;
	}
	*offset = MIN(*offset, maxoffset);
	if (tinfo != NULL) {
		/* Further clamp by the per-target user/goal settings. */
		if (role == ROLE_TARGET)
			*offset = MIN(*offset, tinfo->user.offset);
		else
			*offset = MIN(*offset, tinfo->goal.offset);
	}
}

/*
 * Truncate the given transfer width parameter to a value the
 * current adapter type is capable of.
 */
void
ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
		   u_int *bus_width, role_t role)
{
	switch (*bus_width) {
	default:
		/* Any non-8-bit request: honor it only on wide adapters. */
		if (ahc->features & AHC_WIDE) {
			/* Respond Wide */
			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			break;
		}
		/* FALLTHROUGH */
	case MSG_EXT_WDTR_BUS_8_BIT:
		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		break;
	}
	if (tinfo != NULL) {
		/* Further clamp by the per-target user/goal settings. */
		if (role == ROLE_TARGET)
			*bus_width = MIN(tinfo->user.width, *bus_width);
		else
			*bus_width = MIN(tinfo->goal.width, *bus_width);
	}
}

/*
 * Update the bitmask of targets for which the controller should
 * negotiate with at the next convenient opportunity.  This currently
 * means the next time we send the initial identify messages for
 * a new transaction.
 */
int
ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct ahc_tmode_tstate *tstate,
		       struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
{
	u_int auto_negotiate_orig;

	auto_negotiate_orig = tstate->auto_negotiate;
	if (neg_type == AHC_NEG_ALWAYS) {
		/*
		 * Force our "current" settings to be
		 * unknown so that unless a bus reset
		 * occurs the need to renegotiate is
		 * recorded persistently.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			tinfo->curr.width = AHC_WIDTH_UNKNOWN;
		tinfo->curr.period = AHC_PERIOD_UNKNOWN;
		tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
	}
	/*
	 * Negotiate when curr and goal differ, or — for IF_NON_ASYNC —
	 * whenever the goal is anything other than async/narrow.
	 */
	if (tinfo->curr.period != tinfo->goal.period
	 || tinfo->curr.width != tinfo->goal.width
	 || tinfo->curr.offset != tinfo->goal.offset
	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
	 || (neg_type == AHC_NEG_IF_NON_ASYNC
	  && (tinfo->goal.offset != 0
	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
	   || tinfo->goal.ppr_options != 0)))
		tstate->auto_negotiate |= devinfo->target_mask;
	else
		tstate->auto_negotiate &= ~devinfo->target_mask;

	/* Non-zero when the negotiation mask actually changed. */
	return (auto_negotiate_orig != tstate->auto_negotiate);
}

/*
 * Update the user/goal/curr tables of synchronous negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 struct ahc_syncrate *syncrate, u_int period,
		 u_int offset, u_int ppr_options, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	/* NOTE(review): 'paused' is unused here; kept for interface parity. */
	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;

	if (syncrate == NULL) {
		/* NULL syncrate means async. */
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr = tinfo->curr.ppr_options;

	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		u_int	scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {

			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL) {
				scsirate |= syncrate->sxfr_u2;
				/* DT transfers carry CRC; ST is single edge. */
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
					scsirate |= ENABLE_CRC;
				else
					scsirate |= SINGLE_EDGE;
			}
		} else {

			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active) {
			/* Connection in progress: update the chip now. */
			ahc_outb(ahc, SCSIRATE, scsirate);
			if ((ahc->features & AHC_ULTRA2) != 0)
				ahc_outb(ahc, SCSIOFFSET, offset);
		}

		tinfo->scsirate = scsirate;
		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			if (offset != 0) {
				printf("%s: target %d synchronous at %sMHz%s, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate,
				       (ppr_options & MSG_EXT_PPR_DT_REQ)
				       ? " DT" : "", offset);
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);

	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	oldwidth;
	int	active;
	int	update_needed;

	/* NOTE(review): 'paused' is unused here; kept for interface parity. */
	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHC_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
		u_int	scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		scsirate &= ~WIDEXFER;
		if (width == MSG_EXT_WDTR_BUS_16_BIT)
			scsirate |= WIDEXFER;

		tinfo->scsirate = scsirate;

		if (active)
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->curr.width = width;

		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			/* WDTR width codes are log2(bytes): 8 * 2^width bits */
			printf("%s: target %d using %dbit transfers\n",
			       ahc_name(ahc), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);
	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the current state of tagged queuing for a given target.
 */
void
ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	     ahc_queue_alg alg)
{
	/* The platform layer owns tag policy; we just notify consumers. */
	ahc_platform_set_tags(ahc, devinfo, alg);
	ahc_send_async(ahc, devinfo->channel, devinfo->target,
		       devinfo->lun, AC_TRANSFER_NEG, &alg);
}

/*
 * When the transfer settings for a connection change, update any
 * in-transit SCBs to contain the new data so the hardware will
 * be set correctly during future (re)selections.
 */
static void
ahc_update_pending_scbs(struct ahc_softc *ahc)
{
	struct	scb *pending_scb;
	int	pending_scb_count;
	int	i;
	int	paused;
	u_int	saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		struct ahc_devinfo devinfo;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;

		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		pending_hscb = pending_scb->hscb;
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->curr.offset;
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			/* Negotiation no longer needed for this target. */
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_hscb->control &= ~MK_MESSAGE;
		}
		ahc_sync_scb(ahc, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	/* Pause the sequencer (if not already) before touching card SCBs. */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		struct hardware_scb *pending_hscb;
		u_int control;
		u_int scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		pending_scb = ahc_lookup_scb(ahc, scb_tag);
		if (pending_scb == NULL)
			continue;

		pending_hscb = pending_scb->hscb;
		control = ahc_inb(ahc, SCB_CONTROL);
		control &= ~(ULTRAENB|MK_MESSAGE);
		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
		ahc_outb(ahc, SCB_CONTROL, control);
		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
	}
	ahc_outb(ahc, SCBPTR, saved_scbptr);

	if (paused == 0)
		ahc_unpause(ahc);
}

/**************************** Pathing Information *****************************/
/*
 * Build a devinfo describing the connection currently on the bus,
 * derived entirely from chip registers (role, our id, saved target id,
 * lun and channel).
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int	saved_scsiid;
	role_t	role;
	int	our_id;

	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS)
	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	ahc_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahc, saved_scsiid),
			    ahc_inb(ahc, SAVED_LUN),
			    SCSIID_CHANNEL(ahc, saved_scsiid),
			    role);
}

struct ahc_phase_table_entry*
ahc_lookup_phase_entry(int phase)
{
	struct
ahc_phase_table_entry *entry; 2211 struct ahc_phase_table_entry *last_entry; 2212 2213 /* 2214 * num_phases doesn't include the default entry which 2215 * will be returned if the phase doesn't match. 2216 */ 2217 last_entry = &ahc_phase_table[num_phases]; 2218 for (entry = ahc_phase_table; entry < last_entry; entry++) { 2219 if (phase == entry->phase) 2220 break; 2221 } 2222 return (entry); 2223 } 2224 2225 void 2226 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, 2227 u_int lun, char channel, role_t role) 2228 { 2229 devinfo->our_scsiid = our_id; 2230 devinfo->target = target; 2231 devinfo->lun = lun; 2232 devinfo->target_offset = target; 2233 devinfo->channel = channel; 2234 devinfo->role = role; 2235 if (channel == 'B') 2236 devinfo->target_offset += 8; 2237 devinfo->target_mask = (0x01 << devinfo->target_offset); 2238 } 2239 2240 void 2241 ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2242 { 2243 printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, 2244 devinfo->target, devinfo->lun); 2245 } 2246 2247 static void 2248 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2249 struct scb *scb) 2250 { 2251 role_t role; 2252 int our_id; 2253 2254 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 2255 role = ROLE_INITIATOR; 2256 if ((scb->flags & SCB_TARGET_SCB) != 0) 2257 role = ROLE_TARGET; 2258 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), 2259 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); 2260 } 2261 2262 2263 /************************ Message Phase Processing ****************************/ 2264 static void 2265 ahc_assert_atn(struct ahc_softc *ahc) 2266 { 2267 u_int scsisigo; 2268 2269 scsisigo = ATNO; 2270 if ((ahc->features & AHC_DT) == 0) 2271 scsisigo |= ahc_inb(ahc, SCSISIGI); 2272 ahc_outb(ahc, SCSISIGO, scsisigo); 2273 } 2274 2275 /* 2276 * When an initiator transaction with the MK_MESSAGE flag either reconnects 2277 * or enters the initial message out phase, we are 
interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		/* Lead with an IDENTIFY (plus queue tag if tagged). */
		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		/* MK_MESSAGE set, but no reason to send one: driver bug. */
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	offset = tinfo->goal.offset;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;

		if (bootverbose) {
			ahc_print_devinfo(ahc, devinfo);
			printf("Ensuring async\n");
		}
	}

	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
2464 */ 2465 static void 2466 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2467 u_int period, u_int offset) 2468 { 2469 if (offset == 0) 2470 period = AHC_ASYNC_XFER_PERIOD; 2471 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2472 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; 2473 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; 2474 ahc->msgout_buf[ahc->msgout_index++] = period; 2475 ahc->msgout_buf[ahc->msgout_index++] = offset; 2476 ahc->msgout_len += 5; 2477 if (bootverbose) { 2478 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 2479 ahc_name(ahc), devinfo->channel, devinfo->target, 2480 devinfo->lun, period, offset); 2481 } 2482 } 2483 2484 /* 2485 * Build a wide negotiation message in our message 2486 * buffer based on the input parameters. 2487 */ 2488 static void 2489 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2490 u_int bus_width) 2491 { 2492 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2493 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; 2494 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; 2495 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2496 ahc->msgout_len += 4; 2497 if (bootverbose) { 2498 printf("(%s:%c:%d:%d): Sending WDTR %x\n", 2499 ahc_name(ahc), devinfo->channel, devinfo->target, 2500 devinfo->lun, bus_width); 2501 } 2502 } 2503 2504 /* 2505 * Build a parallel protocol request message in our message 2506 * buffer based on the input parameters. 
 */
static void
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/* A zero offset means async; advertise the async period factor. */
	if (offset == 0)
		period = AHC_ASYNC_XFER_PERIOD;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = 0;	/* reserved byte */
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
	ahc->msgout_len += 8;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
	ahc_outb(ahc, SEQ_FLAGS2,
		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}

/*
 * Recover from a protocol violation reported by the sequencer.
 * Depending on what state the connection is in, we either attempt
 * to abort the offending command or reset the bus.
 */
static void
ahc_handle_proto_violation(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	struct	scb *scb;
	u_int	scbid;
	u_int	seq_flags;
	u_int	curphase;
	u_int	lastphase;
	int	found;

	ahc_fetch_devinfo(ahc, &devinfo);
	scbid = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scbid);
	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
	lastphase = ahc_inb(ahc, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {

		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printf("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction.  Print an error and reset the bus.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printf("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahc_print_path(ahc, scb);
			printf("No or incomplete CDB sent to device.\n");
		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command.  Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it.  Assert ATN and prepare to send an abort
			 * message.
			 */
			ahc_print_path(ahc, scb);
			printf("Completed command without status.\n");
		} else {
			ahc_print_path(ahc, scb);
			printf("Unknown protocol violation.\n");
			ahc_dump_card_state(ahc);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data/command
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahc_reset_channel(ahc, 'A', TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		ahc_assert_atn(ahc);
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			/* No SCB to flag; queue an abort message directly. */
			ahc_print_devinfo(ahc, &devinfo);
			ahc->msgout_buf[0] = MSG_ABORT_TASK;
			ahc->msgout_len = 1;
			ahc->msgout_index = 0;
			ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			ahc_print_path(ahc, scb);
			scb->flags |= SCB_ABORT;
		}
		printf("Protocol violation %s.  Attempting to abort.\n",
		       ahc_lookup_phase_entry(curphase)->phasemsg);
	}
}

/*
 * Manual message loop handler.
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	u_int	bus_phase;
	int	end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_OUT");
		}
#endif
		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this message is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		if (ahc->send_msg_perror) {
			/* Respond to a parity error with MSG_PARITY_ERROR. */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
				printf(" byte 0x%x\n", ahc->send_msg_perror);
#endif
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_assert_atn(ahc);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgout_buf[ahc->msgout_index]);
#endif
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_IN");
		}
#endif
		phasemis = bus_phase != P_MESGIN;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			ahc->msgin_index = 0;
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgin_buf[ahc->msgin_index]);
#endif

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0) {
#ifdef AHC_DEBUG
				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
					ahc_print_devinfo(ahc, &devinfo);
					printf("Asserting ATN for response\n");
				}
#endif
				ahc_assert_atn(ahc);
			}
		} else
			ahc->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_inb(ahc, SCSIDATL);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this message is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
			ahc->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahc_inb(ahc, SCSIDATL);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahc->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 *     and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/* Walk the outgoing message buffer one message at a time. */
	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* Length byte follows the MSG_EXTENDED opcode. */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {

				if (full) {
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 */
static int
ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;
	u_int	targ_scsirate;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	targ_scsirate = tinfo->scsirate;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahc->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahc_handle_msg_reject(ahc, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahc->msgin_index < 2)
			break;
		switch (ahc->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			struct	ahc_syncrate *syncrate;
			u_int	period;
			u_int	ppr_options;
			u_int	offset;
			u_int	saved_offset;

			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahc->msgin_buf[4];
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
					    targ_scsirate & WIDEXFER,
					    devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahc->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_sdtr(ahc, devinfo,
						   period, offset);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahc->msgin_buf[3];
			saved_width = bus_width;
			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printf("(%s:%c:%d:%d): requested %dBit "
					       "transfers.  Rejecting...\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_wdtr(ahc, devinfo, bus_width);
				ahc->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			/*
			 * After a wide message, we are async, but
			 * some devices don't seem to honor this portion
			 * of the spec.  Force a renegotiation of the
			 * sync component of our transfer agreement even
			 * if our goal is async.  By updating our width
			 * after forcing the negotiation, we avoid
			 * renegotiating for width.
			 */
			ahc_update_neg_request(ahc, devinfo, tstate,
					       tinfo, AHC_NEG_ALWAYS);
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {

				/*
				 * We will always have an SDTR to send.
				 */
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_build_transfer_msg(ahc, devinfo);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			struct	ahc_syncrate *syncrate;
			u_int	period;
			u_int	offset;
			u_int	bus_width;
			u_int	ppr_options;
			u_int	saved_width;
			u_int	saved_offset;
			u_int	saved_ppr_options;

			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			offset = ahc->msgin_buf[5];
			bus_width = ahc->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahc->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period == 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Mask out any options we don't support
			 * on any controller.  Transfer options are
			 * only available if we are negotiating wide.
			 */
			ppr_options &= MSG_EXT_PPR_DT_REQ;
			if (bus_width == 0)
				ppr_options = 0;

			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate,
					    &offset, bus_width,
					    devinfo->role);

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
					syncrate = NULL;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printf("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printf("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_ppr(ahc, devinfo, period, offset,
						  bus_width, ppr_options);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahc->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message.  Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHC_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahc_handle_devreset(ahc, devinfo,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahc_inb(ahc, INITIATOR_TAG);
		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahc->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahc_queue_lstate_event(ahc, lstate,
						       devinfo->our_scsiid,
						       ahc->msgin_buf[0],
						       /*arg*/tag);
				ahc_send_lstate_events(ahc, lstate);
			}
		}
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahc->msgout_index = 0;
		ahc->msgout_len = 1;
		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahc->msgout_len = 0;

	return (done);
}

/*
 * Process a message reject message.
 */
static int
ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.
If we did, this is a signal that 3428 * the target is refusing negotiation. 3429 */ 3430 struct scb *scb; 3431 struct ahc_initiator_tinfo *tinfo; 3432 struct ahc_tmode_tstate *tstate; 3433 u_int scb_index; 3434 u_int last_msg; 3435 int response = 0; 3436 3437 scb_index = ahc_inb(ahc, SCB_TAG); 3438 scb = ahc_lookup_scb(ahc, scb_index); 3439 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3440 devinfo->our_scsiid, 3441 devinfo->target, &tstate); 3442 /* Might be necessary */ 3443 last_msg = ahc_inb(ahc, LAST_MSG); 3444 3445 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 3446 /* 3447 * Target does not support the PPR message. 3448 * Attempt to negotiate SPI-2 style. 3449 */ 3450 if (bootverbose) { 3451 printf("(%s:%c:%d:%d): PPR Rejected. " 3452 "Trying WDTR/SDTR\n", 3453 ahc_name(ahc), devinfo->channel, 3454 devinfo->target, devinfo->lun); 3455 } 3456 tinfo->goal.ppr_options = 0; 3457 tinfo->curr.transport_version = 2; 3458 tinfo->goal.transport_version = 2; 3459 ahc->msgout_index = 0; 3460 ahc->msgout_len = 0; 3461 ahc_build_transfer_msg(ahc, devinfo); 3462 ahc->msgout_index = 0; 3463 response = 1; 3464 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 3465 3466 /* note 8bit xfers */ 3467 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 3468 "8bit transfers\n", ahc_name(ahc), 3469 devinfo->channel, devinfo->target, devinfo->lun); 3470 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3471 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3472 /*paused*/TRUE); 3473 /* 3474 * No need to clear the sync rate. If the target 3475 * did not accept the command, our syncrate is 3476 * unaffected. If the target started the negotiation, 3477 * but rejected our response, we already cleared the 3478 * sync rate before sending our WDTR. 
3479 */ 3480 if (tinfo->goal.offset != tinfo->curr.offset) { 3481 3482 /* Start the sync negotiation */ 3483 ahc->msgout_index = 0; 3484 ahc->msgout_len = 0; 3485 ahc_build_transfer_msg(ahc, devinfo); 3486 ahc->msgout_index = 0; 3487 response = 1; 3488 } 3489 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 3490 /* note asynch xfers and clear flag */ 3491 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, 3492 /*offset*/0, /*ppr_options*/0, 3493 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3494 /*paused*/TRUE); 3495 printf("(%s:%c:%d:%d): refuses synchronous negotiation. " 3496 "Using asynchronous transfers\n", 3497 ahc_name(ahc), devinfo->channel, 3498 devinfo->target, devinfo->lun); 3499 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 3500 int tag_type; 3501 int mask; 3502 3503 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 3504 3505 if (tag_type == MSG_SIMPLE_TASK) { 3506 printf("(%s:%c:%d:%d): refuses tagged commands. " 3507 "Performing non-tagged I/O\n", ahc_name(ahc), 3508 devinfo->channel, devinfo->target, devinfo->lun); 3509 ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); 3510 mask = ~0x23; 3511 } else { 3512 printf("(%s:%c:%d:%d): refuses %s tagged commands. " 3513 "Performing simple queue tagged I/O only\n", 3514 ahc_name(ahc), devinfo->channel, devinfo->target, 3515 devinfo->lun, tag_type == MSG_ORDERED_TASK 3516 ? "ordered" : "head of queue"); 3517 ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); 3518 mask = ~0x03; 3519 } 3520 3521 /* 3522 * Resend the identify for this CCB as the target 3523 * may believe that the selection is invalid otherwise. 3524 */ 3525 ahc_outb(ahc, SCB_CONTROL, 3526 ahc_inb(ahc, SCB_CONTROL) & mask); 3527 scb->hscb->control &= mask; 3528 aic_set_transaction_tag(scb, /*enabled*/FALSE, 3529 /*type*/MSG_SIMPLE_TASK); 3530 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); 3531 ahc_assert_atn(ahc); 3532 3533 /* 3534 * This transaction is now at the head of 3535 * the untagged queue for this target. 
3536 */ 3537 if ((ahc->flags & AHC_SCB_BTT) == 0) { 3538 struct scb_tailq *untagged_q; 3539 3540 untagged_q = 3541 &(ahc->untagged_queues[devinfo->target_offset]); 3542 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); 3543 scb->flags |= SCB_UNTAGGEDQ; 3544 } 3545 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), 3546 scb->hscb->tag); 3547 3548 /* 3549 * Requeue all tagged commands for this target 3550 * currently in our posession so they can be 3551 * converted to untagged commands. 3552 */ 3553 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 3554 SCB_GET_CHANNEL(ahc, scb), 3555 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, 3556 ROLE_INITIATOR, CAM_REQUEUE_REQ, 3557 SEARCH_COMPLETE); 3558 } else { 3559 /* 3560 * Otherwise, we ignore it. 3561 */ 3562 printf("%s:%c:%d: Message reject for %x -- ignored\n", 3563 ahc_name(ahc), devinfo->channel, devinfo->target, 3564 last_msg); 3565 } 3566 return (response); 3567 } 3568 3569 /* 3570 * Process an ingnore wide residue message. 3571 */ 3572 static void 3573 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3574 { 3575 u_int scb_index; 3576 struct scb *scb; 3577 3578 scb_index = ahc_inb(ahc, SCB_TAG); 3579 scb = ahc_lookup_scb(ahc, scb_index); 3580 /* 3581 * XXX Actually check data direction in the sequencer? 3582 * Perhaps add datadir to some spare bits in the hscb? 3583 */ 3584 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 3585 || aic_get_transfer_dir(scb) != CAM_DIR_IN) { 3586 /* 3587 * Ignore the message if we haven't 3588 * seen an appropriate data phase yet. 3589 */ 3590 } else { 3591 /* 3592 * If the residual occurred on the last 3593 * transfer and the transfer request was 3594 * expected to end on an odd count, do 3595 * nothing. Otherwise, subtract a byte 3596 * and update the residual count accordingly. 
3597 */ 3598 uint32_t sgptr; 3599 3600 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3601 if ((sgptr & SG_LIST_NULL) != 0 3602 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) { 3603 /* 3604 * If the residual occurred on the last 3605 * transfer and the transfer request was 3606 * expected to end on an odd count, do 3607 * nothing. 3608 */ 3609 } else { 3610 struct ahc_dma_seg *sg; 3611 uint32_t data_cnt; 3612 uint32_t data_addr; 3613 uint32_t sglen; 3614 3615 /* Pull in all of the sgptr */ 3616 sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR); 3617 data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT); 3618 3619 if ((sgptr & SG_LIST_NULL) != 0) { 3620 /* 3621 * The residual data count is not updated 3622 * for the command run to completion case. 3623 * Explicitly zero the count. 3624 */ 3625 data_cnt &= ~AHC_SG_LEN_MASK; 3626 } 3627 3628 data_addr = ahc_inl(ahc, SHADDR); 3629 3630 data_cnt += 1; 3631 data_addr -= 1; 3632 sgptr &= SG_PTR_MASK; 3633 3634 sg = ahc_sg_bus_to_virt(scb, sgptr); 3635 3636 /* 3637 * The residual sg ptr points to the next S/G 3638 * to load so we must go back one. 3639 */ 3640 sg--; 3641 sglen = aic_le32toh(sg->len) & AHC_SG_LEN_MASK; 3642 if (sg != scb->sg_list 3643 && sglen < (data_cnt & AHC_SG_LEN_MASK)) { 3644 3645 sg--; 3646 sglen = aic_le32toh(sg->len); 3647 /* 3648 * Preserve High Address and SG_LIST bits 3649 * while setting the count to 1. 3650 */ 3651 data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); 3652 data_addr = aic_le32toh(sg->addr) 3653 + (sglen & AHC_SG_LEN_MASK) - 1; 3654 3655 /* 3656 * Increment sg so it points to the 3657 * "next" sg. 3658 */ 3659 sg++; 3660 sgptr = ahc_sg_virt_to_bus(scb, sg); 3661 } 3662 ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr); 3663 ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt); 3664 /* 3665 * Toggle the "oddness" of the transfer length 3666 * to handle this mid-transfer ignore wide 3667 * residue. This ensures that the oddness is 3668 * correct for subsequent data transfers. 
3669 */ 3670 ahc_outb(ahc, SCB_LUN, 3671 ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD); 3672 } 3673 } 3674 } 3675 3676 3677 /* 3678 * Reinitialize the data pointers for the active transfer 3679 * based on its current residual. 3680 */ 3681 static void 3682 ahc_reinitialize_dataptrs(struct ahc_softc *ahc) 3683 { 3684 struct scb *scb; 3685 struct ahc_dma_seg *sg; 3686 u_int scb_index; 3687 uint32_t sgptr; 3688 uint32_t resid; 3689 uint32_t dataptr; 3690 3691 scb_index = ahc_inb(ahc, SCB_TAG); 3692 scb = ahc_lookup_scb(ahc, scb_index); 3693 sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3694 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3695 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) 3696 | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3697 3698 sgptr &= SG_PTR_MASK; 3699 sg = ahc_sg_bus_to_virt(scb, sgptr); 3700 3701 /* The residual sg_ptr always points to the next sg */ 3702 sg--; 3703 3704 resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) 3705 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) 3706 | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); 3707 3708 dataptr = aic_le32toh(sg->addr) 3709 + (aic_le32toh(sg->len) & AHC_SG_LEN_MASK) 3710 - resid; 3711 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 3712 u_int dscommand1; 3713 3714 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 3715 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 3716 ahc_outb(ahc, HADDR, 3717 (aic_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); 3718 ahc_outb(ahc, DSCOMMAND1, dscommand1); 3719 } 3720 ahc_outb(ahc, HADDR + 3, dataptr >> 24); 3721 ahc_outb(ahc, HADDR + 2, dataptr >> 16); 3722 ahc_outb(ahc, HADDR + 1, dataptr >> 8); 3723 ahc_outb(ahc, HADDR, dataptr); 3724 ahc_outb(ahc, HCNT + 2, resid >> 16); 3725 ahc_outb(ahc, HCNT + 1, resid >> 8); 3726 ahc_outb(ahc, HCNT, resid); 3727 if ((ahc->features & AHC_ULTRA2) == 0) { 3728 ahc_outb(ahc, STCNT + 2, resid >> 16); 3729 ahc_outb(ahc, STCNT + 1, resid >> 8); 3730 ahc_outb(ahc, STCNT, resid); 3731 } 3732 } 3733 3734 /* 3735 * Handle the effects of issuing a bus 
device reset message. 3736 */ 3737 static void 3738 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3739 cam_status status, char *message, int verbose_level) 3740 { 3741 #ifdef AHC_TARGET_MODE 3742 struct ahc_tmode_tstate* tstate; 3743 u_int lun; 3744 #endif 3745 int found; 3746 3747 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3748 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 3749 status); 3750 3751 #ifdef AHC_TARGET_MODE 3752 /* 3753 * Send an immediate notify ccb to all target mord peripheral 3754 * drivers affected by this action. 3755 */ 3756 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3757 if (tstate != NULL) { 3758 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 3759 struct ahc_tmode_lstate* lstate; 3760 3761 lstate = tstate->enabled_luns[lun]; 3762 if (lstate == NULL) 3763 continue; 3764 3765 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3766 MSG_BUS_DEV_RESET, /*arg*/0); 3767 ahc_send_lstate_events(ahc, lstate); 3768 } 3769 } 3770 #endif 3771 3772 /* 3773 * Go back to async/narrow transfers and renegotiate. 3774 */ 3775 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3776 AHC_TRANS_CUR, /*paused*/TRUE); 3777 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, 3778 /*period*/0, /*offset*/0, /*ppr_options*/0, 3779 AHC_TRANS_CUR, /*paused*/TRUE); 3780 3781 if (status != CAM_SEL_TIMEOUT) 3782 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3783 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3784 3785 if (message != NULL 3786 && (verbose_level <= bootverbose)) 3787 printf("%s: %s on %c:%d. 
%d SCBs aborted\n", ahc_name(ahc), 3788 message, devinfo->channel, devinfo->target, found); 3789 } 3790 3791 #ifdef AHC_TARGET_MODE 3792 static void 3793 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3794 struct scb *scb) 3795 { 3796 3797 /* 3798 * To facilitate adding multiple messages together, 3799 * each routine should increment the index and len 3800 * variables instead of setting them explicitly. 3801 */ 3802 ahc->msgout_index = 0; 3803 ahc->msgout_len = 0; 3804 3805 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 3806 ahc_build_transfer_msg(ahc, devinfo); 3807 else 3808 panic("ahc_intr: AWAITING target message with no message"); 3809 3810 ahc->msgout_index = 0; 3811 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3812 } 3813 #endif 3814 /**************************** Initialization **********************************/ 3815 /* 3816 * Allocate a controller structure for a new device 3817 * and perform initial initializion. 3818 */ 3819 struct ahc_softc * 3820 ahc_alloc(void *platform_arg, char *name) 3821 { 3822 struct ahc_softc *ahc; 3823 int i; 3824 3825 #ifndef __FreeBSD__ 3826 ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT); 3827 if (!ahc) { 3828 printf("aic7xxx: cannot malloc softc!\n"); 3829 free(name, M_DEVBUF); 3830 return NULL; 3831 } 3832 #else 3833 ahc = device_get_softc((device_t)platform_arg); 3834 #endif 3835 memset(ahc, 0, sizeof(*ahc)); 3836 ahc->seep_config = malloc(sizeof(*ahc->seep_config), 3837 M_DEVBUF, M_NOWAIT); 3838 if (ahc->seep_config == NULL) { 3839 #ifndef __FreeBSD__ 3840 free(ahc, M_DEVBUF); 3841 #endif 3842 free(name, M_DEVBUF); 3843 return (NULL); 3844 } 3845 LIST_INIT(&ahc->pending_scbs); 3846 /* We don't know our unit number until the OSM sets it */ 3847 ahc->name = name; 3848 ahc->unit = -1; 3849 ahc->description = NULL; 3850 ahc->channel = 'A'; 3851 ahc->channel_b = 'B'; 3852 ahc->chip = AHC_NONE; 3853 ahc->features = AHC_FENONE; 3854 ahc->bugs = AHC_BUGNONE; 3855 ahc->flags = AHC_FNONE; 3856 /* 
3857 * Default to all error reporting enabled with the 3858 * sequencer operating at its fastest speed. 3859 * The bus attach code may modify this. 3860 */ 3861 ahc->seqctl = FASTMODE; 3862 3863 for (i = 0; i < AHC_NUM_TARGETS; i++) 3864 TAILQ_INIT(&ahc->untagged_queues[i]); 3865 if (ahc_platform_alloc(ahc, platform_arg) != 0) { 3866 ahc_free(ahc); 3867 ahc = NULL; 3868 } 3869 return (ahc); 3870 } 3871 3872 int 3873 ahc_softc_init(struct ahc_softc *ahc) 3874 { 3875 3876 /* The IRQMS bit is only valid on VL and EISA chips */ 3877 if ((ahc->chip & AHC_PCI) == 0) 3878 ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; 3879 else 3880 ahc->unpause = 0; 3881 ahc->pause = ahc->unpause | PAUSE; 3882 /* XXX The shared scb data stuff should be deprecated */ 3883 if (ahc->scb_data == NULL) { 3884 ahc->scb_data = malloc(sizeof(*ahc->scb_data), 3885 M_DEVBUF, M_NOWAIT); 3886 if (ahc->scb_data == NULL) 3887 return (ENOMEM); 3888 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); 3889 } 3890 3891 return (0); 3892 } 3893 3894 void 3895 ahc_softc_insert(struct ahc_softc *ahc) 3896 { 3897 struct ahc_softc *list_ahc; 3898 3899 #if AIC_PCI_CONFIG > 0 3900 /* 3901 * Second Function PCI devices need to inherit some 3902 * settings from function 0. 
3903 */ 3904 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI 3905 && (ahc->features & AHC_MULTI_FUNC) != 0) { 3906 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3907 aic_dev_softc_t list_pci; 3908 aic_dev_softc_t pci; 3909 3910 list_pci = list_ahc->dev_softc; 3911 pci = ahc->dev_softc; 3912 if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci) 3913 && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) { 3914 struct ahc_softc *master; 3915 struct ahc_softc *slave; 3916 3917 if (aic_get_pci_function(list_pci) == 0) { 3918 master = list_ahc; 3919 slave = ahc; 3920 } else { 3921 master = ahc; 3922 slave = list_ahc; 3923 } 3924 slave->flags &= ~AHC_BIOS_ENABLED; 3925 slave->flags |= 3926 master->flags & AHC_BIOS_ENABLED; 3927 slave->flags &= ~AHC_PRIMARY_CHANNEL; 3928 slave->flags |= 3929 master->flags & AHC_PRIMARY_CHANNEL; 3930 break; 3931 } 3932 } 3933 } 3934 #endif 3935 3936 /* 3937 * Insertion sort into our list of softcs. 3938 */ 3939 list_ahc = TAILQ_FIRST(&ahc_tailq); 3940 while (list_ahc != NULL 3941 && ahc_softc_comp(ahc, list_ahc) <= 0) 3942 list_ahc = TAILQ_NEXT(list_ahc, links); 3943 if (list_ahc != NULL) 3944 TAILQ_INSERT_BEFORE(list_ahc, ahc, links); 3945 else 3946 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); 3947 ahc->init_level++; 3948 } 3949 3950 /* 3951 * Verify that the passed in softc pointer is for a 3952 * controller that is still configured. 
3953 */ 3954 struct ahc_softc * 3955 ahc_find_softc(struct ahc_softc *ahc) 3956 { 3957 struct ahc_softc *list_ahc; 3958 3959 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3960 if (list_ahc == ahc) 3961 return (ahc); 3962 } 3963 return (NULL); 3964 } 3965 3966 void 3967 ahc_set_unit(struct ahc_softc *ahc, int unit) 3968 { 3969 ahc->unit = unit; 3970 } 3971 3972 void 3973 ahc_set_name(struct ahc_softc *ahc, char *name) 3974 { 3975 if (ahc->name != NULL) 3976 free(ahc->name, M_DEVBUF); 3977 ahc->name = name; 3978 } 3979 3980 void 3981 ahc_free(struct ahc_softc *ahc) 3982 { 3983 int i; 3984 3985 ahc_terminate_recovery_thread(ahc); 3986 switch (ahc->init_level) { 3987 default: 3988 case 5: 3989 ahc_shutdown(ahc); 3990 /* FALLTHROUGH */ 3991 case 4: 3992 aic_dmamap_unload(ahc, ahc->shared_data_dmat, 3993 ahc->shared_data_dmamap); 3994 /* FALLTHROUGH */ 3995 case 3: 3996 aic_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, 3997 ahc->shared_data_dmamap); 3998 aic_dmamap_destroy(ahc, ahc->shared_data_dmat, 3999 ahc->shared_data_dmamap); 4000 /* FALLTHROUGH */ 4001 case 2: 4002 aic_dma_tag_destroy(ahc, ahc->shared_data_dmat); 4003 case 1: 4004 #ifndef __linux__ 4005 aic_dma_tag_destroy(ahc, ahc->buffer_dmat); 4006 #endif 4007 break; 4008 case 0: 4009 break; 4010 } 4011 4012 #ifndef __linux__ 4013 aic_dma_tag_destroy(ahc, ahc->parent_dmat); 4014 #endif 4015 ahc_platform_free(ahc); 4016 ahc_fini_scbdata(ahc); 4017 for (i = 0; i < AHC_NUM_TARGETS; i++) { 4018 struct ahc_tmode_tstate *tstate; 4019 4020 tstate = ahc->enabled_targets[i]; 4021 if (tstate != NULL) { 4022 #ifdef AHC_TARGET_MODE 4023 int j; 4024 4025 for (j = 0; j < AHC_NUM_LUNS; j++) { 4026 struct ahc_tmode_lstate *lstate; 4027 4028 lstate = tstate->enabled_luns[j]; 4029 if (lstate != NULL) { 4030 xpt_free_path(lstate->path); 4031 free(lstate, M_DEVBUF); 4032 } 4033 } 4034 #endif 4035 free(tstate, M_DEVBUF); 4036 } 4037 } 4038 #ifdef AHC_TARGET_MODE 4039 if (ahc->black_hole != NULL) { 4040 
xpt_free_path(ahc->black_hole->path); 4041 free(ahc->black_hole, M_DEVBUF); 4042 } 4043 #endif 4044 if (ahc->name != NULL) 4045 free(ahc->name, M_DEVBUF); 4046 if (ahc->seep_config != NULL) 4047 free(ahc->seep_config, M_DEVBUF); 4048 #ifndef __FreeBSD__ 4049 free(ahc, M_DEVBUF); 4050 #endif 4051 return; 4052 } 4053 4054 void 4055 ahc_shutdown(void *arg) 4056 { 4057 struct ahc_softc *ahc; 4058 int i; 4059 4060 ahc = (struct ahc_softc *)arg; 4061 4062 /* This will reset most registers to 0, but not all */ 4063 ahc_reset(ahc, /*reinit*/FALSE); 4064 ahc_outb(ahc, SCSISEQ, 0); 4065 ahc_outb(ahc, SXFRCTL0, 0); 4066 ahc_outb(ahc, DSPCISTATUS, 0); 4067 4068 for (i = TARG_SCSIRATE; i < SCSICONF; i++) 4069 ahc_outb(ahc, i, 0); 4070 } 4071 4072 /* 4073 * Reset the controller and record some information about it 4074 * that is only available just after a reset. If "reinit" is 4075 * non-zero, this reset occured after initial configuration 4076 * and the caller requests that the chip be fully reinitialized 4077 * to a runable state. Chip interrupts are *not* enabled after 4078 * a reinitialization. The caller must enable interrupts via 4079 * ahc_intr_enable(). 4080 */ 4081 int 4082 ahc_reset(struct ahc_softc *ahc, int reinit) 4083 { 4084 u_int sblkctl; 4085 u_int sxfrctl1_a, sxfrctl1_b; 4086 int error; 4087 int wait; 4088 4089 /* 4090 * Preserve the value of the SXFRCTL1 register for all channels. 4091 * It contains settings that affect termination and we don't want 4092 * to disturb the integrity of the bus. 4093 */ 4094 ahc_pause(ahc); 4095 sxfrctl1_b = 0; 4096 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 4097 u_int sblkctl; 4098 4099 /* 4100 * Save channel B's settings in case this chip 4101 * is setup for TWIN channel operation. 
4102 */ 4103 sblkctl = ahc_inb(ahc, SBLKCTL); 4104 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 4105 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 4106 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 4107 } 4108 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 4109 4110 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 4111 4112 /* 4113 * Ensure that the reset has finished. We delay 1000us 4114 * prior to reading the register to make sure the chip 4115 * has sufficiently completed its reset to handle register 4116 * accesses. 4117 */ 4118 wait = 1000; 4119 do { 4120 aic_delay(1000); 4121 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 4122 4123 if (wait == 0) { 4124 printf("%s: WARNING - Failed chip reset! " 4125 "Trying to initialize anyway.\n", ahc_name(ahc)); 4126 } 4127 ahc_outb(ahc, HCNTRL, ahc->pause); 4128 4129 /* Determine channel configuration */ 4130 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 4131 /* No Twin Channel PCI cards */ 4132 if ((ahc->chip & AHC_PCI) != 0) 4133 sblkctl &= ~SELBUSB; 4134 switch (sblkctl) { 4135 case 0: 4136 /* Single Narrow Channel */ 4137 break; 4138 case 2: 4139 /* Wide Channel */ 4140 ahc->features |= AHC_WIDE; 4141 break; 4142 case 8: 4143 /* Twin Channel */ 4144 ahc->features |= AHC_TWIN; 4145 break; 4146 default: 4147 printf(" Unsupported adapter type. Ignoring\n"); 4148 return(-1); 4149 } 4150 4151 /* 4152 * Reload sxfrctl1. 4153 * 4154 * We must always initialize STPWEN to 1 before we 4155 * restore the saved values. STPWEN is initialized 4156 * to a tri-state condition which can only be cleared 4157 * by turning it on. 
4158 */ 4159 if ((ahc->features & AHC_TWIN) != 0) { 4160 u_int sblkctl; 4161 4162 sblkctl = ahc_inb(ahc, SBLKCTL); 4163 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 4164 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 4165 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 4166 } 4167 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 4168 4169 error = 0; 4170 if (reinit != 0) 4171 /* 4172 * If a recovery action has forced a chip reset, 4173 * re-initialize the chip to our liking. 4174 */ 4175 error = ahc->bus_chip_init(ahc); 4176 #ifdef AHC_DUMP_SEQ 4177 else 4178 ahc_dumpseq(ahc); 4179 #endif 4180 4181 return (error); 4182 } 4183 4184 /* 4185 * Determine the number of SCBs available on the controller 4186 */ 4187 int 4188 ahc_probe_scbs(struct ahc_softc *ahc) { 4189 int i; 4190 4191 for (i = 0; i < AHC_SCB_MAX; i++) { 4192 4193 ahc_outb(ahc, SCBPTR, i); 4194 ahc_outb(ahc, SCB_BASE, i); 4195 if (ahc_inb(ahc, SCB_BASE) != i) 4196 break; 4197 ahc_outb(ahc, SCBPTR, 0); 4198 if (ahc_inb(ahc, SCB_BASE) != 0) 4199 break; 4200 } 4201 return (i); 4202 } 4203 4204 static void 4205 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4206 { 4207 bus_addr_t *baddr; 4208 4209 baddr = (bus_addr_t *)arg; 4210 *baddr = segs->ds_addr; 4211 } 4212 4213 static void 4214 ahc_build_free_scb_list(struct ahc_softc *ahc) 4215 { 4216 int scbsize; 4217 int i; 4218 4219 scbsize = 32; 4220 if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) 4221 scbsize = 64; 4222 4223 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 4224 int j; 4225 4226 ahc_outb(ahc, SCBPTR, i); 4227 4228 /* 4229 * Touch all SCB bytes to avoid parity errors 4230 * should one of our debugging routines read 4231 * an otherwise uninitiatlized byte. 4232 */ 4233 for (j = 0; j < scbsize; j++) 4234 ahc_outb(ahc, SCB_BASE+j, 0xFF); 4235 4236 /* Clear the control byte. 
*/ 4237 ahc_outb(ahc, SCB_CONTROL, 0); 4238 4239 /* Set the next pointer */ 4240 if ((ahc->flags & AHC_PAGESCBS) != 0) 4241 ahc_outb(ahc, SCB_NEXT, i+1); 4242 else 4243 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4244 4245 /* Make the tag number, SCSIID, and lun invalid */ 4246 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 4247 ahc_outb(ahc, SCB_SCSIID, 0xFF); 4248 ahc_outb(ahc, SCB_LUN, 0xFF); 4249 } 4250 4251 if ((ahc->flags & AHC_PAGESCBS) != 0) { 4252 /* SCB 0 heads the free list. */ 4253 ahc_outb(ahc, FREE_SCBH, 0); 4254 } else { 4255 /* No free list. */ 4256 ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); 4257 } 4258 4259 /* Make sure that the last SCB terminates the free list */ 4260 ahc_outb(ahc, SCBPTR, i-1); 4261 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4262 } 4263 4264 static int 4265 ahc_init_scbdata(struct ahc_softc *ahc) 4266 { 4267 struct scb_data *scb_data; 4268 4269 scb_data = ahc->scb_data; 4270 SLIST_INIT(&scb_data->free_scbs); 4271 SLIST_INIT(&scb_data->sg_maps); 4272 4273 /* Allocate SCB resources */ 4274 scb_data->scbarray = 4275 (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, 4276 M_DEVBUF, M_NOWAIT); 4277 if (scb_data->scbarray == NULL) 4278 return (ENOMEM); 4279 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); 4280 4281 /* Determine the number of hardware SCBs and initialize them */ 4282 4283 scb_data->maxhscbs = ahc_probe_scbs(ahc); 4284 if (ahc->scb_data->maxhscbs == 0) { 4285 printf("%s: No SCB space found\n", ahc_name(ahc)); 4286 return (ENXIO); 4287 } 4288 4289 /* 4290 * Create our DMA tags. These tags define the kinds of device 4291 * accessible memory allocations and memory mappings we will 4292 * need to perform during normal operation. 4293 * 4294 * Unless we need to further restrict the allocation, we rely 4295 * on the restrictions of the parent dmat, hence the common 4296 * use of MAXADDR and MAXSIZE. 
4297 */ 4298 4299 /* DMA tag for our hardware scb structures */ 4300 if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4301 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4302 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4303 /*highaddr*/BUS_SPACE_MAXADDR, 4304 /*filter*/NULL, /*filterarg*/NULL, 4305 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), 4306 /*nsegments*/1, 4307 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4308 /*flags*/0, &scb_data->hscb_dmat) != 0) { 4309 goto error_exit; 4310 } 4311 4312 scb_data->init_level++; 4313 4314 /* Allocation for our hscbs */ 4315 if (aic_dmamem_alloc(ahc, scb_data->hscb_dmat, 4316 (void **)&scb_data->hscbs, 4317 BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { 4318 goto error_exit; 4319 } 4320 4321 scb_data->init_level++; 4322 4323 /* And permanently map them */ 4324 aic_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, 4325 scb_data->hscbs, 4326 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), 4327 ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); 4328 4329 scb_data->init_level++; 4330 4331 /* DMA tag for our sense buffers */ 4332 if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4333 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4334 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4335 /*highaddr*/BUS_SPACE_MAXADDR, 4336 /*filter*/NULL, /*filterarg*/NULL, 4337 AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), 4338 /*nsegments*/1, 4339 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4340 /*flags*/0, &scb_data->sense_dmat) != 0) { 4341 goto error_exit; 4342 } 4343 4344 scb_data->init_level++; 4345 4346 /* Allocate them */ 4347 if (aic_dmamem_alloc(ahc, scb_data->sense_dmat, 4348 (void **)&scb_data->sense, 4349 BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { 4350 goto error_exit; 4351 } 4352 4353 scb_data->init_level++; 4354 4355 /* And permanently map them */ 4356 aic_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, 4357 scb_data->sense, 4358 AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), 4359 ahc_dmamap_cb, 
&scb_data->sense_busaddr, /*flags*/0); 4360 4361 scb_data->init_level++; 4362 4363 /* DMA tag for our S/G structures. We allocate in page sized chunks */ 4364 if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, 4365 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4366 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4367 /*highaddr*/BUS_SPACE_MAXADDR, 4368 /*filter*/NULL, /*filterarg*/NULL, 4369 PAGE_SIZE, /*nsegments*/1, 4370 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4371 /*flags*/0, &scb_data->sg_dmat) != 0) { 4372 goto error_exit; 4373 } 4374 4375 scb_data->init_level++; 4376 4377 /* Perform initial CCB allocation */ 4378 memset(scb_data->hscbs, 0, 4379 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); 4380 ahc_alloc_scbs(ahc); 4381 4382 if (scb_data->numscbs == 0) { 4383 printf("%s: ahc_init_scbdata - " 4384 "Unable to allocate initial scbs\n", 4385 ahc_name(ahc)); 4386 goto error_exit; 4387 } 4388 4389 /* 4390 * Reserve the next queued SCB. 4391 */ 4392 ahc->next_queued_scb = ahc_get_scb(ahc); 4393 4394 /* 4395 * Note that we were successfull 4396 */ 4397 return (0); 4398 4399 error_exit: 4400 4401 return (ENOMEM); 4402 } 4403 4404 static void 4405 ahc_fini_scbdata(struct ahc_softc *ahc) 4406 { 4407 struct scb_data *scb_data; 4408 4409 scb_data = ahc->scb_data; 4410 if (scb_data == NULL) 4411 return; 4412 4413 switch (scb_data->init_level) { 4414 default: 4415 case 7: 4416 { 4417 struct sg_map_node *sg_map; 4418 4419 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { 4420 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 4421 aic_dmamap_unload(ahc, scb_data->sg_dmat, 4422 sg_map->sg_dmamap); 4423 aic_dmamem_free(ahc, scb_data->sg_dmat, 4424 sg_map->sg_vaddr, 4425 sg_map->sg_dmamap); 4426 free(sg_map, M_DEVBUF); 4427 } 4428 aic_dma_tag_destroy(ahc, scb_data->sg_dmat); 4429 } 4430 case 6: 4431 aic_dmamap_unload(ahc, scb_data->sense_dmat, 4432 scb_data->sense_dmamap); 4433 case 5: 4434 aic_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, 4435 
scb_data->sense_dmamap);
		aic_dmamap_destroy(ahc, scb_data->sense_dmat,
				   scb_data->sense_dmamap);
	case 4:
		aic_dma_tag_destroy(ahc, scb_data->sense_dmat);
	case 3:
		aic_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
	case 2:
		aic_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		aic_dmamap_destroy(ahc, scb_data->hscb_dmat,
				   scb_data->hscb_dmamap);
	case 1:
		aic_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}

/*
 * Grow the pool of kernel SCBs, one PAGE_SIZE S/G allocation at a time,
 * up to AHC_SCB_MAX_ALLOC.  Each new SCB is given its platform data, an
 * S/G list slice from the fresh DMA-safe page, a DMA map (non-Linux), and
 * its hardware SCB, then placed on the free list.  Allocation failures
 * simply stop the batch early; whatever was built remains usable.
 */
void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	struct scb *next_scb;
	struct sg_map_node *sg_map;
	bus_addr_t physaddr;
	struct ahc_dma_seg *segs;
	int newcount;
	int i;

	scb_data = ahc->scb_data;
	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	next_scb = &scb_data->scbarray[scb_data->numscbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of SCBS */
	if (aic_dmamem_alloc(ahc, scb_data->sg_dmat,
			     (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

	aic_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
			&sg_map->sg_physaddr, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* As many SCBs as full AHC_NSEG S/G lists fit in one page. */
	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
#ifndef __linux__
		int error;
#endif
		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
							   M_DEVBUF, M_NOWAIT);
		if (pdata == NULL)
			break;
		next_scb->platform_data = pdata;
		next_scb->sg_map = sg_map;
		next_scb->sg_list = segs;
		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
		next_scb->ahc_softc = ahc;
		next_scb->flags = SCB_FLAG_NONE;
#ifndef __linux__
		error = aic_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0)
			break;
#endif
		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
		next_scb->hscb->tag = ahc->scb_data->numscbs;
		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
				  next_scb, links.sle);
		segs += AHC_NSEG;
		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
		next_scb++;
		ahc->scb_data->numscbs++;
	}
}

/*
 * Format a human-readable description of this controller (chip name,
 * channel/speed/width, SCSI id(s), SCB counts) into the caller's buffer.
 * NOTE(review): the buffer is assumed large enough for the longest
 * possible string — callers must size it accordingly (sprintf is unbounded).
 */
void
ahc_controller_info(struct ahc_softc *ahc, char *buf)
{
	int len;

	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
	buf += len;
	if ((ahc->features & AHC_TWIN) != 0)
		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
			      "B SCSI Id=%d, primary %c, ",
			      ahc->our_id, ahc->our_id_b,
			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
	else {
		const char *speed;
		const char *type;

		speed = "";
		if ((ahc->features & AHC_ULTRA) != 0) {
			speed = "Ultra ";
		} else if ((ahc->features & AHC_DT) != 0) {
			speed = "Ultra160 ";
		} else if ((ahc->features & AHC_ULTRA2) != 0) {
			speed = "Ultra2 ";
		}
		if ((ahc->features & AHC_WIDE) != 0) {
			type = "Wide";
		} else {
			type = "Single";
		}
		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
			      speed, type, ahc->channel, ahc->our_id);
	}
	buf += len;

	if ((ahc->flags & AHC_PAGESCBS) != 0)
		sprintf(buf, "%d/%d SCBs",
			ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
	else
		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
}

/*
 * (Re)initialize the chip to its post-reset operating state: program the
 * SCSI id(s), transfer control and interrupt-mask registers for both
 * channels, clear the busy-target table and command queues, point the
 * sequencer at our host-memory arrays, and finally download and start
 * the sequencer program.
 */
int
ahc_chip_init(struct ahc_softc *ahc)
{
	int	 term;
	int	 error;
	u_int	 i;
	u_int	 scsi_conf;
	u_int	 scsiseq_template;
	uint32_t physaddr;

	ahc_outb(ahc, SEQ_FLAGS, 0);
	ahc_outb(ahc, SEQ_FLAGS2, 0);

	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
	if (ahc->features & AHC_TWIN) {

		/*
		 * Setup Channel B first.
		 */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
		ahc_outb(ahc, SCSIID, ahc->our_id_b);
		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
		if ((ahc->features & AHC_ULTRA2) != 0)
			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

		/* Select Channel A */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
	else
		ahc_outb(ahc, SCSIID, ahc->our_id);
	scsi_conf = ahc_inb(ahc, SCSICONF);
	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
				|term|ahc->seltime
				|ENSTIMER|ACTNEGEN);
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

	/* There are no untagged SCBs active yet.
*/
	for (i = 0; i < 16; i++) {
		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
		if ((ahc->flags & AHC_SCB_BTT) != 0) {
			int lun;

			/*
			 * The SCB based BTT allows an entry per
			 * target and lun pair.
			 */
			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
		}
	}

	/* All of our queues are empty */
	for (i = 0; i < 256; i++)
		ahc->qoutfifo[i] = SCB_LIST_NULL;
	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);

	for (i = 0; i < 256; i++)
		ahc->qinfifo[i] = SCB_LIST_NULL;

	if ((ahc->features & AHC_MULTI_TID) != 0) {
		/* Clear the target-id bitmask until luns are enabled. */
		ahc_outb(ahc, TARGID, 0);
		ahc_outb(ahc, TARGID + 1, 0);
	}

	/*
	 * Tell the sequencer where it can find our arrays in memory.
	 */
	physaddr = ahc->scb_data->hscb_busaddr;
	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);

	physaddr = ahc->shared_data_busaddr;
	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);

	/*
	 * Initialize the group code to command length table.
	 * This overrides the values in TARG_SCSIRATE, so only
	 * setup the table after we have processed that information.
	 */
	ahc_outb(ahc, CMDSIZE_TABLE, 5);
	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);

	if ((ahc->features & AHC_HS_MAILBOX) != 0)
		ahc_outb(ahc, HS_MAILBOX, 0);

	/* Tell the sequencer of our initial queue positions */
	if ((ahc->features & AHC_TARGETMODE) != 0) {
		ahc->tqinfifonext = 1;
		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
	}
	ahc->qinfifonext = 0;
	ahc->qoutfifonext = 0;
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SDSCB_QOFF, 0);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
	}

	/* We don't have any waiting selections */
	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);

	/* Our disconnection list is empty too */
	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);

	/* Message out buffer starts empty */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);

	/*
	 * Setup the allowed SCSI Sequences based on operational mode.
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
	if ((ahc->flags & AHC_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);

	/* Initialize our list of free SCBs. */
	ahc_build_free_scb_list(ahc);

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
4727 */ 4728 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 4729 4730 /* 4731 * Load the Sequencer program and Enable the adapter 4732 * in "fast" mode. 4733 */ 4734 if (bootverbose) 4735 printf("%s: Downloading Sequencer Program...", 4736 ahc_name(ahc)); 4737 4738 error = ahc_loadseq(ahc); 4739 if (error != 0) 4740 return (error); 4741 4742 if ((ahc->features & AHC_ULTRA2) != 0) { 4743 int wait; 4744 4745 /* 4746 * Wait for up to 500ms for our transceivers 4747 * to settle. If the adapter does not have 4748 * a cable attached, the transceivers may 4749 * never settle, so don't complain if we 4750 * fail here. 4751 */ 4752 for (wait = 5000; 4753 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 4754 wait--) 4755 aic_delay(100); 4756 } 4757 ahc_restart(ahc); 4758 return (0); 4759 } 4760 4761 /* 4762 * Start the board, ready for normal operation 4763 */ 4764 int 4765 ahc_init(struct ahc_softc *ahc) 4766 { 4767 int max_targ; 4768 int error; 4769 u_int i; 4770 u_int scsi_conf; 4771 u_int ultraenb; 4772 u_int discenable; 4773 u_int tagenable; 4774 size_t driver_data_size; 4775 4776 #ifdef AHC_DEBUG 4777 if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) 4778 ahc->flags |= AHC_SEQUENCER_DEBUG; 4779 #endif 4780 4781 #ifdef AHC_PRINT_SRAM 4782 printf("Scratch Ram:"); 4783 for (i = 0x20; i < 0x5f; i++) { 4784 if (((i % 8) == 0) && (i != 0)) { 4785 printf ("\n "); 4786 } 4787 printf (" 0x%x", ahc_inb(ahc, i)); 4788 } 4789 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4790 for (i = 0x70; i < 0x7f; i++) { 4791 if (((i % 8) == 0) && (i != 0)) { 4792 printf ("\n "); 4793 } 4794 printf (" 0x%x", ahc_inb(ahc, i)); 4795 } 4796 } 4797 printf ("\n"); 4798 /* 4799 * Reading uninitialized scratch ram may 4800 * generate parity errors. 4801 */ 4802 ahc_outb(ahc, CLRINT, CLRPARERR); 4803 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 4804 #endif 4805 max_targ = 15; 4806 4807 /* 4808 * Assume we have a board at this stage and it has been reset. 
4809 */ 4810 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4811 ahc->our_id = ahc->our_id_b = 7; 4812 4813 /* 4814 * Default to allowing initiator operations. 4815 */ 4816 ahc->flags |= AHC_INITIATORROLE; 4817 4818 /* 4819 * Only allow target mode features if this unit has them enabled. 4820 */ 4821 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4822 ahc->features &= ~AHC_TARGETMODE; 4823 4824 #ifndef __linux__ 4825 /* DMA tag for mapping buffers into device visible space. */ 4826 if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4827 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4828 /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING 4829 ? (bus_addr_t)0x7FFFFFFFFFULL 4830 : BUS_SPACE_MAXADDR_32BIT, 4831 /*highaddr*/BUS_SPACE_MAXADDR, 4832 /*filter*/NULL, /*filterarg*/NULL, 4833 /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE, 4834 /*nsegments*/AHC_NSEG, 4835 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 4836 /*flags*/BUS_DMA_ALLOCNOW, 4837 &ahc->buffer_dmat) != 0) { 4838 return (ENOMEM); 4839 } 4840 #endif 4841 4842 ahc->init_level++; 4843 4844 /* 4845 * DMA tag for our command fifos and other data in system memory 4846 * the card's sequencer must be able to access. For initiator 4847 * roles, we need to allocate space for the qinfifo and qoutfifo. 4848 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4849 * When providing for the target mode role, we must additionally 4850 * provide space for the incoming target command fifo and an extra 4851 * byte to deal with a dma bug in some chip versions. 
4852 */ 4853 driver_data_size = 2 * 256 * sizeof(uint8_t); 4854 if ((ahc->features & AHC_TARGETMODE) != 0) 4855 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4856 + /*DMA WideOdd Bug Buffer*/1; 4857 if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4858 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4859 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4860 /*highaddr*/BUS_SPACE_MAXADDR, 4861 /*filter*/NULL, /*filterarg*/NULL, 4862 driver_data_size, 4863 /*nsegments*/1, 4864 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4865 /*flags*/0, &ahc->shared_data_dmat) != 0) { 4866 return (ENOMEM); 4867 } 4868 4869 ahc->init_level++; 4870 4871 /* Allocation of driver data */ 4872 if (aic_dmamem_alloc(ahc, ahc->shared_data_dmat, 4873 (void **)&ahc->qoutfifo, 4874 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 4875 return (ENOMEM); 4876 } 4877 4878 ahc->init_level++; 4879 4880 /* And permanently map it in */ 4881 aic_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 4882 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, 4883 &ahc->shared_data_busaddr, /*flags*/0); 4884 4885 if ((ahc->features & AHC_TARGETMODE) != 0) { 4886 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4887 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4888 ahc->dma_bug_buf = ahc->shared_data_busaddr 4889 + driver_data_size - 1; 4890 /* All target command blocks start out invalid. */ 4891 for (i = 0; i < AHC_TMODE_CMDS; i++) 4892 ahc->targetcmds[i].cmd_valid = 0; 4893 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); 4894 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4895 } 4896 ahc->qinfifo = &ahc->qoutfifo[256]; 4897 4898 ahc->init_level++; 4899 4900 /* Allocate SCB data now that buffer_dmat is initialized */ 4901 if (ahc->scb_data->maxhscbs == 0) 4902 if (ahc_init_scbdata(ahc) != 0) 4903 return (ENOMEM); 4904 4905 /* 4906 * Allocate a tstate to house information for our 4907 * initiator presence on the bus as well as the user 4908 * data for any target mode initiator. 
4909 */ 4910 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4911 printf("%s: unable to allocate ahc_tmode_tstate. " 4912 "Failing attach\n", ahc_name(ahc)); 4913 return (ENOMEM); 4914 } 4915 4916 if ((ahc->features & AHC_TWIN) != 0) { 4917 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4918 printf("%s: unable to allocate ahc_tmode_tstate. " 4919 "Failing attach\n", ahc_name(ahc)); 4920 return (ENOMEM); 4921 } 4922 } 4923 4924 /* 4925 * Fire up a recovery thread for this controller. 4926 */ 4927 error = ahc_spawn_recovery_thread(ahc); 4928 if (error != 0) 4929 return (error); 4930 4931 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4932 ahc->flags |= AHC_PAGESCBS; 4933 } else { 4934 ahc->flags &= ~AHC_PAGESCBS; 4935 } 4936 4937 #ifdef AHC_DEBUG 4938 if (ahc_debug & AHC_SHOW_MISC) { 4939 printf("%s: hardware scb %u bytes; kernel scb %u bytes; " 4940 "ahc_dma %u bytes\n", 4941 ahc_name(ahc), 4942 (u_int)sizeof(struct hardware_scb), 4943 (u_int)sizeof(struct scb), 4944 (u_int)sizeof(struct ahc_dma_seg)); 4945 } 4946 #endif /* AHC_DEBUG */ 4947 4948 /* 4949 * Look at the information that board initialization or 4950 * the board bios has left us. 4951 */ 4952 if (ahc->features & AHC_TWIN) { 4953 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4954 if ((scsi_conf & RESET_SCSI) != 0 4955 && (ahc->flags & AHC_INITIATORROLE) != 0) 4956 ahc->flags |= AHC_RESET_BUS_B; 4957 } 4958 4959 scsi_conf = ahc_inb(ahc, SCSICONF); 4960 if ((scsi_conf & RESET_SCSI) != 0 4961 && (ahc->flags & AHC_INITIATORROLE) != 0) 4962 ahc->flags |= AHC_RESET_BUS_A; 4963 4964 ultraenb = 0; 4965 tagenable = ALL_TARGETS_MASK; 4966 4967 /* Grab the disconnection disable table and invert it for our needs */ 4968 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4969 printf("%s: Host Adapter Bios disabled. 
Using default SCSI " 4970 "device parameters\n", ahc_name(ahc)); 4971 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4972 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4973 discenable = ALL_TARGETS_MASK; 4974 if ((ahc->features & AHC_ULTRA) != 0) 4975 ultraenb = ALL_TARGETS_MASK; 4976 } else { 4977 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4978 | ahc_inb(ahc, DISC_DSB)); 4979 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4980 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4981 | ahc_inb(ahc, ULTRA_ENB); 4982 } 4983 4984 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4985 max_targ = 7; 4986 4987 for (i = 0; i <= max_targ; i++) { 4988 struct ahc_initiator_tinfo *tinfo; 4989 struct ahc_tmode_tstate *tstate; 4990 u_int our_id; 4991 u_int target_id; 4992 char channel; 4993 4994 channel = 'A'; 4995 our_id = ahc->our_id; 4996 target_id = i; 4997 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4998 channel = 'B'; 4999 our_id = ahc->our_id_b; 5000 target_id = i % 8; 5001 } 5002 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 5003 target_id, &tstate); 5004 /* Default to async narrow across the board */ 5005 memset(tinfo, 0, sizeof(*tinfo)); 5006 if (ahc->flags & AHC_USEDEFAULTS) { 5007 if ((ahc->features & AHC_WIDE) != 0) 5008 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 5009 5010 /* 5011 * These will be truncated when we determine the 5012 * connection type we have with the target. 5013 */ 5014 tinfo->user.period = ahc_syncrates->period; 5015 tinfo->user.offset = MAX_OFFSET; 5016 } else { 5017 u_int scsirate; 5018 uint16_t mask; 5019 5020 /* Take the settings leftover in scratch RAM. */ 5021 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 5022 mask = (0x01 << i); 5023 if ((ahc->features & AHC_ULTRA2) != 0) { 5024 u_int offset; 5025 u_int maxsync; 5026 5027 if ((scsirate & SOFS) == 0x0F) { 5028 /* 5029 * Haven't negotiated yet, 5030 * so the format is different. 5031 */ 5032 scsirate = (scsirate & SXFR) >> 4 5033 | (ultraenb & mask) 5034 ? 
0x08 : 0x0 5035 | (scsirate & WIDEXFER); 5036 offset = MAX_OFFSET_ULTRA2; 5037 } else 5038 offset = ahc_inb(ahc, TARG_OFFSET + i); 5039 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 5040 /* Set to the lowest sync rate, 5MHz */ 5041 scsirate |= 0x1c; 5042 maxsync = AHC_SYNCRATE_ULTRA2; 5043 if ((ahc->features & AHC_DT) != 0) 5044 maxsync = AHC_SYNCRATE_DT; 5045 tinfo->user.period = 5046 ahc_find_period(ahc, scsirate, maxsync); 5047 if (offset == 0) 5048 tinfo->user.period = 0; 5049 else 5050 tinfo->user.offset = MAX_OFFSET; 5051 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 5052 && (ahc->features & AHC_DT) != 0) 5053 tinfo->user.ppr_options = 5054 MSG_EXT_PPR_DT_REQ; 5055 } else if ((scsirate & SOFS) != 0) { 5056 if ((scsirate & SXFR) == 0x40 5057 && (ultraenb & mask) != 0) { 5058 /* Treat 10MHz as a non-ultra speed */ 5059 scsirate &= ~SXFR; 5060 ultraenb &= ~mask; 5061 } 5062 tinfo->user.period = 5063 ahc_find_period(ahc, scsirate, 5064 (ultraenb & mask) 5065 ? AHC_SYNCRATE_ULTRA 5066 : AHC_SYNCRATE_FAST); 5067 if (tinfo->user.period != 0) 5068 tinfo->user.offset = MAX_OFFSET; 5069 } 5070 if (tinfo->user.period == 0) 5071 tinfo->user.offset = 0; 5072 if ((scsirate & WIDEXFER) != 0 5073 && (ahc->features & AHC_WIDE) != 0) 5074 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 5075 tinfo->user.protocol_version = 4; 5076 if ((ahc->features & AHC_DT) != 0) 5077 tinfo->user.transport_version = 3; 5078 else 5079 tinfo->user.transport_version = 2; 5080 tinfo->goal.protocol_version = 2; 5081 tinfo->goal.transport_version = 2; 5082 tinfo->curr.protocol_version = 2; 5083 tinfo->curr.transport_version = 2; 5084 } 5085 tstate->ultraenb = 0; 5086 } 5087 ahc->user_discenable = discenable; 5088 ahc->user_tagenable = tagenable; 5089 5090 return (ahc->bus_chip_init(ahc)); 5091 } 5092 5093 void 5094 ahc_intr_enable(struct ahc_softc *ahc, int enable) 5095 { 5096 u_int hcntrl; 5097 5098 hcntrl = ahc_inb(ahc, HCNTRL); 5099 hcntrl &= ~INTEN; 5100 ahc->pause &= ~INTEN; 5101 ahc->unpause &= 
~INTEN;
	if (enable) {
		hcntrl |= INTEN;
		ahc->pause |= INTEN;
		ahc->unpause |= INTEN;
	}
	ahc_outb(ahc, HCNTRL, hcntrl);
}

/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
 */
void
ahc_pause_and_flushwork(struct ahc_softc *ahc)
{
	int intstat;
	int maxloops;
	int paused;

	maxloops = 1000;
	ahc->flags |= AHC_ALL_INTERRUPTS;
	paused = FALSE;
	do {
		if (paused) {
			ahc_unpause(ahc);
			/*
			 * Give the sequencer some time to service
			 * any active selections.
			 */
			aic_delay(500);
		}
		ahc_intr(ahc);
		ahc_pause(ahc);
		paused = TRUE;
		/* Prevent new selections while we drain pending work. */
		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		intstat = ahc_inb(ahc, INTSTAT);
		if ((intstat & INT_PEND) == 0) {
			ahc_clear_critical_section(ahc);
			intstat = ahc_inb(ahc, INTSTAT);
		}
	} while (--maxloops
	      && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
	      && ((intstat & INT_PEND) != 0
	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
	if (maxloops == 0) {
		printf("Infinite interrupt loop, INTSTAT = %x",
		       ahc_inb(ahc, INTSTAT));
	}
	ahc_platform_flushwork(ahc);
	ahc->flags &= ~AHC_ALL_INTERRUPTS;
}

/*
 * Prepare the controller for a power-down/suspend.  Refuses (EBUSY)
 * while any transaction is still outstanding.
 */
int
ahc_suspend(struct ahc_softc *ahc)
{

	ahc_pause_and_flushwork(ahc);

	if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
		ahc_unpause(ahc);
		return (EBUSY);
	}

#ifdef AHC_TARGET_MODE
	/*
	 * XXX What about ATIOs that have not yet been serviced?
	 * Perhaps we should just refuse to be suspended if we
	 * are acting in a target role.
	 */
	if (ahc->pending_device != NULL) {
		ahc_unpause(ahc);
		return (EBUSY);
	}
#endif
	ahc_shutdown(ahc);
	return (0);
}

/*
 * Bring the controller back from suspend: full reset with
 * reinitialization, re-enable interrupts, and restart the sequencer.
 */
int
ahc_resume(struct ahc_softc *ahc)
{

	ahc_reset(ahc, /*reinit*/TRUE);
	ahc_intr_enable(ahc, TRUE);
	ahc_restart(ahc);
	return (0);
}

/************************** Busy Target Table *********************************/
/*
 * Return the untagged transaction id for a given target/channel lun.
 * Optionally, clear the entry.
 */
u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int scbid;
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		/* Table lives in SCB ram; preserve the current SCBPTR. */
		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
	}

	return (scbid);
}

/*
 * Clear the busy-target-table entry for the given target/channel/lun.
 */
void
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
	}
}

/*
 * Record 'scbid' as the active untagged transaction for the given
 * target/channel/lun in the busy-target table.
 */
void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
	}
}

/************************** SCB and SCB queue management **********************/
/*
 * Return non-zero if the SCB matches the given target/channel/lun/tag
 * criteria for the requested role.  ALL_CHANNELS, CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD and SCB_LIST_NULL act as wildcards for their fields.
 */
int
ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahc, scb);
	char chan = SCB_GET_CHANNEL(ahc, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#ifdef AHC_TARGET_MODE
		int group;

		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			      && ((tag == scb->hscb->tag)
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHC_TARGET_MODE */
		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
#endif /* AHC_TARGET_MODE */
	}

	return match;
}

/*
 * Requeue (CAM_REQUEUE_REQ) every not-yet-started transaction queued to
 * the same device as 'scb', then let the platform layer freeze the devq.
 */
void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	int	target;
	char	channel;
	int	lun;

	target = SCB_GET_TARGET(ahc, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahc, scb);

	ahc_search_qinfifo(ahc, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahc_platform_freeze_devq(ahc, scb);
}

/*
 * Place 'scb' at the tail of the qinfifo, linking it behind the current
 * last entry (if any), and notify the chip of the new queue position.
 */
void
ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
{
	struct scb *prev_scb;

	prev_scb = NULL;
	if (ahc_qinfifo_count(ahc) != 0) {
		u_int prev_tag;
		uint8_t prev_pos;

		prev_pos = ahc->qinfifonext - 1;
		prev_tag = ahc->qinfifo[prev_pos];
		prev_scb = ahc_lookup_scb(ahc, prev_tag);
	}
	ahc_qinfifo_requeue(ahc, prev_scb, scb);
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}
}

/*
 * Link 'scb' into the qinfifo after 'prev_scb' (NULL means it becomes
 * the head, i.e. NEXT_QUEUED_SCB) and sync the affected hardware SCBs.
 */
static void
ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
	} else {
		prev_scb->hscb->next = scb->hscb->tag;
		ahc_sync_scb(ahc, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of entries the sequencer has yet to consume from the qinfifo.
 * uint8_t arithmetic handles wrap of the 256-entry queue naturally.
 */
static int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	uint8_t qinpos;
	uint8_t diff;

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	diff = ahc->qinfifonext - qinpos;
	return (diff);
}

/*
 * Walk the qinfifo, the waiting-for-selection list, and the untagged
 * queues for SCBs matching the criteria and apply 'action'
 * (count / remove / complete-with-'status').  Returns the number of
 * matches found.
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahc_search_action action)
{
	struct	scb *scb;
	struct	scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next;
	uint8_t prev;
	uint8_t curscbptr;
	int	found;
	int	have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahc->qinfifonext = qinpos;
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	while (qinpos != qintail) {
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
		if (scb == NULL) {
			printf("qinpos = %d, SCB index = %d\n",
				qinpos, ahc->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = aic_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					aic_set_transaction_status(scb, status);
				cstat = aic_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					aic_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahc_done(ahc, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Matched entries are simply not re-queued. */
				break;
			case SEARCH_COUNT:
				ahc_qinfifo_requeue(ahc, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			ahc_qinfifo_requeue(ahc, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos++;
	}

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}

	if (action != SEARCH_COUNT
	 && (found != 0)
	 && (qinstart != ahc->qinfifonext)) {
		/*
		 * The sequencer may be in the process of dmaing
		 * down the SCB at the beginning of the queue.
		 * This could be problematic if either the first,
		 * or the second SCB is removed from the queue
		 * (the first SCB includes a pointer to the "next"
		 * SCB to dma).  If we have removed any entries, swap
		 * the first element in the queue with the next HSCB
		 * so the sequencer will notice that NEXT_QUEUED_SCB
		 * has changed during its dma attempt and will retry
		 * the DMA.
		 */
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);

		if (scb == NULL) {
			printf("found = %d, qinstart = %d, qinfifionext = %d\n",
				found, qinstart, ahc->qinfifonext);
			panic("First/Second Qinfifo fixup\n");
		}
		/*
		 * ahc_swap_with_next_hscb forces our next pointer to
		 * point to the reserved SCB for future commands.  Save
		 * and restore our original next pointer to maintain
		 * queue integrity.
		 */
		next = scb->hscb->next;
		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
		ahc_swap_with_next_hscb(ahc, scb);
		scb->hscb->next = next;
		ahc->qinfifo[qinstart] = scb->hscb->tag;

		/* Tell the card about the new head of the qinfifo. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);

		/* Fixup the tail "next" pointer. */
		qintail = ahc->qinfifonext - 1;
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	}

	/*
	 * Search waiting for selection list.
	 */
	curscbptr = ahc_inb(ahc, SCBPTR);
	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
	prev = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		uint8_t scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Waiting List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("scb_index = %d, next = %d\n",
				scb_index, next);
			panic("Waiting List traversal\n");
		}
		if (ahc_match_scb(ahc, scb, target, channel,
				  lun, SCB_LIST_NULL, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = aic_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					aic_set_transaction_status(scb,
								   status);
				cstat = aic_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					aic_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in Wait List\n");
				ahc_done(ahc, scb);
				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				next = ahc_rem_wscb(ahc, next, prev);
				break;
			case SEARCH_COUNT:
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
				break;
			}
		} else {

			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	ahc_outb(ahc, SCBPTR, curscbptr);

	found += ahc_search_untagged_queues(ahc, /*aic_io_ctx_t*/NULL, target,
					    channel, lun, status, action);

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Walk the per-target untagged transaction queues applying 'action'
 * (count / remove / complete-with-'status') to not-yet-started SCBs
 * matching target/channel/lun (and 'ctx' when non-NULL).  Returns the
 * number of matches.  With SCB-based BTT the untagged queues are not
 * used, so the loop body is skipped entirely (maxtarget == 0).
 */
int
ahc_search_untagged_queues(struct ahc_softc *ahc, aic_io_ctx_t ctx,
			   int target, char channel, int lun, uint32_t status,
			   ahc_search_action action)
{
	struct	scb *scb;
	int	maxtarget;
	int	found;
	int	i;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	found = 0;
	i = 0;
	if ((ahc->flags & AHC_SCB_BTT) == 0) {

		maxtarget = 16;
		if (target != CAM_TARGET_WILDCARD) {

			i = target;
			if (channel == 'B')
				i += 8;
			maxtarget = i + 1;
		}
	} else {
		maxtarget = 0;
	}

	for (; i < maxtarget; i++) {
		struct scb_tailq *untagged_q;
		struct scb *next_scb;

		untagged_q = &(ahc->untagged_queues[i]);
		next_scb = TAILQ_FIRST(untagged_q);
		while (next_scb != NULL) {

			scb = next_scb;
			next_scb = TAILQ_NEXT(scb, links.tqe);

			/*
			 * The head of the list may be the currently
			 * active untagged command for a device.
			 * We're only searching for commands that
			 * have not been started.  A transaction
			 * marked active but still in the qinfifo
			 * is removed by the qinfifo scanning code
			 * above.
			 */
			if ((scb->flags & SCB_ACTIVE) != 0)
				continue;

			if (ahc_match_scb(ahc, scb, target, channel, lun,
					  SCB_LIST_NULL, ROLE_INITIATOR) == 0
			 || (ctx != NULL && ctx != scb->io_ctx))
				continue;

			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = aic_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					aic_set_transaction_status(scb, status);
				cstat = aic_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					aic_freeze_scb(scb);
				ahc_done(ahc, scb);
				break;
			}
			case SEARCH_REMOVE:
				scb->flags &= ~SCB_UNTAGGEDQ;
				TAILQ_REMOVE(untagged_q, scb, links.tqe);
				break;
			case SEARCH_COUNT:
				break;
			}
		}
	}

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Walk the chip's disconnected-SCB list looking for entries matching
 * target/channel/lun/tag, optionally removing them and/or stopping at
 * the first match.  'save_state' preserves SCBPTR across the traversal.
 */
int
ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
		     int lun, u_int tag, int stop_on_first, int remove,
		     int save_state)
{
	struct	scb *scbp;
	u_int	next;
	u_int	prev;
	u_int	count;
	u_int	active_scb;

	count = 0;
	next = ahc_inb(ahc, DISCONNECTED_SCBH);
	prev = SCB_LIST_NULL;

	if (save_state) {
		/* restore this when we're done */
		active_scb = ahc_inb(ahc, SCBPTR);
	} else
		/* Silence compiler */
		active_scb = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		u_int scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Disconnected List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}

		if (next == prev) {
			panic("Disconnected List Loop. 
" 5705 "cur SCBPTR == %x, prev SCBPTR == %x.", 5706 next, prev); 5707 } 5708 scbp = ahc_lookup_scb(ahc, scb_index); 5709 if (ahc_match_scb(ahc, scbp, target, channel, lun, 5710 tag, ROLE_INITIATOR)) { 5711 count++; 5712 if (remove) { 5713 next = 5714 ahc_rem_scb_from_disc_list(ahc, prev, next); 5715 } else { 5716 prev = next; 5717 next = ahc_inb(ahc, SCB_NEXT); 5718 } 5719 if (stop_on_first) 5720 break; 5721 } else { 5722 prev = next; 5723 next = ahc_inb(ahc, SCB_NEXT); 5724 } 5725 } 5726 if (save_state) 5727 ahc_outb(ahc, SCBPTR, active_scb); 5728 return (count); 5729 } 5730 5731 /* 5732 * Remove an SCB from the on chip list of disconnected transactions. 5733 * This is empty/unused if we are not performing SCB paging. 5734 */ 5735 static u_int 5736 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 5737 { 5738 u_int next; 5739 5740 ahc_outb(ahc, SCBPTR, scbptr); 5741 next = ahc_inb(ahc, SCB_NEXT); 5742 5743 ahc_outb(ahc, SCB_CONTROL, 0); 5744 5745 ahc_add_curscb_to_free_list(ahc); 5746 5747 if (prev != SCB_LIST_NULL) { 5748 ahc_outb(ahc, SCBPTR, prev); 5749 ahc_outb(ahc, SCB_NEXT, next); 5750 } else 5751 ahc_outb(ahc, DISCONNECTED_SCBH, next); 5752 5753 return (next); 5754 } 5755 5756 /* 5757 * Add the SCB as selected by SCBPTR onto the on chip list of 5758 * free hardware SCBs. This list is empty/unused if we are not 5759 * performing SCB paging. 5760 */ 5761 static void 5762 ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 5763 { 5764 /* 5765 * Invalidate the tag so that our abort 5766 * routines don't think it's active. 5767 */ 5768 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 5769 5770 if ((ahc->flags & AHC_PAGESCBS) != 0) { 5771 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 5772 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 5773 } 5774 } 5775 5776 /* 5777 * Manipulate the waiting for selection list and return the 5778 * scb that follows the one that we remove. 
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct	scb *scbp;
	struct	scb *scbp_next;
	u_int	active_scb;
	int	i, j;
	int	maxtarget;
	int	minlun;
	int	maxlun;

	int	found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process.
	 */
	ahc_freeze_untagged_queues(ahc);

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {

		/*
		 * Unless we are using an SCB based
		 * busy targets table, there is only
		 * one table entry for all luns of
		 * a target.
		 */
		minlun = 0;
		maxlun = 1;
		if ((ahc->flags & AHC_SCB_BTT) != 0)
			maxlun = AHC_NUM_LUNS;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				scbid = ahc_index_busy_tcl(ahc, tcl);
				scbp = ahc_lookup_scb(ahc, scbid);
				if (scbp == NULL
				 || ahc_match_scb(ahc, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
			}
		}

		/*
		 * Go through the disconnected list and remove any entries we
		 * have queued for completion, 0'ing their control byte too.
		 * We save the active SCB and restore it ourselves, so there
		 * is no reason for this search to restore it too.
		 */
		ahc_search_disc_list(ahc, target, channel, lun, tag,
				     /*stop_on_first*/FALSE, /*remove*/TRUE,
				     /*save_state*/FALSE);
	}

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.  In some cases, these remnants
	 * might not still have mappings in the scbindex array (e.g. unexpected
	 * bus free with the same scb queued for an abort).  Don't hold this
	 * against them.
	 */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = ahc_lookup_scb(ahc, scbid);
		if ((scbp == NULL && scbid != SCB_LIST_NULL)
		 || (scbp != NULL
		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Grab the successor now; ahc_done() unlinks scbp. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = aic_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				aic_set_transaction_status(scbp, status);
			if (aic_get_transaction_status(scbp) != CAM_REQ_CMP)
				aic_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}

/*
 * Assert the SCSI bus reset line on the currently selected bus, hold it
 * for AHC_BUSRESET_DELAY, then release it.  SCSI reset interrupts are
 * masked (ENSCSIRST cleared) while SCSIRSTO is toggled so we do not take
 * an interrupt for the reset we generate ourselves.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	ahc_flush_device_writes(ahc);
	aic_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}

/*
 * Reset the named channel ('A' or 'B'), optionally asserting SCSI bus
 * reset on it (initiate_reset), abort all pending transactions on the
 * channel with CAM_SCSI_BUS_RESET status, and revert every target on
 * the channel to async/narrow transfers until renegotiation.  Returns
 * the number of SCBs aborted.
 */
int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct	ahc_devinfo devinfo;
	u_int	initiator, target, max_scsiid;
	u_int	sblkctl;
	u_int	scsiseq;
	u_int	simode1;
	int	found;
	int	restart_needed;
	char	cur_channel;

	ahc->pending_device = NULL;

	ahc_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahc_pause(ahc);

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
	/*
	 * XXX - In Twin mode, the tqinfifo may have commands
	 *	 for an unaffected channel in it.  However, if
	 *	 we have run out of ATIO resources to drain that
	 *	 queue, we may not get them all out here.  Further,
	 *	 the blocked transactions for the reset channel
	 *	 should just be killed off, irrespective of whether
	 *	 we are blocked on ATIO resources.  Write a routine
	 *	 to compact the tqinfifo appropriately.
	 */
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
	}
#endif

	/*
	 * Reset the bus if we are initiating this reset
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	cur_channel = 'A';
	if ((ahc->features & AHC_TWIN) != 0
	 && ((sblkctl & SELBUSB) != 0))
		cur_channel = 'B';
	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
	if (cur_channel != channel) {
		/* Case 1: Command for another bus is active
		 * Stealthily reset the other bus without
		 * upsetting the current bus.
		 */
		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		/* Switch back to the bus we were on when called. */
		ahc_outb(ahc, SBLKCTL, sblkctl);
		restart_needed = FALSE;
	} else {
		/* Case 2: A command from this bus is active or we're idle */
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		restart_needed = TRUE;
	}

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahc_tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif
	/* Notify the XPT that a bus reset occurred */
	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
	return found;
}


/***************************** Residual Processing ****************************/
/*
 * Calculate the residual for a just completed SCB.
 */
void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct status_pkt *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_RESID_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	sgptr = aic_le32toh(hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_RESID_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	resid_sgptr = aic_le32toh(spkt->residual_sg_ptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = aic_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = aic_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((aic_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += aic_le32toh(sg->len) & AHC_SG_LEN_MASK;
		}
	}
	if ((scb->flags & SCB_SENSE) == 0)
		aic_set_residual(scb, resid);
	else
		aic_set_sense_residual(scb, resid);

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
		ahc_print_path(ahc, scb);
		printf("Handled %sResidual of %d bytes\n",
		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHC_TARGET_MODE
/*
 * Add a target mode event to this lun's queue
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Count the entries currently held in the event ring buffer. */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		/* Ring is full; drop the oldest event to make room. */
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHC_DUMP_SEQ
/*
 * Dump the sequencer instruction RAM to the console, one 32-bit
 * instruction per line.
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < ahc->instruction_ram_size; i++) {
		uint8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer program into the controller's instruction RAM,
 * applying any conditional patches and recording where each critical
 * section lands in the downloaded image.  Returns 0 on success or
 * ENOMEM if the program does not fit in instruction memory.
 */
static int
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	int	downloaded;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	sg_prefetch_cnt = ahc->pci_cachesize;
	/* Prefetch at least two SG elements worth of data. */
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}

		if (downloaded == ahc->instruction_ram_size) {
			/*
			 * We're about to exceed the instruction
			 * storage capacity for this chip.  Fail
			 * the load.
			 */
			printf("\n%s: Program too large for instruction memory "
			       "size of %d!\n", ahc_name(ahc),
			       ahc->instruction_ram_size);
			return (ENOMEM);
		}

		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printf(" %d instructions downloaded\n", downloaded);
		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
	}
	return (0);
}

/*
 * Determine whether the instruction at start_instr should be downloaded,
 * advancing *start_patch past any patch entries that begin at this
 * address.  Returns 0 if the instruction lies inside a rejected patch
 * region (with *skip_addr marking the end of that region), 1 if the
 * instruction should be downloaded.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	struct	patch *cur_patch;
	struct	patch *last_patch;
	u_int	num_patches;

	num_patches = sizeof(patches)/sizeof(struct patch);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Translate one sequencer instruction for download: relocate jump
 * addresses around removed patch regions, substitute downloadable
 * constants, and either add odd parity (Ultra2 parts) or compress
 * the encoding for older sequencers before writing it to SEQRAM.
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count how many instructions before the jump target
		 * were removed by patches so the target can be rebased.
		 */
		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel.
Verify that 6567 * this is only a move of a single element 6568 * and convert the BMOV to a MOV 6569 * (AND with an immediate of FF). 6570 */ 6571 if (fmt1_ins->immediate != 1) 6572 panic("%s: BMOV not supported\n", 6573 ahc_name(ahc)); 6574 fmt1_ins->opcode = AIC_OP_AND; 6575 fmt1_ins->immediate = 0xff; 6576 } 6577 /* FALLTHROUGH */ 6578 case AIC_OP_ROL: 6579 if ((ahc->features & AHC_ULTRA2) != 0) { 6580 int i, count; 6581 6582 /* Calculate odd parity for the instruction */ 6583 for (i = 0, count = 0; i < 31; i++) { 6584 uint32_t mask; 6585 6586 mask = 0x01 << i; 6587 if ((instr.integer & mask) != 0) 6588 count++; 6589 } 6590 if ((count & 0x01) == 0) 6591 instr.format1.parity = 1; 6592 } else { 6593 /* Compress the instruction for older sequencers */ 6594 if (fmt3_ins != NULL) { 6595 instr.integer = 6596 fmt3_ins->immediate 6597 | (fmt3_ins->source << 8) 6598 | (fmt3_ins->address << 16) 6599 | (fmt3_ins->opcode << 25); 6600 } else { 6601 instr.integer = 6602 fmt1_ins->immediate 6603 | (fmt1_ins->source << 8) 6604 | (fmt1_ins->destination << 16) 6605 | (fmt1_ins->ret << 24) 6606 | (fmt1_ins->opcode << 25); 6607 } 6608 } 6609 /* The sequencer is a little endian cpu */ 6610 instr.integer = aic_htole32(instr.integer); 6611 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 6612 break; 6613 default: 6614 panic("Unknown opcode encountered in seq program"); 6615 break; 6616 } 6617 } 6618 6619 int 6620 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 6621 const char *name, u_int address, u_int value, 6622 u_int *cur_column, u_int wrap_point) 6623 { 6624 int printed; 6625 u_int printed_mask; 6626 6627 if (cur_column != NULL && *cur_column >= wrap_point) { 6628 printf("\n"); 6629 *cur_column = 0; 6630 } 6631 printed = printf("%s[0x%x]", name, value); 6632 if (table == NULL) { 6633 printed += printf(" "); 6634 *cur_column += printed; 6635 return (printed); 6636 } 6637 printed_mask = 0; 6638 while (printed_mask != 0xFF) { 6639 int entry; 6640 6641 for (entry = 0; 
	     entry < num_entries; entry++) {
			if (((value & table[entry].mask)
			  != table[entry].value)
			 || ((printed_mask & table[entry].mask)
			  == table[entry].mask))
				continue;

			printed += printf("%s%s",
					  printed_mask == 0 ? ":(" : "|",
					  table[entry].name);
			printed_mask |= table[entry].mask;

			break;
		}
		if (entry >= num_entries)
			break;
	}
	if (printed_mask != 0)
		printed += printf(") ");
	else
		printed += printf(" ");
	if (cur_column != NULL)
		*cur_column += printed;
	return (printed);
}

/*
 * Dump a human-readable snapshot of controller and SCB queue state to the
 * console for debugging.  The chip is paused if it was not already paused,
 * and both the pause state and the saved SCBPTR are restored on exit.
 */
void
ahc_dump_card_state(struct ahc_softc *ahc)
{
	struct scb	*scb;
	struct scb_tailq *untagged_q;
	u_int		 cur_col;
	int		 paused;
	int		 target;
	int		 maxtarget;
	int		 i;
	uint8_t		 last_phase;
	uint8_t		 qinpos;
	uint8_t		 qintail;
	uint8_t		 qoutpos;
	uint8_t		 scb_index;
	uint8_t		 saved_scbptr;

	/* Pause the sequencer so register state is stable while we read it. */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	last_phase = ahc_inb(ahc, LASTPHASE);
	printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
	       "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
	if (paused)
		printf("Card was paused\n");
	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
	       ahc_inb(ahc, ARG_2));
	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
	       ahc_inb(ahc, SCBPTR));
	cur_col = 0;
	if ((ahc->features & AHC_DT) != 0)
		ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
	ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
	ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
	ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
	ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
	ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
	ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
	ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
	ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
	ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
	ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
	ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
	ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
	ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
	ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
	ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
	ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
	ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
	ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
	if (cur_col != 0)
		printf("\n");
	printf("STACK:");
	for (i = 0; i < STACK_SIZE; i++)
		/*
		 * NOTE(review): the two STACK reads are combined into one
		 * 16-bit entry; this assumes the register auto-advances on
		 * each read -- confirm against the hardware documentation.
		 */
		printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
	printf("\nSCB count = %d\n", ahc->scb_data->numscbs);
	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
	/* QINFIFO */
	printf("QINFIFO entries: ");
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	qintail = ahc->qinfifonext;
	while (qinpos != qintail) {
		printf("%d ", ahc->qinfifo[qinpos]);
		qinpos++;
	}
	printf("\n");

	/* Walk the sequencer's linked lists; i bounds each walk at 256. */
	printf("Waiting Queue entries: ");
	scb_index = ahc_inb(ahc, WAITING_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Disconnected Queue entries: ");
	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	printf("QOUTFIFO entries: ");
	qoutpos = ahc->qoutfifonext;
	i = 0;
	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
		printf("%d ", ahc->qoutfifo[qoutpos]);
		qoutpos++;
	}
	printf("\n");

	printf("Sequencer Free SCB List: ");
	scb_index = ahc_inb(ahc, FREE_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d ", scb_index);
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Sequencer SCB Info: ");
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);
		cur_col = printf("\n%3d ", i);

		ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
		ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
		ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
		ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
	}
	printf("\n");

	printf("Pending list: ");
	i = 0;
	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
		if (i++ > 256)
			break;
		cur_col = printf("\n%3d ", scb->hscb->tag);
		ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
		ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
		ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			/* Without SCB paging, also show the card's copy. */
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
			printf("(");
			ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
					      &cur_col, 60);
			ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
			printf(")");
		}
	}
	printf("\n");

	printf("Kernel Free SCB list: ");
	i = 0;
	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
		if (i++ > 256)
			break;
		printf("%d ", scb->hscb->tag);
	}
	printf("\n");

	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
	for (target = 0; target <= maxtarget; target++) {
		untagged_q = &ahc->untagged_queues[target];
		if (TAILQ_FIRST(untagged_q) == NULL)
			continue;
		printf("Untagged Q(%d): ", target);
		i = 0;
		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
			if (i++ > 256)
				break;
			printf("%d ", scb->hscb->tag);
		}
		printf("\n");
	}

	ahc_platform_dump_card_state(ahc);
	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
	/* Restore the state we clobbered while dumping. */
	ahc_outb(ahc, SCBPTR, saved_scbptr);
	if (paused == 0)
		ahc_unpause(ahc);
}

/*************************** Timeout Handling *********************************/
/*
 * Per-SCB timeout entry point: queue the SCB on the softc's timedout list
 * (once), mark it SCB_TIMEDOUT, and wake the recovery thread which performs
 * the actual recovery work in ahc_recover_commands().
 */
void
ahc_timeout(struct scb *scb)
{
	struct ahc_softc *ahc;

	ahc = scb->ahc_softc;
	if ((scb->flags & SCB_ACTIVE) != 0) {
		if ((scb->flags & SCB_TIMEDOUT) == 0) {
			LIST_INSERT_HEAD(&ahc->timedout_scbs, scb,
					 timedout_links);
			scb->flags |= SCB_TIMEDOUT;
		}
		ahc_wakeup_recovery_thread(ahc);
	}
}

/*
 * Re-schedule a timeout for the passed in SCB if we determine that some
 * other SCB is in the process of recovery or an SCB with a longer
 * timeout is still pending.  Limit our search to just "other_scb"
 * if it is non-NULL.
 */
static int
ahc_other_scb_timeout(struct ahc_softc *ahc, struct scb *scb,
		      struct scb *other_scb)
{
	u_int	newtimeout;
	int	found;

	ahc_print_path(ahc, scb);
	printf("Other SCB Timeout%s",
	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
	       ?
	    " again\n" : "\n");

	newtimeout = aic_get_timeout(scb);
	scb->flags |= SCB_OTHERTCL_TIMEOUT;
	found = 0;
	if (other_scb != NULL) {
		/* Caller restricted the search to a single candidate SCB. */
		if ((other_scb->flags
		   & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0
		 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) {
			found++;
			newtimeout = MAX(aic_get_timeout(other_scb),
					 newtimeout);
		}
	} else {
		/* Otherwise scan everything still pending. */
		LIST_FOREACH(other_scb, &ahc->pending_scbs, pending_links) {
			if ((other_scb->flags
			   & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0
			 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) {
				found++;
				newtimeout =
				    MAX(aic_get_timeout(other_scb),
					newtimeout);
			}
		}
	}

	if (found != 0)
		aic_scb_timer_reset(scb, newtimeout);
	else {
		ahc_print_path(ahc, scb);
		printf("No other SCB worth waiting for...\n");
	}

	return (found != 0);
}

/*
 * ahc_recover_commands determines if any of the commands that have currently
 * timedout are the root cause for this timeout.  Innocent commands are given
 * a new timeout while we wait for the command executing on the bus to timeout.
 * This routine is invoked from a thread context so we are allowed to sleep.
 * Our lock is not held on entry.
 */
void
ahc_recover_commands(struct ahc_softc *ahc)
{
	struct	scb *scb;
	long	s;
	int	found;
	int	restart_needed;
	u_int	last_phase;

	ahc_lock(ahc, &s);

	/*
	 * Pause the controller and manually flush any
	 * commands that have just completed but that our
	 * interrupt handler has yet to see.
	 */
	ahc_pause_and_flushwork(ahc);

	if (LIST_EMPTY(&ahc->timedout_scbs) != 0) {
		/*
		 * The timedout commands have already
		 * completed.  This typically means
		 * that either the timeout value was on
		 * the hairy edge of what the device
		 * requires or - more likely - interrupts
		 * are not happening.
		 */
		printf("%s: Timedout SCBs already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	restart_needed = 0;
	printf("%s: Recovery Initiated\n", ahc_name(ahc));
	ahc_dump_card_state(ahc);

	last_phase = ahc_inb(ahc, LASTPHASE);
	while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) {
		u_int	active_scb_index;
		u_int	saved_scbptr;
		int	target;
		int	lun;
		int	i;
		char	channel;

		target = SCB_GET_TARGET(ahc, scb);
		channel = SCB_GET_CHANNEL(ahc, scb);
		lun = SCB_GET_LUN(scb);

		ahc_print_path(ahc, scb);
		printf("SCB 0x%x - timed out\n", scb->hscb->tag);
		if (scb->sg_count > 0) {
			for (i = 0; i < scb->sg_count; i++) {
				printf("sg[%d] - Addr 0x%x : Length %d\n",
				       i,
				       scb->sg_list[i].addr,
				       scb->sg_list[i].len & AHC_SG_LEN_MASK);
			}
		}
		if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
			/*
			 * Been down this road before.
			 * Do a full bus reset.
			 */
			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);
bus_reset:
			found = ahc_reset_channel(ahc, channel,
						  /*Initiate Reset*/TRUE);
			printf("%s: Issued Channel %c Bus Reset. "
			       "%d SCBs aborted\n", ahc_name(ahc), channel,
			       found);
			continue;
		}

		/*
		 * Remove the command from the timedout list in
		 * preparation for requeueing it.
		 */
		LIST_REMOVE(scb, timedout_links);
		scb->flags &= ~SCB_TIMEDOUT;

		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				if (ahc_other_scb_timeout(ahc, scb,
							  active_scb) != 0)
					goto bus_reset;
				continue;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				restart_needed = 1;
				break;
			}

			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			/* 2 seconds for the BDR to take effect. */
			aic_scb_timer_reset(scb, 2 * 1000000);
		} else if (last_phase != P_BUSFREE
			&& (ahc_inb(ahc, SSTAT1) & REQINIT) == 0) {
			/*
			 * SCB is not identified, there
			 * is no pending REQ, and the sequencer
			 * has not seen a busfree.  Looks like
			 * a stuck connection waiting to
			 * go busfree.  Reset the bus.
			 */
			printf("%s: Connection stuck awaiting busfree or "
			       "Identify Msg.\n", ahc_name(ahc));
			goto bus_reset;
		} else {
			int disconnected;

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				restart_needed = 1;
				break;
			}

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				/* 2 seconds for the BDR to take effect. */
				aic_scb_timer_reset(scb, 2 * 1000000);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset. "
				       "Flags = 0x%x\n", scb->hscb->tag,
				       scb->flags);
				goto bus_reset;
			}
		}
		break;
	}

	/*
	 * Any remaining SCBs were not the "culprit", so remove
	 * them from the timeout list.  The timer for these commands
	 * will be reset once the recovery SCB completes.
	 */
	while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) {

		LIST_REMOVE(scb, timedout_links);
		scb->flags &= ~SCB_TIMEDOUT;
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
	ahc_unlock(ahc, &s);
}

/************************* Target Mode ****************************************/
#ifdef AHC_TARGET_MODE
/*
 * Translate the target/lun addressed by a CCB into the driver's
 * tstate/lstate bookkeeping structures.  The wildcard target/lun pair maps
 * to the "black hole" lstate.  Returns a CAM status code.
 */
cam_status
ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
		    struct ahc_tmode_tstate **tstate,
		    struct ahc_tmode_lstate **lstate,
		    int notfound_failure)
{

	if ((ahc->features & AHC_TARGETMODE) == 0)
		return (CAM_REQ_INVALID);

	/*
	 * Handle the 'black hole' device that sucks up
	 * requests to unattached luns on enabled targets.
	 */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*tstate = NULL;
		*lstate = ahc->black_hole;
	} else {
		u_int max_id;

		max_id = (ahc->features & AHC_WIDE) ?
			 15 : 7;
		if (ccb->ccb_h.target_id > max_id)
			return (CAM_TID_INVALID);

		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
			return (CAM_LUN_INVALID);

		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
		*lstate = NULL;
		if (*tstate != NULL)
			*lstate =
			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
	}

	if (notfound_failure != 0 && *lstate == NULL)
		return (CAM_PATH_INVALID);

	return (CAM_REQ_CMP);
}

/*
 * Enable or disable target-mode processing for the lun addressed by the
 * passed CCB (ccb->cel), allocating or tearing down tstate/lstate
 * bookkeeping and updating the chip's selection-enable state accordingly.
 * Status is reported back through ccb->ccb_h.status.
 */
void
ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	struct	   ahc_tmode_tstate *tstate;
	struct	   ahc_tmode_lstate *lstate;
	struct	   ccb_en_lun *cel;
	cam_status status;
	u_long	   s;
	u_int	   target;
	u_int	   lun;
	u_int	   target_mask;
	u_int	   our_id;
	int	   error;
	char	   channel;

	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
				     /*notfound_failure*/FALSE);

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if (cam_sim_bus(sim) == 0)
		our_id = ahc->our_id;
	else
		our_id = ahc->our_id_b;

	if (ccb->ccb_h.target_id != our_id) {
		/*
		 * our_id represents our initiator ID, or
		 * the ID of the first target to have an
		 * enabled lun in target mode.  There are
		 * two cases that may preclude enabling a
		 * target id other than our_id.
		 *
		 *   o our_id is for an active initiator role.
		 *     Since the hardware does not support
		 *     reselections to the initiator role at
		 *     anything other than our_id, and our_id
		 *     is used by the hardware to indicate the
		 *     ID to use for both select-out and
		 *     reselect-out operations, the only target
		 *     ID we can support in this mode is our_id.
		 *
		 *   o The MULTARGID feature is not available and
		 *     a previous target mode ID has been enabled.
		 */
		if ((ahc->features & AHC_MULTIROLE) != 0) {

			if ((ahc->features & AHC_MULTI_TID) != 0
			 && (ahc->flags & AHC_INITIATORROLE) != 0) {
				/*
				 * Only allow additional targets if
				 * the initiator role is disabled.
				 * The hardware cannot handle a re-select-in
				 * on the initiator id during a re-select-out
				 * on a different target id.
				 */
				status = CAM_TID_INVALID;
			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
				|| ahc->enabled_luns > 0) {
				/*
				 * Only allow our target id to change
				 * if the initiator role is not configured
				 * and there are no enabled luns which
				 * are attached to the currently registered
				 * scsi id.
				 */
				status = CAM_TID_INVALID;
			}
		} else if ((ahc->features & AHC_MULTI_TID) == 0
			&& ahc->enabled_luns > 0) {

			status = CAM_TID_INVALID;
		}
	}

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	/*
	 * We now have an id that is valid.
	 * If we aren't in target mode, switch modes.
	 */
	if ((ahc->flags & AHC_TARGETROLE) == 0
	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		u_long	 s;
		ahc_flag saved_flags;

		printf("Configuring Target Mode\n");
		ahc_lock(ahc, &s);
		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
			ccb->ccb_h.status = CAM_BUSY;
			ahc_unlock(ahc, &s);
			return;
		}
		saved_flags = ahc->flags;
		ahc->flags |= AHC_TARGETROLE;
		if ((ahc->features & AHC_MULTIROLE) == 0)
			ahc->flags &= ~AHC_INITIATORROLE;
		ahc_pause(ahc);
		/* Reload the sequencer firmware for the new role mix. */
		error = ahc_loadseq(ahc);
		if (error != 0) {
			/*
			 * Restore original configuration and notify
			 * the caller that we cannot support target mode.
			 * Since the adapter started out in this
			 * configuration, the firmware load will succeed,
			 * so there is no point in checking ahc_loadseq's
			 * return value.
			 */
			ahc->flags = saved_flags;
			(void)ahc_loadseq(ahc);
			ahc_restart(ahc);
			ahc_unlock(ahc, &s);
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		}
		ahc_restart(ahc);
		ahc_unlock(ahc, &s);
	}
	cel = &ccb->cel;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	channel = SIM_CHANNEL(ahc, sim);
	target_mask = 0x01 << target;
	/* Channel B ids occupy the high byte of the 16-bit target mask. */
	if (channel == 'B')
		target_mask <<= 8;

	if (cel->enable != 0) {
		u_int scsiseq;

		/* Are we already enabled?? */
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}

		if (cel->grp6_len != 0
		 || cel->grp7_len != 0) {
			/*
			 * Don't (yet?) support vendor
			 * specific commands.
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Setup our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahc_alloc_tstate(ahc, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		memset(lstate, 0, sizeof(*lstate));
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_DEVBUF);
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		ahc_lock(ahc, &s);
		ahc_pause(ahc);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahc->enabled_luns++;

			if ((ahc->features & AHC_MULTI_TID) != 0) {
				u_int targid_mask;

				/* Add this id to the selection-in mask. */
				targid_mask = ahc_inb(ahc, TARGID)
					    | (ahc_inb(ahc, TARGID + 1) << 8);

				targid_mask |= target_mask;
				ahc_outb(ahc, TARGID, targid_mask);
				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));

				ahc_update_scsiid(ahc, targid_mask);
			} else {
				u_int our_id;
				char  channel;

				channel = SIM_CHANNEL(ahc, sim);
				our_id = SIM_SCSI_ID(ahc, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char  cur_channel;
					int   swap;

					sblkctl = ahc_inb(ahc, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahc->features & AHC_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					if (channel == 'A')
						ahc->our_id = target;
					else
						ahc->our_id_b = target;

					/*
					 * Temporarily bank-switch to the
					 * other channel if needed so the
					 * SCSIID write hits the right bus.
					 */
					if (swap)
						ahc_outb(ahc, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahc_outb(ahc, SCSIID, target);

					if (swap)
						ahc_outb(ahc, SBLKCTL, sblkctl);
				}
			}
		} else
			ahc->black_hole = lstate;
		/* Allow select-in operations */
		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);
		}
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
	} else {
		struct scb *scb;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		ahc_lock(ahc, &s);

		/* Refuse to disable while any work is outstanding. */
		ccb->ccb_h.status = CAM_REQ_CMP;
		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
			struct ccb_hdr *ccbh;

			ccbh = &scb->io_ctx->ccb_h;
			if (ccbh->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
				printf("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				ahc_unlock(ahc, &s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			ahc_unlock(ahc, &s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);
		free(lstate, M_DEVBUF);

		ahc_pause(ahc);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahc->enabled_luns--;
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahc_free_tstate(ahc, target, channel,
						/*force*/FALSE);
				if (ahc->features & AHC_MULTI_TID) {
					u_int targid_mask;

					targid_mask = ahc_inb(ahc, TARGID)
						    | (ahc_inb(ahc, TARGID + 1)
						       << 8);

					targid_mask &= ~target_mask;
					ahc_outb(ahc, TARGID, targid_mask);
					ahc_outb(ahc, TARGID+1,
						 (targid_mask >> 8));
					ahc_update_scsiid(ahc, targid_mask);
				}
			}
		} else {

			ahc->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahc->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq;

			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);

			if ((ahc->features & AHC_MULTIROLE) == 0) {
				printf("Configuring Initiator Mode\n");
				ahc->flags &= ~AHC_TARGETROLE;
				ahc->flags |= AHC_INITIATORROLE;
				/*
				 * Returning to a configuration that
				 * fit previously will always succeed.
				 */
				(void)ahc_loadseq(ahc);
				ahc_restart(ahc);
				/*
				 * Unpaused.  The extra unpause
				 * that follows is harmless.
				 */
			}
		}
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
	}
}

/*
 * Keep the OID field of SCSIID consistent with the multi-TID TARGID mask
 * so we do not respond to selections on an id that is not enabled.
 */
static void
ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
{
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahc->features & AHC_MULTI_TID) == 0)
		panic("ahc_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
	else
		scsiid = ahc_inb(ahc, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			/* Mask empty: fall back to the initiator id. */
			our_id = ahc->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
	else
		ahc_outb(ahc, SCSIID, scsiid);
}

/*
 * Drain the target-mode incoming command queue, handing each valid entry
 * to ahc_handle_target_cmd() and lazily publishing our consumer index back
 * to the sequencer.
 */
void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
	struct target_cmd *cmd;

	/*
	 * If the card supports auto-access pause,
	 * we can access the card directly regardless
	 * of whether it is paused or not.
	 */
	if ((ahc->features & AHC_AUTOPAUSE) != 0)
		paused = TRUE;

	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
7685 */ 7686 if (ahc_handle_target_cmd(ahc, cmd) != 0) 7687 break; 7688 7689 cmd->cmd_valid = 0; 7690 aic_dmamap_sync(ahc, ahc->shared_data_dmat, 7691 ahc->shared_data_dmamap, 7692 ahc_targetcmd_offset(ahc, ahc->tqinfifonext), 7693 sizeof(struct target_cmd), 7694 BUS_DMASYNC_PREREAD); 7695 ahc->tqinfifonext++; 7696 7697 /* 7698 * Lazily update our position in the target mode incoming 7699 * command queue as seen by the sequencer. 7700 */ 7701 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 7702 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 7703 u_int hs_mailbox; 7704 7705 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 7706 hs_mailbox &= ~HOST_TQINPOS; 7707 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 7708 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 7709 } else { 7710 if (!paused) 7711 ahc_pause(ahc); 7712 ahc_outb(ahc, KERNEL_TQINPOS, 7713 ahc->tqinfifonext & HOST_TQINPOS); 7714 if (!paused) 7715 ahc_unpause(ahc); 7716 } 7717 } 7718 } 7719 } 7720 7721 static int 7722 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 7723 { 7724 struct ahc_tmode_tstate *tstate; 7725 struct ahc_tmode_lstate *lstate; 7726 struct ccb_accept_tio *atio; 7727 uint8_t *byte; 7728 int initiator; 7729 int target; 7730 int lun; 7731 7732 initiator = SCSIID_TARGET(ahc, cmd->scsiid); 7733 target = SCSIID_OUR_ID(cmd->scsiid); 7734 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 7735 7736 byte = cmd->bytes; 7737 tstate = ahc->enabled_targets[target]; 7738 lstate = NULL; 7739 if (tstate != NULL) 7740 lstate = tstate->enabled_luns[lun]; 7741 7742 /* 7743 * Commands for disabled luns go to the black hole driver. 7744 */ 7745 if (lstate == NULL) 7746 lstate = ahc->black_hole; 7747 7748 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 7749 if (atio == NULL) { 7750 ahc->flags |= AHC_TQINFIFO_BLOCKED; 7751 /* 7752 * Wait for more ATIOs from the peripheral driver for this lun. 
7753 */ 7754 if (bootverbose) 7755 printf("%s: ATIOs exhausted\n", ahc_name(ahc)); 7756 return (1); 7757 } else 7758 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 7759 #if 0 7760 printf("Incoming command from %d for %d:%d%s\n", 7761 initiator, target, lun, 7762 lstate == ahc->black_hole ? "(Black Holed)" : ""); 7763 #endif 7764 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 7765 7766 if (lstate == ahc->black_hole) { 7767 /* Fill in the wildcards */ 7768 atio->ccb_h.target_id = target; 7769 atio->ccb_h.target_lun = lun; 7770 } 7771 7772 /* 7773 * Package it up and send it off to 7774 * whomever has this lun enabled. 7775 */ 7776 atio->sense_len = 0; 7777 atio->init_id = initiator; 7778 if (byte[0] != 0xFF) { 7779 /* Tag was included */ 7780 atio->tag_action = *byte++; 7781 atio->tag_id = *byte++; 7782 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 7783 } else { 7784 atio->ccb_h.flags = 0; 7785 } 7786 byte++; 7787 7788 /* Okay. Now determine the cdb size based on the command code */ 7789 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 7790 case 0: 7791 atio->cdb_len = 6; 7792 break; 7793 case 1: 7794 case 2: 7795 atio->cdb_len = 10; 7796 break; 7797 case 4: 7798 atio->cdb_len = 16; 7799 break; 7800 case 5: 7801 atio->cdb_len = 12; 7802 break; 7803 case 3: 7804 default: 7805 /* Only copy the opcode. */ 7806 atio->cdb_len = 1; 7807 printf("Reserved or VU command code type encountered\n"); 7808 break; 7809 } 7810 7811 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 7812 7813 atio->ccb_h.status |= CAM_CDB_RECVD; 7814 7815 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 7816 /* 7817 * We weren't allowed to disconnect. 7818 * We're hanging on the bus until a 7819 * continue target I/O comes in response 7820 * to this accept tio. 
7821 */ 7822 #if 0 7823 printf("Received Immediate Command %d:%d:%d - %p\n", 7824 initiator, target, lun, ahc->pending_device); 7825 #endif 7826 ahc->pending_device = lstate; 7827 aic_freeze_ccb((union ccb *)atio); 7828 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 7829 } 7830 xpt_done((union ccb*)atio); 7831 return (0); 7832 } 7833 7834 #endif 7835