1 /* 2 * Core routines and tables shareable across OS platforms. 3 * 4 * Copyright (c) 1994-2001 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * Alternatively, this software may be distributed under the terms of the 17 * GNU Public License ("GPL"). 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * $Id: //depot/src/aic7xxx/aic7xxx.c#39 $ 32 * 33 * $FreeBSD$ 34 */ 35 36 #include <dev/aic7xxx/aic7xxx_freebsd.h> 37 #include <dev/aic7xxx/aic7xxx_inline.h> 38 #include <dev/aic7xxx/aicasm/aicasm_insformat.h> 39 40 /****************************** Softc Data ************************************/ 41 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq); 42 43 /***************************** Lookup Tables **********************************/ 44 char *ahc_chip_names[] = 45 { 46 "NONE", 47 "aic7770", 48 "aic7850", 49 "aic7855", 50 "aic7859", 51 "aic7860", 52 "aic7870", 53 "aic7880", 54 "aic7895", 55 "aic7895C", 56 "aic7890/91", 57 "aic7896/97", 58 "aic7892", 59 "aic7899" 60 }; 61 static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names); 62 63 /* 64 * Hardware error codes. 65 */ 66 struct ahc_hard_error_entry { 67 uint8_t errno; 68 char *errmesg; 69 }; 70 71 static struct ahc_hard_error_entry ahc_hard_errors[] = { 72 { ILLHADDR, "Illegal Host Access" }, 73 { ILLSADDR, "Illegal Sequencer Address referrenced" }, 74 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 75 { SQPARERR, "Sequencer Parity Error" }, 76 { DPARERR, "Data-path Parity Error" }, 77 { MPARERR, "Scratch or SCB Memory Parity Error" }, 78 { PCIERRSTAT, "PCI Error detected" }, 79 { CIOPARERR, "CIOBUS Parity Error" }, 80 }; 81 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors); 82 83 static struct ahc_phase_table_entry ahc_phase_table[] = 84 { 85 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 86 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 87 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, 88 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, 89 { P_COMMAND, MSG_NOOP, "in Command phase" }, 90 { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, 91 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, 92 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, 93 { P_BUSFREE, MSG_NOOP, "while idle" }, 94 { 0, MSG_NOOP, "in unknown phase" } 95 }; 96 97 /* 98 * In most cases we only wish to itterate over real phases, so 99 * exclude the last element from the count. 100 */ 101 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1; 102 103 /* 104 * Valid SCSIRATE values. (p. 3-17) 105 * Provides a mapping of tranfer periods in ns to the proper value to 106 * stick in the scsixfer reg. 107 */ 108 static struct ahc_syncrate ahc_syncrates[] = 109 { 110 /* ultra2 fast/ultra period rate */ 111 { 0x42, 0x000, 9, "80.0" }, 112 { 0x03, 0x000, 10, "40.0" }, 113 { 0x04, 0x000, 11, "33.0" }, 114 { 0x05, 0x100, 12, "20.0" }, 115 { 0x06, 0x110, 15, "16.0" }, 116 { 0x07, 0x120, 18, "13.4" }, 117 { 0x08, 0x000, 25, "10.0" }, 118 { 0x19, 0x010, 31, "8.0" }, 119 { 0x1a, 0x020, 37, "6.67" }, 120 { 0x1b, 0x030, 43, "5.7" }, 121 { 0x1c, 0x040, 50, "5.0" }, 122 { 0x00, 0x050, 56, "4.4" }, 123 { 0x00, 0x060, 62, "4.0" }, 124 { 0x00, 0x070, 68, "3.6" }, 125 { 0x00, 0x000, 0, NULL } 126 }; 127 128 /* Our Sequencer Program */ 129 #include "aic7xxx_seq.h" 130 131 /**************************** Function Declarations ***************************/ 132 static struct ahc_tmode_tstate* 133 ahc_alloc_tstate(struct ahc_softc *ahc, 134 u_int scsi_id, char channel); 135 #ifdef AHC_TARGET_MODE 136 static void ahc_free_tstate(struct ahc_softc *ahc, 137 u_int scsi_id, char channel, int force); 138 #endif 139 static struct ahc_syncrate* 140 ahc_devlimited_syncrate(struct ahc_softc *ahc, 141 struct ahc_initiator_tinfo *, 142 u_int *period, 143 u_int *ppr_options, 144 role_t role); 145 static void ahc_update_pending_scbs(struct ahc_softc *ahc); 146 static void ahc_fetch_devinfo(struct ahc_softc *ahc, 147 struct ahc_devinfo *devinfo); 148 static void ahc_scb_devinfo(struct ahc_softc *ahc, 149 struct ahc_devinfo *devinfo, 150 struct scb *scb); 151 static void ahc_assert_atn(struct ahc_softc *ahc); 152 static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, 153 struct ahc_devinfo *devinfo, 154 struct scb *scb); 155 static void ahc_build_transfer_msg(struct ahc_softc *ahc, 156 struct ahc_devinfo *devinfo); 157 static void ahc_construct_sdtr(struct ahc_softc *ahc, 158 struct ahc_devinfo *devinfo, 159 u_int period, u_int offset); 160 static void ahc_construct_wdtr(struct ahc_softc *ahc, 161 struct ahc_devinfo *devinfo, 162 u_int bus_width); 163 static void ahc_construct_ppr(struct ahc_softc *ahc, 164 struct ahc_devinfo *devinfo, 165 u_int period, u_int offset, 166 u_int bus_width, u_int ppr_options); 167 static void ahc_clear_msg_state(struct ahc_softc *ahc); 168 static void ahc_handle_message_phase(struct ahc_softc *ahc); 169 typedef enum { 170 AHCMSG_1B, 171 AHCMSG_2B, 172 AHCMSG_EXT 173 } ahc_msgtype; 174 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 175 u_int msgval, int full); 176 static int ahc_parse_msg(struct ahc_softc *ahc, 177 struct ahc_devinfo *devinfo); 178 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 179 struct ahc_devinfo *devinfo); 180 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 181 struct ahc_devinfo *devinfo); 182 static void ahc_handle_devreset(struct ahc_softc *ahc, 183 struct ahc_devinfo *devinfo, 184 cam_status status, char *message, 185 int verbose_level); 186 #if AHC_TARGET_MODE 187 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 188 struct ahc_devinfo *devinfo, 189 struct scb *scb); 190 #endif 191 192 static bus_dmamap_callback_t ahc_dmamap_cb; 193 static void ahc_build_free_scb_list(struct ahc_softc *ahc); 194 static int ahc_init_scbdata(struct ahc_softc *ahc); 195 static void ahc_fini_scbdata(struct ahc_softc *ahc); 196 static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 197 struct scb *prev_scb, 198 struct scb *scb); 199 static int ahc_qinfifo_count(struct ahc_softc *ahc); 200 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 201 u_int prev, u_int scbptr); 202 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 203 static u_int ahc_rem_wscb(struct ahc_softc *ahc, 204 u_int scbpos, u_int prev); 205 static int ahc_abort_scbs(struct ahc_softc *ahc, int target, 206 char channel, int lun, u_int tag, 207 role_t role, uint32_t status); 208 static void ahc_reset_current_bus(struct ahc_softc *ahc); 209 #ifdef AHC_DUMP_SEQ 210 static void ahc_dumpseq(struct ahc_softc *ahc); 211 #endif 212 static void ahc_loadseq(struct ahc_softc *ahc); 213 static int ahc_check_patch(struct ahc_softc *ahc, 214 struct patch **start_patch, 215 u_int start_instr, u_int *skip_addr); 216 static void ahc_download_instr(struct ahc_softc *ahc, 217 u_int instrptr, uint8_t *dconsts); 218 #ifdef AHC_TARGET_MODE 219 static void ahc_queue_lstate_event(struct ahc_softc *ahc, 220 struct ahc_tmode_lstate *lstate, 221 u_int initiator_id, 222 u_int event_type, 223 u_int event_arg); 224 static void ahc_update_scsiid(struct ahc_softc *ahc, 225 u_int targid_mask); 226 static int ahc_handle_target_cmd(struct ahc_softc *ahc, 227 struct target_cmd *cmd); 228 #endif 229 /************************* Sequencer Execution Control ************************/ 230 /* 231 * Restart the sequencer program from address zero 232 */ 233 void 234 ahc_restart(struct ahc_softc *ahc) 235 { 236 237 ahc_pause(ahc); 238 239 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ 240 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ 241 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 242 243 /* 244 * Ensure that the sequencer's idea of TQINPOS 245 * matches our own. The sequencer increments TQINPOS 246 * only after it sees a DMA complete and a reset could 247 * occur before the increment leaving the kernel to believe 248 * the command arrived but the sequencer to not. 249 */ 250 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 251 252 /* Always allow reselection */ 253 ahc_outb(ahc, SCSISEQ, 254 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); 255 if ((ahc->features & AHC_CMD_CHAN) != 0) { 256 /* Ensure that no DMA operations are in progress */ 257 ahc_outb(ahc, CCSCBCNT, 0); 258 ahc_outb(ahc, CCSGCTL, 0); 259 ahc_outb(ahc, CCSCBCTL, 0); 260 } 261 /* 262 * If we were in the process of DMA'ing SCB data into 263 * an SCB, replace that SCB on the free list. This prevents 264 * an SCB leak. 265 */ 266 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { 267 ahc_add_curscb_to_free_list(ahc); 268 ahc_outb(ahc, SEQ_FLAGS2, 269 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 270 } 271 ahc_outb(ahc, MWI_RESIDUAL, 0); 272 ahc_outb(ahc, SEQCTL, FASTMODE); 273 ahc_outb(ahc, SEQADDR0, 0); 274 ahc_outb(ahc, SEQADDR1, 0); 275 ahc_unpause(ahc); 276 } 277 278 /************************* Input/Output Queues ********************************/ 279 void 280 ahc_run_qoutfifo(struct ahc_softc *ahc) 281 { 282 struct scb *scb; 283 u_int scb_index; 284 285 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 286 287 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 288 if ((ahc->qoutfifonext & 0x03) == 0x03) { 289 u_int modnext; 290 291 /* 292 * Clear 32bits of QOUTFIFO at a time 293 * so that we don't clobber an incoming 294 * byte DMA to the array on architectures 295 * that only support 32bit load and store 296 * operations. 297 */ 298 modnext = ahc->qoutfifonext & ~0x3; 299 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 300 } 301 ahc->qoutfifonext++; 302 303 scb = ahc_lookup_scb(ahc, scb_index); 304 if (scb == NULL) { 305 printf("%s: WARNING no command for scb %d " 306 "(cmdcmplt)\nQOUTPOS = %d\n", 307 ahc_name(ahc), scb_index, 308 ahc->qoutfifonext - 1); 309 continue; 310 } 311 312 /* 313 * Save off the residual 314 * if there is one. 315 */ 316 ahc_update_residual(scb); 317 ahc_done(ahc, scb); 318 } 319 } 320 321 void 322 ahc_run_untagged_queues(struct ahc_softc *ahc) 323 { 324 int i; 325 326 for (i = 0; i < 16; i++) 327 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 328 } 329 330 void 331 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 332 { 333 struct scb *scb; 334 335 if (ahc->untagged_queue_lock != 0) 336 return; 337 338 if ((scb = TAILQ_FIRST(queue)) != NULL 339 && (scb->flags & SCB_ACTIVE) == 0) { 340 scb->flags |= SCB_ACTIVE; 341 ahc_queue_scb(ahc, scb); 342 } 343 } 344 345 /************************* Interrupt Handling *********************************/ 346 void 347 ahc_handle_brkadrint(struct ahc_softc *ahc) 348 { 349 /* 350 * We upset the sequencer :-( 351 * Lookup the error message 352 */ 353 int i; 354 int error; 355 356 error = ahc_inb(ahc, ERROR); 357 for (i = 0; error != 1 && i < num_errors; i++) 358 error >>= 1; 359 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 360 ahc_name(ahc), ahc_hard_errors[i].errmesg, 361 ahc_inb(ahc, SEQADDR0) | 362 (ahc_inb(ahc, SEQADDR1) << 8)); 363 364 ahc_dump_card_state(ahc); 365 366 /* Tell everyone that this HBA is no longer availible */ 367 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 368 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 369 CAM_NO_HBA); 370 371 /* Disable all interrupt sources by resetting the controller */ 372 ahc_shutdown(ahc); 373 } 374 375 void 376 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 377 { 378 struct scb *scb; 379 struct ahc_devinfo devinfo; 380 381 ahc_fetch_devinfo(ahc, &devinfo); 382 383 /* 384 * Clear the upper byte that holds SEQINT status 385 * codes and clear the SEQINT bit. We will unpause 386 * the sequencer, if appropriate, after servicing 387 * the request. 388 */ 389 ahc_outb(ahc, CLRINT, CLRSEQINT); 390 switch (intstat & SEQINT_MASK) { 391 case BAD_STATUS: 392 { 393 u_int scb_index; 394 struct hardware_scb *hscb; 395 396 /* 397 * Set the default return value to 0 (don't 398 * send sense). The sense code will change 399 * this if needed. 400 */ 401 ahc_outb(ahc, RETURN_1, 0); 402 403 /* 404 * The sequencer will notify us when a command 405 * has an error that would be of interest to 406 * the kernel. This allows us to leave the sequencer 407 * running in the common case of command completes 408 * without error. The sequencer will already have 409 * dma'd the SCB back up to us, so we can reference 410 * the in kernel copy directly. 411 */ 412 scb_index = ahc_inb(ahc, SCB_TAG); 413 scb = ahc_lookup_scb(ahc, scb_index); 414 if (scb == NULL) { 415 printf("%s:%c:%d: ahc_intr - referenced scb " 416 "not valid during seqint 0x%x scb(%d)\n", 417 ahc_name(ahc), devinfo.channel, 418 devinfo.target, intstat, scb_index); 419 ahc_dump_card_state(ahc); 420 panic("for safety"); 421 goto unpause; 422 } 423 424 hscb = scb->hscb; 425 426 /* Don't want to clobber the original sense code */ 427 if ((scb->flags & SCB_SENSE) != 0) { 428 /* 429 * Clear the SCB_SENSE Flag and have 430 * the sequencer do a normal command 431 * complete. 432 */ 433 scb->flags &= ~SCB_SENSE; 434 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 435 break; 436 } 437 ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 438 /* Freeze the queue until the client sees the error. */ 439 ahc_freeze_devq(ahc, scb); 440 ahc_freeze_scb(scb); 441 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 442 switch (hscb->shared_data.status.scsi_status) { 443 case SCSI_STATUS_OK: 444 printf("%s: Interrupted for staus of 0???\n", 445 ahc_name(ahc)); 446 break; 447 case SCSI_STATUS_CMD_TERMINATED: 448 case SCSI_STATUS_CHECK_COND: 449 { 450 struct ahc_dma_seg *sg; 451 struct scsi_sense *sc; 452 struct ahc_initiator_tinfo *targ_info; 453 struct ahc_tmode_tstate *tstate; 454 struct ahc_transinfo *tinfo; 455 #ifdef AHC_DEBUG 456 if (ahc_debug & AHC_SHOWSENSE) { 457 ahc_print_path(ahc, scb); 458 printf("SCB %d: requests Check Status\n", 459 scb->hscb->tag); 460 } 461 #endif 462 463 if (ahc_perform_autosense(scb) == 0) 464 break; 465 466 targ_info = ahc_fetch_transinfo(ahc, 467 devinfo.channel, 468 devinfo.our_scsiid, 469 devinfo.target, 470 &tstate); 471 tinfo = &targ_info->curr; 472 sg = scb->sg_list; 473 sc = (struct scsi_sense *)(&hscb->shared_data.cdb); 474 /* 475 * Save off the residual if there is one. 476 */ 477 ahc_update_residual(scb); 478 #ifdef AHC_DEBUG 479 if (ahc_debug & AHC_SHOWSENSE) { 480 ahc_print_path(ahc, scb); 481 printf("Sending Sense\n"); 482 } 483 #endif 484 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 485 sg->len = ahc_get_sense_bufsize(ahc, scb); 486 sg->len |= AHC_DMA_LAST_SEG; 487 488 /* Fixup byte order */ 489 sg->addr = ahc_htole32(sg->addr); 490 sg->len = ahc_htole32(sg->len); 491 492 sc->opcode = REQUEST_SENSE; 493 sc->byte2 = 0; 494 if (tinfo->protocol_version <= SCSI_REV_2 495 && SCB_GET_LUN(scb) < 8) 496 sc->byte2 = SCB_GET_LUN(scb) << 5; 497 sc->unused[0] = 0; 498 sc->unused[1] = 0; 499 sc->length = sg->len; 500 sc->control = 0; 501 502 /* 503 * We can't allow the target to disconnect. 504 * This will be an untagged transaction and 505 * having the target disconnect will make this 506 * transaction indestinguishable from outstanding 507 * tagged transactions. 508 */ 509 hscb->control = 0; 510 511 /* 512 * This request sense could be because the 513 * the device lost power or in some other 514 * way has lost our transfer negotiations. 515 * Renegotiate if appropriate. Unit attention 516 * errors will be reported before any data 517 * phases occur. 518 */ 519 if (ahc_get_residual(scb) 520 == ahc_get_transfer_length(scb)) { 521 ahc_update_neg_request(ahc, &devinfo, 522 tstate, targ_info, 523 /*force*/TRUE); 524 } 525 if (tstate->auto_negotiate & devinfo.target_mask) { 526 hscb->control |= MK_MESSAGE; 527 scb->flags &= ~SCB_NEGOTIATE; 528 scb->flags |= SCB_AUTO_NEGOTIATE; 529 } 530 hscb->cdb_len = sizeof(*sc); 531 hscb->dataptr = sg->addr; 532 hscb->datacnt = sg->len; 533 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; 534 hscb->sgptr = ahc_htole32(hscb->sgptr); 535 scb->sg_count = 1; 536 scb->flags |= SCB_SENSE; 537 ahc_qinfifo_requeue_tail(ahc, scb); 538 ahc_outb(ahc, RETURN_1, SEND_SENSE); 539 #ifdef __FreeBSD__ 540 /* 541 * Ensure we have enough time to actually 542 * retrieve the sense. 543 */ 544 untimeout(ahc_timeout, (caddr_t)scb, 545 scb->io_ctx->ccb_h.timeout_ch); 546 scb->io_ctx->ccb_h.timeout_ch = 547 timeout(ahc_timeout, (caddr_t)scb, 5 * hz); 548 #endif 549 break; 550 } 551 default: 552 break; 553 } 554 break; 555 } 556 case NO_MATCH: 557 { 558 /* Ensure we don't leave the selection hardware on */ 559 ahc_outb(ahc, SCSISEQ, 560 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 561 562 printf("%s:%c:%d: no active SCB for reconnecting " 563 "target - issuing BUS DEVICE RESET\n", 564 ahc_name(ahc), devinfo.channel, devinfo.target); 565 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 566 "ARG_1 == 0x%x ACCUM = 0x%x\n", 567 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 568 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 569 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 570 "SINDEX == 0x%x\n", 571 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 572 ahc_index_busy_tcl(ahc, 573 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 574 ahc_inb(ahc, SAVED_LUN))), 575 ahc_inb(ahc, SINDEX)); 576 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 577 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 578 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 579 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 580 ahc_inb(ahc, SCB_CONTROL)); 581 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 582 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 583 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 584 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 585 ahc_dump_card_state(ahc); 586 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 587 ahc->msgout_len = 1; 588 ahc->msgout_index = 0; 589 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 590 ahc_outb(ahc, MSG_OUT, HOST_MSG); 591 ahc_assert_atn(ahc); 592 break; 593 } 594 case SEND_REJECT: 595 { 596 u_int rejbyte = ahc_inb(ahc, ACCUM); 597 printf("%s:%c:%d: Warning - unknown message received from " 598 "target (0x%x). Rejecting\n", 599 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 600 break; 601 } 602 case NO_IDENT: 603 { 604 /* 605 * The reconnecting target either did not send an identify 606 * message, or did, but we didn't find an SCB to match and 607 * before it could respond to our ATN/abort, it hit a dataphase. 608 * The only safe thing to do is to blow it away with a bus 609 * reset. 610 */ 611 int found; 612 613 printf("%s:%c:%d: Target did not send an IDENTIFY message. " 614 "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n", 615 ahc_name(ahc), devinfo.channel, devinfo.target, 616 ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID)); 617 found = ahc_reset_channel(ahc, devinfo.channel, 618 /*initiate reset*/TRUE); 619 printf("%s: Issued Channel %c Bus Reset. " 620 "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel, 621 found); 622 return; 623 } 624 case IGN_WIDE_RES: 625 ahc_handle_ign_wide_residue(ahc, &devinfo); 626 break; 627 case BAD_PHASE: 628 { 629 u_int lastphase; 630 631 lastphase = ahc_inb(ahc, LASTPHASE); 632 printf("%s:%c:%d: unknown scsi bus phase %x, " 633 "lastphase = 0x%x. Attempting to continue\n", 634 ahc_name(ahc), devinfo.channel, devinfo.target, 635 lastphase, ahc_inb(ahc, SCSISIGI)); 636 break; 637 } 638 case MISSED_BUSFREE: 639 { 640 u_int lastphase; 641 642 lastphase = ahc_inb(ahc, LASTPHASE); 643 printf("%s:%c:%d: Missed busfree. " 644 "Lastphase = 0x%x, Curphase = 0x%x\n", 645 ahc_name(ahc), devinfo.channel, devinfo.target, 646 lastphase, ahc_inb(ahc, SCSISIGI)); 647 ahc_restart(ahc); 648 return; 649 } 650 case HOST_MSG_LOOP: 651 { 652 /* 653 * The sequencer has encountered a message phase 654 * that requires host assistance for completion. 655 * While handling the message phase(s), we will be 656 * notified by the sequencer after each byte is 657 * transfered so we can track bus phase changes. 658 * 659 * If this is the first time we've seen a HOST_MSG_LOOP 660 * interrupt, initialize the state of the host message 661 * loop. 662 */ 663 if (ahc->msg_type == MSG_TYPE_NONE) { 664 struct scb *scb; 665 u_int scb_index; 666 u_int bus_phase; 667 668 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 669 if (bus_phase != P_MESGIN 670 && bus_phase != P_MESGOUT) { 671 printf("ahc_intr: HOST_MSG_LOOP bad " 672 "phase 0x%x\n", 673 bus_phase); 674 /* 675 * Probably transitioned to bus free before 676 * we got here. Just punt the message. 677 */ 678 ahc_clear_intstat(ahc); 679 ahc_restart(ahc); 680 return; 681 } 682 683 scb_index = ahc_inb(ahc, SCB_TAG); 684 scb = ahc_lookup_scb(ahc, scb_index); 685 if (devinfo.role == ROLE_INITIATOR) { 686 if (scb == NULL) 687 panic("HOST_MSG_LOOP with " 688 "invalid SCB %x\n", scb_index); 689 690 if (bus_phase == P_MESGOUT) 691 ahc_setup_initiator_msgout(ahc, 692 &devinfo, 693 scb); 694 else { 695 ahc->msg_type = 696 MSG_TYPE_INITIATOR_MSGIN; 697 ahc->msgin_index = 0; 698 } 699 } else { 700 if (bus_phase == P_MESGOUT) { 701 ahc->msg_type = 702 MSG_TYPE_TARGET_MSGOUT; 703 ahc->msgin_index = 0; 704 } 705 #if AHC_TARGET_MODE 706 else 707 ahc_setup_target_msgin(ahc, 708 &devinfo, 709 scb); 710 #endif 711 } 712 } 713 714 ahc_handle_message_phase(ahc); 715 break; 716 } 717 case PERR_DETECTED: 718 { 719 /* 720 * If we've cleared the parity error interrupt 721 * but the sequencer still believes that SCSIPERR 722 * is true, it must be that the parity error is 723 * for the currently presented byte on the bus, 724 * and we are not in a phase (data-in) where we will 725 * eventually ack this byte. Ack the byte and 726 * throw it away in the hope that the target will 727 * take us to message out to deliver the appropriate 728 * error message. 729 */ 730 if ((intstat & SCSIINT) == 0 731 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 732 733 if ((ahc->features & AHC_DT) == 0) { 734 u_int curphase; 735 736 /* 737 * The hardware will only let you ack bytes 738 * if the expected phase in SCSISIGO matches 739 * the current phase. Make sure this is 740 * currently the case. 741 */ 742 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 743 ahc_outb(ahc, LASTPHASE, curphase); 744 ahc_outb(ahc, SCSISIGO, curphase); 745 } 746 ahc_inb(ahc, SCSIDATL); 747 } 748 break; 749 } 750 case DATA_OVERRUN: 751 { 752 /* 753 * When the sequencer detects an overrun, it 754 * places the controller in "BITBUCKET" mode 755 * and allows the target to complete its transfer. 756 * Unfortunately, none of the counters get updated 757 * when the controller is in this mode, so we have 758 * no way of knowing how large the overrun was. 759 */ 760 u_int scbindex = ahc_inb(ahc, SCB_TAG); 761 u_int lastphase = ahc_inb(ahc, LASTPHASE); 762 u_int i; 763 764 scb = ahc_lookup_scb(ahc, scbindex); 765 for (i = 0; i < num_phases; i++) { 766 if (lastphase == ahc_phase_table[i].phase) 767 break; 768 } 769 ahc_print_path(ahc, scb); 770 printf("data overrun detected %s." 771 " Tag == 0x%x.\n", 772 ahc_phase_table[i].phasemsg, 773 scb->hscb->tag); 774 ahc_print_path(ahc, scb); 775 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 776 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 777 ahc_get_transfer_length(scb), scb->sg_count); 778 if (scb->sg_count > 0) { 779 for (i = 0; i < scb->sg_count; i++) { 780 printf("sg[%d] - Addr 0x%x : Length %d\n", 781 i, 782 ahc_le32toh(scb->sg_list[i].addr), 783 ahc_le32toh(scb->sg_list[i].len) 784 & AHC_SG_LEN_MASK); 785 } 786 } 787 /* 788 * Set this and it will take effect when the 789 * target does a command complete. 790 */ 791 ahc_freeze_devq(ahc, scb); 792 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); 793 ahc_freeze_scb(scb); 794 break; 795 } 796 case MKMSG_FAILED: 797 { 798 u_int scbindex; 799 800 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 801 ahc_name(ahc), devinfo.channel, devinfo.target, 802 devinfo.lun); 803 scbindex = ahc_inb(ahc, SCB_TAG); 804 scb = ahc_lookup_scb(ahc, scbindex); 805 if (scb != NULL 806 && (scb->flags & SCB_RECOVERY_SCB) != 0) 807 /* 808 * Ensure that we didn't put a second instance of this 809 * SCB into the QINFIFO. 810 */ 811 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 812 SCB_GET_CHANNEL(ahc, scb), 813 SCB_GET_LUN(scb), scb->hscb->tag, 814 ROLE_INITIATOR, /*status*/0, 815 SEARCH_REMOVE); 816 break; 817 } 818 case NO_FREE_SCB: 819 { 820 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 821 ahc_dump_card_state(ahc); 822 panic("for safety"); 823 break; 824 } 825 case SCB_MISMATCH: 826 { 827 u_int scbptr; 828 829 scbptr = ahc_inb(ahc, SCBPTR); 830 printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", 831 scbptr, ahc_inb(ahc, ARG_1), 832 ahc->scb_data->hscbs[scbptr].tag); 833 ahc_dump_card_state(ahc); 834 panic("for saftey"); 835 break; 836 } 837 case OUT_OF_RANGE: 838 { 839 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 840 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 841 "ARG_1 == 0x%x ACCUM = 0x%x\n", 842 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 843 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 844 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 845 "SINDEX == 0x%x\n, A == 0x%x\n", 846 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 847 ahc_index_busy_tcl(ahc, 848 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 849 ahc_inb(ahc, SAVED_LUN))), 850 ahc_inb(ahc, SINDEX), 851 ahc_inb(ahc, ACCUM)); 852 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 853 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 854 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 855 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 856 ahc_inb(ahc, SCB_CONTROL)); 857 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 858 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 859 ahc_dump_card_state(ahc); 860 panic("for safety"); 861 break; 862 } 863 default: 864 printf("ahc_intr: seqint, " 865 "intstat == 0x%x, scsisigi = 0x%x\n", 866 intstat, ahc_inb(ahc, SCSISIGI)); 867 break; 868 } 869 unpause: 870 /* 871 * The sequencer is paused immediately on 872 * a SEQINT, so we should restart it when 873 * we're done. 874 */ 875 ahc_unpause(ahc); 876 } 877 878 void 879 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 880 { 881 u_int scb_index; 882 u_int status0; 883 u_int status; 884 struct scb *scb; 885 char cur_channel; 886 char intr_channel; 887 888 /* Make sure the sequencer is in a safe location. */ 889 ahc_clear_critical_section(ahc); 890 891 if ((ahc->features & AHC_TWIN) != 0 892 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 893 cur_channel = 'B'; 894 else 895 cur_channel = 'A'; 896 intr_channel = cur_channel; 897 898 if ((ahc->features & AHC_ULTRA2) != 0) 899 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 900 else 901 status0 = 0; 902 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 903 if (status == 0 && status0 == 0) { 904 if ((ahc->features & AHC_TWIN) != 0) { 905 /* Try the other channel */ 906 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 907 status = ahc_inb(ahc, SSTAT1) 908 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 909 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 910 } 911 if (status == 0) { 912 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 913 ahc_outb(ahc, CLRINT, CLRSCSIINT); 914 ahc_unpause(ahc); 915 return; 916 } 917 } 918 919 scb_index = ahc_inb(ahc, SCB_TAG); 920 scb = ahc_lookup_scb(ahc, scb_index); 921 if (scb != NULL 922 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0) 923 scb = NULL; 924 925 if ((ahc->features & AHC_ULTRA2) != 0 926 && (status0 & IOERR) != 0) { 927 int now_lvd; 928 929 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 930 printf("%s: Transceiver State Has Changed to %s mode\n", 931 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 932 ahc_outb(ahc, CLRSINT0, CLRIOERR); 933 /* 934 * When transitioning to SE mode, the reset line 935 * glitches, triggering an arbitration bug in some 936 * Ultra2 controllers. This bug is cleared when we 937 * assert the reset line. Since a reset glitch has 938 * already occurred with this transition and a 939 * transceiver state change is handled just like 940 * a bus reset anyway, asserting the reset line 941 * ourselves is safe. 942 */ 943 ahc_reset_channel(ahc, intr_channel, 944 /*Initiate Reset*/now_lvd == 0); 945 } else if ((status & SCSIRSTI) != 0) { 946 printf("%s: Someone reset channel %c\n", 947 ahc_name(ahc), intr_channel); 948 if (intr_channel != cur_channel) 949 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 950 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 951 } else if ((status & SCSIPERR) != 0) { 952 /* 953 * Determine the bus phase and queue an appropriate message. 954 * SCSIPERR is latched true as soon as a parity error 955 * occurs. If the sequencer acked the transfer that 956 * caused the parity error and the currently presented 957 * transfer on the bus has correct parity, SCSIPERR will 958 * be cleared by CLRSCSIPERR. Use this to determine if 959 * we should look at the last phase the sequencer recorded, 960 * or the current phase presented on the bus. 961 */ 962 u_int mesg_out; 963 u_int curphase; 964 u_int errorphase; 965 u_int lastphase; 966 u_int scsirate; 967 u_int i; 968 u_int sstat2; 969 970 lastphase = ahc_inb(ahc, LASTPHASE); 971 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 972 sstat2 = ahc_inb(ahc, SSTAT2); 973 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 974 /* 975 * For all phases save DATA, the sequencer won't 976 * automatically ack a byte that has a parity error 977 * in it. So the only way that the current phase 978 * could be 'data-in' is if the parity error is for 979 * an already acked byte in the data phase. During 980 * synchronous data-in transfers, we may actually 981 * ack bytes before latching the current phase in 982 * LASTPHASE, leading to the discrepancy between 983 * curphase and lastphase. 984 */ 985 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 986 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 987 errorphase = curphase; 988 else 989 errorphase = lastphase; 990 991 for (i = 0; i < num_phases; i++) { 992 if (errorphase == ahc_phase_table[i].phase) 993 break; 994 } 995 mesg_out = ahc_phase_table[i].mesg_out; 996 if (scb != NULL) 997 ahc_print_path(ahc, scb); 998 else 999 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1000 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1001 scsirate = ahc_inb(ahc, SCSIRATE); 1002 printf("parity error detected %s. " 1003 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1004 ahc_phase_table[i].phasemsg, 1005 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8), 1006 scsirate); 1007 1008 if ((ahc->features & AHC_DT) != 0) { 1009 1010 if ((sstat2 & CRCVALERR) != 0) 1011 printf("\tCRC Value Mismatch\n"); 1012 if ((sstat2 & CRCENDERR) != 0) 1013 printf("\tNo terminal CRC packet recevied\n"); 1014 if ((sstat2 & CRCREQERR) != 0) 1015 printf("\tIllegal CRC packet request\n"); 1016 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1017 printf("\tUnexpected %sDT Data Phase\n", 1018 (scsirate & SINGLE_EDGE) ? "" : "non-"); 1019 } 1020 1021 /* 1022 * We've set the hardware to assert ATN if we 1023 * get a parity error on "in" phases, so all we 1024 * need to do is stuff the message buffer with 1025 * the appropriate message. "In" phases have set 1026 * mesg_out to something other than MSG_NOP. 1027 */ 1028 if (mesg_out != MSG_NOOP) { 1029 if (ahc->msg_type != MSG_TYPE_NONE) 1030 ahc->send_msg_perror = TRUE; 1031 else 1032 ahc_outb(ahc, MSG_OUT, mesg_out); 1033 } 1034 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1035 ahc_unpause(ahc); 1036 } else if ((status & SELTO) != 0) { 1037 u_int scbptr; 1038 1039 /* Stop the selection */ 1040 ahc_outb(ahc, SCSISEQ, 0); 1041 1042 /* No more pending messages */ 1043 ahc_clear_msg_state(ahc); 1044 1045 /* Clear interrupt state */ 1046 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1047 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1048 1049 /* 1050 * Although the driver does not care about the 1051 * 'Selection in Progress' status bit, the busy 1052 * LED does. SELINGO is only cleared by a sucessfull 1053 * selection, so we must manually clear it to insure 1054 * the LED turns off just incase no future successful 1055 * selections occur (e.g. no devices on the bus). 1056 */ 1057 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1058 1059 scbptr = ahc_inb(ahc, WAITING_SCBH); 1060 ahc_outb(ahc, SCBPTR, scbptr); 1061 scb_index = ahc_inb(ahc, SCB_TAG); 1062 1063 scb = ahc_lookup_scb(ahc, scb_index); 1064 if (scb == NULL) { 1065 printf("%s: ahc_intr - referenced scb not " 1066 "valid during SELTO scb(%d, %d)\n", 1067 ahc_name(ahc), scbptr, scb_index); 1068 } else { 1069 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1070 ahc_freeze_devq(ahc, scb); 1071 } 1072 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1073 ahc_restart(ahc); 1074 } else if ((status & BUSFREE) != 0 1075 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1076 u_int lastphase; 1077 u_int saved_scsiid; 1078 u_int saved_lun; 1079 u_int target; 1080 u_int initiator_role_id; 1081 char channel; 1082 int printerror; 1083 1084 /* 1085 * Clear our selection hardware as soon as possible. 1086 * We may have an entry in the waiting Q for this target, 1087 * that is affected by this busfree and we don't want to 1088 * go about selecting the target while we handle the event. 1089 */ 1090 ahc_outb(ahc, SCSISEQ, 1091 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1092 1093 /* 1094 * Disable busfree interrupts and clear the busfree 1095 * interrupt status. We do this here so that several 1096 * bus transactions occur prior to clearing the SCSIINT 1097 * latch. It can take a bit for the clearing to take effect. 1098 */ 1099 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1100 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1101 1102 /* 1103 * Look at what phase we were last in. 1104 * If its message out, chances are pretty good 1105 * that the busfree was in response to one of 1106 * our abort requests. 1107 */ 1108 lastphase = ahc_inb(ahc, LASTPHASE); 1109 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1110 saved_lun = ahc_inb(ahc, SAVED_LUN); 1111 target = SCSIID_TARGET(ahc, saved_scsiid); 1112 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1113 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1114 printerror = 1; 1115 1116 if (lastphase == P_MESGOUT) { 1117 struct ahc_devinfo devinfo; 1118 u_int tag; 1119 1120 ahc_fetch_devinfo(ahc, &devinfo); 1121 tag = SCB_LIST_NULL; 1122 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1123 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1124 if (ahc->msgout_buf[ahc->msgout_index - 1] 1125 == MSG_ABORT_TAG) 1126 tag = scb->hscb->tag; 1127 ahc_print_path(ahc, scb); 1128 printf("SCB %d - Abort%s Completed.\n", 1129 scb->hscb->tag, tag == SCB_LIST_NULL ? 1130 "" : " Tag"); 1131 ahc_abort_scbs(ahc, target, channel, 1132 saved_lun, tag, 1133 ROLE_INITIATOR, 1134 CAM_REQ_ABORTED); 1135 printerror = 0; 1136 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1137 MSG_BUS_DEV_RESET, TRUE)) { 1138 struct ahc_devinfo devinfo; 1139 #ifdef __FreeBSD__ 1140 /* 1141 * Don't mark the user's request for this BDR 1142 * as completing with CAM_BDR_SENT. CAM3 1143 * specifies CAM_REQ_CMP. 1144 */ 1145 if (scb != NULL 1146 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1147 && ahc_match_scb(ahc, scb, target, channel, 1148 CAM_LUN_WILDCARD, 1149 SCB_LIST_NULL, 1150 ROLE_INITIATOR)) { 1151 ahc_set_transaction_status(scb, CAM_REQ_CMP); 1152 } 1153 #endif 1154 ahc_compile_devinfo(&devinfo, 1155 initiator_role_id, 1156 target, 1157 CAM_LUN_WILDCARD, 1158 channel, 1159 ROLE_INITIATOR); 1160 ahc_handle_devreset(ahc, &devinfo, 1161 CAM_BDR_SENT, 1162 "Bus Device Reset", 1163 /*verbose_level*/0); 1164 printerror = 0; 1165 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1166 MSG_EXT_PPR, FALSE)) { 1167 struct ahc_initiator_tinfo *tinfo; 1168 struct ahc_tmode_tstate *tstate; 1169 1170 /* 1171 * PPR Rejected. Try non-ppr negotiation 1172 * and retry command. 1173 */ 1174 tinfo = ahc_fetch_transinfo(ahc, 1175 devinfo.channel, 1176 devinfo.our_scsiid, 1177 devinfo.target, 1178 &tstate); 1179 tinfo->curr.transport_version = 2; 1180 tinfo->goal.transport_version = 2; 1181 tinfo->goal.ppr_options = 0; 1182 ahc_qinfifo_requeue_tail(ahc, scb); 1183 printerror = 0; 1184 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1185 MSG_EXT_WDTR, FALSE) 1186 || ahc_sent_msg(ahc, AHCMSG_EXT, 1187 MSG_EXT_SDTR, FALSE)) { 1188 /* 1189 * Negotiation Rejected. Go-async and 1190 * retry command. 1191 */ 1192 ahc_set_width(ahc, &devinfo, 1193 MSG_EXT_WDTR_BUS_8_BIT, 1194 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1195 /*paused*/TRUE); 1196 ahc_set_syncrate(ahc, &devinfo, 1197 /*syncrate*/NULL, 1198 /*period*/0, /*offset*/0, 1199 /*ppr_options*/0, 1200 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1201 /*paused*/TRUE); 1202 ahc_qinfifo_requeue_tail(ahc, scb); 1203 printerror = 0; 1204 } 1205 } 1206 if (printerror != 0) { 1207 u_int i; 1208 1209 if (scb != NULL) { 1210 u_int tag; 1211 1212 if ((scb->hscb->control & TAG_ENB) != 0) 1213 tag = scb->hscb->tag; 1214 else 1215 tag = SCB_LIST_NULL; 1216 ahc_print_path(ahc, scb); 1217 ahc_abort_scbs(ahc, target, channel, 1218 SCB_GET_LUN(scb), tag, 1219 ROLE_INITIATOR, 1220 CAM_UNEXP_BUSFREE); 1221 } else { 1222 /* 1223 * We had not fully identified this connection, 1224 * so we cannot abort anything. 1225 */ 1226 printf("%s: ", ahc_name(ahc)); 1227 } 1228 for (i = 0; i < num_phases; i++) { 1229 if (lastphase == ahc_phase_table[i].phase) 1230 break; 1231 } 1232 printf("Unexpected busfree %s\n" 1233 "SEQADDR == 0x%x\n", 1234 ahc_phase_table[i].phasemsg, 1235 ahc_inb(ahc, SEQADDR0) 1236 | (ahc_inb(ahc, SEQADDR1) << 8)); 1237 } 1238 ahc_clear_msg_state(ahc); 1239 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1240 ahc_restart(ahc); 1241 } else { 1242 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1243 ahc_name(ahc), status); 1244 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1245 } 1246 } 1247 1248 #define AHC_MAX_STEPS 2000 1249 void 1250 ahc_clear_critical_section(struct ahc_softc *ahc) 1251 { 1252 int stepping; 1253 int steps; 1254 u_int simode0; 1255 u_int simode1; 1256 1257 if (ahc->num_critical_sections == 0) 1258 return; 1259 1260 stepping = FALSE; 1261 steps = 0; 1262 simode0 = 0; 1263 simode1 = 0; 1264 for (;;) { 1265 struct cs *cs; 1266 u_int seqaddr; 1267 u_int i; 1268 1269 seqaddr = ahc_inb(ahc, SEQADDR0) 1270 | (ahc_inb(ahc, SEQADDR1) << 8); 1271 1272 cs = ahc->critical_sections; 1273 for (i = 0; i < ahc->num_critical_sections; i++, cs++) { 1274 1275 if (cs->begin < seqaddr && cs->end >= seqaddr) 1276 break; 1277 } 1278 1279 if (i == ahc->num_critical_sections) 1280 break; 1281 1282 if (steps > AHC_MAX_STEPS) { 1283 printf("%s: Infinite loop in critical section\n", 1284 ahc_name(ahc)); 1285 ahc_dump_card_state(ahc); 1286 panic("critical section loop"); 1287 } 1288 1289 steps++; 1290 if (stepping == FALSE) { 1291 1292 /* 1293 * Disable all interrupt sources so that the 1294 * sequencer will not be stuck by a pausing 1295 * interrupt condition while we attempt to 1296 * leave a critical section. 1297 */ 1298 simode0 = ahc_inb(ahc, SIMODE0); 1299 ahc_outb(ahc, SIMODE0, 0); 1300 simode1 = ahc_inb(ahc, SIMODE1); 1301 ahc_outb(ahc, SIMODE1, 0); 1302 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1303 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP); 1304 stepping = TRUE; 1305 } 1306 ahc_outb(ahc, HCNTRL, ahc->unpause); 1307 do { 1308 ahc_delay(200); 1309 } while (!ahc_is_paused(ahc)); 1310 } 1311 if (stepping) { 1312 ahc_outb(ahc, SIMODE0, simode0); 1313 ahc_outb(ahc, SIMODE1, simode1); 1314 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP); 1315 } 1316 } 1317 1318 /* 1319 * Clear any pending interrupt status. 1320 */ 1321 void 1322 ahc_clear_intstat(struct ahc_softc *ahc) 1323 { 1324 /* Clear any interrupt conditions this may have caused */ 1325 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 1326 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 1327 CLRREQINIT); 1328 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 1329 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1330 } 1331 1332 /**************************** Debugging Routines ******************************/ 1333 void 1334 ahc_print_scb(struct scb *scb) 1335 { 1336 int i; 1337 1338 struct hardware_scb *hscb = scb->hscb; 1339 1340 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 1341 scb, 1342 hscb->control, 1343 hscb->scsiid, 1344 hscb->lun, 1345 hscb->cdb_len); 1346 i = 0; 1347 printf("Shared Data: %#02x %#02x %#02x %#02x\n", 1348 hscb->shared_data.cdb[i++], 1349 hscb->shared_data.cdb[i++], 1350 hscb->shared_data.cdb[i++], 1351 hscb->shared_data.cdb[i++]); 1352 printf(" %#02x %#02x %#02x %#02x\n", 1353 hscb->shared_data.cdb[i++], 1354 hscb->shared_data.cdb[i++], 1355 hscb->shared_data.cdb[i++], 1356 hscb->shared_data.cdb[i++]); 1357 printf(" %#02x %#02x %#02x %#02x\n", 1358 hscb->shared_data.cdb[i++], 1359 hscb->shared_data.cdb[i++], 1360 hscb->shared_data.cdb[i++], 1361 hscb->shared_data.cdb[i++]); 1362 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", 1363 ahc_le32toh(hscb->dataptr), 1364 ahc_le32toh(hscb->datacnt), 1365 ahc_le32toh(hscb->sgptr), 1366 hscb->tag); 1367 if (scb->sg_count > 0) { 1368 for (i = 0; i < scb->sg_count; i++) { 1369 printf("sg[%d] - Addr 0x%x : Length %d\n", 1370 i, 1371 ahc_le32toh(scb->sg_list[i].addr), 1372 ahc_le32toh(scb->sg_list[i].len)); 1373 } 1374 } 1375 } 1376 1377 /************************* Transfer Negotiation *******************************/ 1378 /* 1379 * Allocate per target mode instance (ID we respond to as a target) 1380 * transfer negotiation data structures. 1381 */ 1382 static struct ahc_tmode_tstate * 1383 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) 1384 { 1385 struct ahc_tmode_tstate *master_tstate; 1386 struct ahc_tmode_tstate *tstate; 1387 int i; 1388 1389 master_tstate = ahc->enabled_targets[ahc->our_id]; 1390 if (channel == 'B') { 1391 scsi_id += 8; 1392 master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; 1393 } 1394 if (ahc->enabled_targets[scsi_id] != NULL 1395 && ahc->enabled_targets[scsi_id] != master_tstate) 1396 panic("%s: ahc_alloc_tstate - Target already allocated", 1397 ahc_name(ahc)); 1398 tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); 1399 if (tstate == NULL) 1400 return (NULL); 1401 1402 /* 1403 * If we have allocated a master tstate, copy user settings from 1404 * the master tstate (taken from SRAM or the EEPROM) for this 1405 * channel, but reset our current and goal settings to async/narrow 1406 * until an initiator talks to us. 1407 */ 1408 if (master_tstate != NULL) { 1409 memcpy(tstate, master_tstate, sizeof(*tstate)); 1410 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 1411 tstate->ultraenb = 0; 1412 for (i = 0; i < 16; i++) { 1413 memset(&tstate->transinfo[i].curr, 0, 1414 sizeof(tstate->transinfo[i].curr)); 1415 memset(&tstate->transinfo[i].goal, 0, 1416 sizeof(tstate->transinfo[i].goal)); 1417 } 1418 } else 1419 memset(tstate, 0, sizeof(*tstate)); 1420 ahc->enabled_targets[scsi_id] = tstate; 1421 return (tstate); 1422 } 1423 1424 #ifdef AHC_TARGET_MODE 1425 /* 1426 * Free per target mode instance (ID we respond to as a target) 1427 * transfer negotiation data structures. 1428 */ 1429 static void 1430 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1431 { 1432 struct ahc_tmode_tstate *tstate; 1433 1434 /* 1435 * Don't clean up our "master" tstate. 1436 * It has our default user settings. 1437 */ 1438 if (((channel == 'B' && scsi_id == ahc->our_id_b) 1439 || (channel == 'A' && scsi_id == ahc->our_id)) 1440 && force == FALSE) 1441 return; 1442 1443 if (channel == 'B') 1444 scsi_id += 8; 1445 tstate = ahc->enabled_targets[scsi_id]; 1446 if (tstate != NULL) 1447 free(tstate, M_DEVBUF); 1448 ahc->enabled_targets[scsi_id] = NULL; 1449 } 1450 #endif 1451 1452 /* 1453 * Called when we have an active connection to a target on the bus, 1454 * this function finds the nearest syncrate to the input period limited 1455 * by the capabilities of the bus connectivity of and sync settings for 1456 * the target. 1457 */ 1458 struct ahc_syncrate * 1459 ahc_devlimited_syncrate(struct ahc_softc *ahc, 1460 struct ahc_initiator_tinfo *tinfo, 1461 u_int *period, u_int *ppr_options, role_t role) { 1462 struct ahc_transinfo *transinfo; 1463 u_int maxsync; 1464 1465 if ((ahc->features & AHC_ULTRA2) != 0) { 1466 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1467 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1468 maxsync = AHC_SYNCRATE_DT; 1469 } else { 1470 maxsync = AHC_SYNCRATE_ULTRA; 1471 /* Can't do DT on an SE bus */ 1472 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1473 } 1474 } else if ((ahc->features & AHC_ULTRA) != 0) { 1475 maxsync = AHC_SYNCRATE_ULTRA; 1476 } else { 1477 maxsync = AHC_SYNCRATE_FAST; 1478 } 1479 /* 1480 * Never allow a value higher than our current goal 1481 * period otherwise we may allow a target initiated 1482 * negotiation to go above the limit as set by the 1483 * user. In the case of an initiator initiated 1484 * sync negotiation, we limit based on the user 1485 * setting. This allows the system to still accept 1486 * incoming negotiations even if target initiated 1487 * negotiation is not performed. 1488 */ 1489 if (role == ROLE_TARGET) 1490 transinfo = &tinfo->user; 1491 else 1492 transinfo = &tinfo->goal; 1493 *ppr_options &= transinfo->ppr_options; 1494 if (transinfo->period == 0) { 1495 *period = 0; 1496 *ppr_options = 0; 1497 return (NULL); 1498 } 1499 *period = MAX(*period, transinfo->period); 1500 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); 1501 } 1502 1503 /* 1504 * Look up the valid period to SCSIRATE conversion in our table. 1505 * Return the period and offset that should be sent to the target 1506 * if this was the beginning of an SDTR. 1507 */ 1508 struct ahc_syncrate * 1509 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1510 u_int *ppr_options, u_int maxsync) 1511 { 1512 struct ahc_syncrate *syncrate; 1513 1514 if ((ahc->features & AHC_DT) == 0) 1515 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1516 1517 /* Skip all DT only entries if DT is not available */ 1518 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 1519 && maxsync < AHC_SYNCRATE_ULTRA2) 1520 maxsync = AHC_SYNCRATE_ULTRA2; 1521 1522 for (syncrate = &ahc_syncrates[maxsync]; 1523 syncrate->rate != NULL; 1524 syncrate++) { 1525 1526 /* 1527 * The Ultra2 table doesn't go as low 1528 * as for the Fast/Ultra cards. 1529 */ 1530 if ((ahc->features & AHC_ULTRA2) != 0 1531 && (syncrate->sxfr_u2 == 0)) 1532 break; 1533 1534 if (*period <= syncrate->period) { 1535 /* 1536 * When responding to a target that requests 1537 * sync, the requested rate may fall between 1538 * two rates that we can output, but still be 1539 * a rate that we can receive. Because of this, 1540 * we want to respond to the target with 1541 * the same rate that it sent to us even 1542 * if the period we use to send data to it 1543 * is lower. Only lower the response period 1544 * if we must. 1545 */ 1546 if (syncrate == &ahc_syncrates[maxsync]) 1547 *period = syncrate->period; 1548 1549 /* 1550 * At some speeds, we only support 1551 * ST transfers. 1552 */ 1553 if ((syncrate->sxfr_u2 & ST_SXFR) != 0) 1554 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1555 break; 1556 } 1557 } 1558 1559 if ((*period == 0) 1560 || (syncrate->rate == NULL) 1561 || ((ahc->features & AHC_ULTRA2) != 0 1562 && (syncrate->sxfr_u2 == 0))) { 1563 /* Use asynchronous transfers. */ 1564 *period = 0; 1565 syncrate = NULL; 1566 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1567 } 1568 return (syncrate); 1569 } 1570 1571 /* 1572 * Convert from an entry in our syncrate table to the SCSI equivalent 1573 * sync "period" factor. 1574 */ 1575 u_int 1576 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1577 { 1578 struct ahc_syncrate *syncrate; 1579 1580 if ((ahc->features & AHC_ULTRA2) != 0) 1581 scsirate &= SXFR_ULTRA2; 1582 else 1583 scsirate &= SXFR; 1584 1585 syncrate = &ahc_syncrates[maxsync]; 1586 while (syncrate->rate != NULL) { 1587 1588 if ((ahc->features & AHC_ULTRA2) != 0) { 1589 if (syncrate->sxfr_u2 == 0) 1590 break; 1591 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) 1592 return (syncrate->period); 1593 } else if (scsirate == (syncrate->sxfr & SXFR)) { 1594 return (syncrate->period); 1595 } 1596 syncrate++; 1597 } 1598 return (0); /* async */ 1599 } 1600 1601 /* 1602 * Truncate the given synchronous offset to a value the 1603 * current adapter type and syncrate are capable of. 1604 */ 1605 void 1606 ahc_validate_offset(struct ahc_softc *ahc, 1607 struct ahc_initiator_tinfo *tinfo, 1608 struct ahc_syncrate *syncrate, 1609 u_int *offset, int wide, role_t role) 1610 { 1611 u_int maxoffset; 1612 1613 /* Limit offset to what we can do */ 1614 if (syncrate == NULL) { 1615 maxoffset = 0; 1616 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1617 maxoffset = MAX_OFFSET_ULTRA2; 1618 } else { 1619 if (wide) 1620 maxoffset = MAX_OFFSET_16BIT; 1621 else 1622 maxoffset = MAX_OFFSET_8BIT; 1623 } 1624 *offset = MIN(*offset, maxoffset); 1625 if (tinfo != NULL) { 1626 if (role == ROLE_TARGET) 1627 *offset = MIN(*offset, tinfo->user.offset); 1628 else 1629 *offset = MIN(*offset, tinfo->goal.offset); 1630 } 1631 } 1632 1633 /* 1634 * Truncate the given transfer width parameter to a value the 1635 * current adapter type is capable of. 1636 */ 1637 void 1638 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 1639 u_int *bus_width, role_t role) 1640 { 1641 switch (*bus_width) { 1642 default: 1643 if (ahc->features & AHC_WIDE) { 1644 /* Respond Wide */ 1645 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1646 break; 1647 } 1648 /* FALLTHROUGH */ 1649 case MSG_EXT_WDTR_BUS_8_BIT: 1650 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1651 break; 1652 } 1653 if (tinfo != NULL) { 1654 if (role == ROLE_TARGET) 1655 *bus_width = MIN(tinfo->user.width, *bus_width); 1656 else 1657 *bus_width = MIN(tinfo->goal.width, *bus_width); 1658 } 1659 } 1660 1661 /* 1662 * Update the bitmask of targets for which the controller should 1663 * negotiate with at the next convenient oportunity. This currently 1664 * means the next time we send the initial identify messages for 1665 * a new transaction. 1666 */ 1667 int 1668 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1669 struct ahc_tmode_tstate *tstate, 1670 struct ahc_initiator_tinfo *tinfo, int force) 1671 { 1672 u_int auto_negotiate_orig; 1673 1674 auto_negotiate_orig = tstate->auto_negotiate; 1675 if (tinfo->curr.period != tinfo->goal.period 1676 || tinfo->curr.width != tinfo->goal.width 1677 || tinfo->curr.offset != tinfo->goal.offset 1678 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 1679 || (force 1680 && (tinfo->goal.period != 0 1681 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 1682 || tinfo->goal.ppr_options != 0))) 1683 tstate->auto_negotiate |= devinfo->target_mask; 1684 else 1685 tstate->auto_negotiate &= ~devinfo->target_mask; 1686 1687 return (auto_negotiate_orig != tstate->auto_negotiate); 1688 } 1689 1690 /* 1691 * Update the user/goal/curr tables of synchronous negotiation 1692 * parameters as well as, in the case of a current or active update, 1693 * any data structures on the host controller. In the case of an 1694 * active update, the specified target is currently talking to us on 1695 * the bus, so the transfer parameter update must take effect 1696 * immediately. 1697 */ 1698 void 1699 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1700 struct ahc_syncrate *syncrate, u_int period, 1701 u_int offset, u_int ppr_options, u_int type, int paused) 1702 { 1703 struct ahc_initiator_tinfo *tinfo; 1704 struct ahc_tmode_tstate *tstate; 1705 u_int old_period; 1706 u_int old_offset; 1707 u_int old_ppr; 1708 int active; 1709 int update_needed; 1710 1711 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1712 update_needed = 0; 1713 1714 if (syncrate == NULL) { 1715 period = 0; 1716 offset = 0; 1717 } 1718 1719 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1720 devinfo->target, &tstate); 1721 1722 if ((type & AHC_TRANS_USER) != 0) { 1723 tinfo->user.period = period; 1724 tinfo->user.offset = offset; 1725 tinfo->user.ppr_options = ppr_options; 1726 } 1727 1728 if ((type & AHC_TRANS_GOAL) != 0) { 1729 tinfo->goal.period = period; 1730 tinfo->goal.offset = offset; 1731 tinfo->goal.ppr_options = ppr_options; 1732 } 1733 1734 old_period = tinfo->curr.period; 1735 old_offset = tinfo->curr.offset; 1736 old_ppr = tinfo->curr.ppr_options; 1737 1738 if ((type & AHC_TRANS_CUR) != 0 1739 && (old_period != period 1740 || old_offset != offset 1741 || old_ppr != ppr_options)) { 1742 u_int scsirate; 1743 1744 update_needed++; 1745 scsirate = tinfo->scsirate; 1746 if ((ahc->features & AHC_ULTRA2) != 0) { 1747 1748 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); 1749 if (syncrate != NULL) { 1750 scsirate |= syncrate->sxfr_u2; 1751 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) 1752 scsirate |= ENABLE_CRC; 1753 else 1754 scsirate |= SINGLE_EDGE; 1755 } 1756 } else { 1757 1758 scsirate &= ~(SXFR|SOFS); 1759 /* 1760 * Ensure Ultra mode is set properly for 1761 * this target. 1762 */ 1763 tstate->ultraenb &= ~devinfo->target_mask; 1764 if (syncrate != NULL) { 1765 if (syncrate->sxfr & ULTRA_SXFR) { 1766 tstate->ultraenb |= 1767 devinfo->target_mask; 1768 } 1769 scsirate |= syncrate->sxfr & SXFR; 1770 scsirate |= offset & SOFS; 1771 } 1772 if (active) { 1773 u_int sxfrctl0; 1774 1775 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1776 sxfrctl0 &= ~FAST20; 1777 if (tstate->ultraenb & devinfo->target_mask) 1778 sxfrctl0 |= FAST20; 1779 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1780 } 1781 } 1782 if (active) { 1783 ahc_outb(ahc, SCSIRATE, scsirate); 1784 if ((ahc->features & AHC_ULTRA2) != 0) 1785 ahc_outb(ahc, SCSIOFFSET, offset); 1786 } 1787 1788 tinfo->scsirate = scsirate; 1789 tinfo->curr.period = period; 1790 tinfo->curr.offset = offset; 1791 tinfo->curr.ppr_options = ppr_options; 1792 1793 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1794 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1795 if (bootverbose) { 1796 if (offset != 0) { 1797 printf("%s: target %d synchronous at %sMHz%s, " 1798 "offset = 0x%x\n", ahc_name(ahc), 1799 devinfo->target, syncrate->rate, 1800 (ppr_options & MSG_EXT_PPR_DT_REQ) 1801 ? " DT" : "", offset); 1802 } else { 1803 printf("%s: target %d using " 1804 "asynchronous transfers\n", 1805 ahc_name(ahc), devinfo->target); 1806 } 1807 } 1808 } 1809 1810 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 1811 tinfo, /*force*/FALSE); 1812 1813 if (update_needed) 1814 ahc_update_pending_scbs(ahc); 1815 } 1816 1817 /* 1818 * Update the user/goal/curr tables of wide negotiation 1819 * parameters as well as, in the case of a current or active update, 1820 * any data structures on the host controller. In the case of an 1821 * active update, the specified target is currently talking to us on 1822 * the bus, so the transfer parameter update must take effect 1823 * immediately. 1824 */ 1825 void 1826 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1827 u_int width, u_int type, int paused) 1828 { 1829 struct ahc_initiator_tinfo *tinfo; 1830 struct ahc_tmode_tstate *tstate; 1831 u_int oldwidth; 1832 int active; 1833 int update_needed; 1834 1835 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1836 update_needed = 0; 1837 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1838 devinfo->target, &tstate); 1839 1840 if ((type & AHC_TRANS_USER) != 0) 1841 tinfo->user.width = width; 1842 1843 if ((type & AHC_TRANS_GOAL) != 0) 1844 tinfo->goal.width = width; 1845 1846 oldwidth = tinfo->curr.width; 1847 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 1848 u_int scsirate; 1849 1850 update_needed++; 1851 scsirate = tinfo->scsirate; 1852 scsirate &= ~WIDEXFER; 1853 if (width == MSG_EXT_WDTR_BUS_16_BIT) 1854 scsirate |= WIDEXFER; 1855 1856 tinfo->scsirate = scsirate; 1857 1858 if (active) 1859 ahc_outb(ahc, SCSIRATE, scsirate); 1860 1861 tinfo->curr.width = width; 1862 1863 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1864 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1865 if (bootverbose) { 1866 printf("%s: target %d using %dbit transfers\n", 1867 ahc_name(ahc), devinfo->target, 1868 8 * (0x01 << width)); 1869 } 1870 } 1871 1872 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 1873 tinfo, /*force*/FALSE); 1874 if (update_needed) 1875 ahc_update_pending_scbs(ahc); 1876 } 1877 1878 /* 1879 * Update the current state of tagged queuing for a given target. 1880 */ 1881 void 1882 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1883 ahc_queue_alg alg) 1884 { 1885 ahc_platform_set_tags(ahc, devinfo, alg); 1886 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1887 devinfo->lun, AC_TRANSFER_NEG, &alg); 1888 } 1889 1890 /* 1891 * When the transfer settings for a connection change, update any 1892 * in-transit SCBs to contain the new data so the hardware will 1893 * be set correctly during future (re)selections. 1894 */ 1895 static void 1896 ahc_update_pending_scbs(struct ahc_softc *ahc) 1897 { 1898 struct scb *pending_scb; 1899 int pending_scb_count; 1900 int i; 1901 int paused; 1902 u_int saved_scbptr; 1903 1904 /* 1905 * Traverse the pending SCB list and ensure that all of the 1906 * SCBs there have the proper settings. 1907 */ 1908 pending_scb_count = 0; 1909 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { 1910 struct ahc_devinfo devinfo; 1911 struct hardware_scb *pending_hscb; 1912 struct ahc_initiator_tinfo *tinfo; 1913 struct ahc_tmode_tstate *tstate; 1914 1915 ahc_scb_devinfo(ahc, &devinfo, pending_scb); 1916 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 1917 devinfo.our_scsiid, 1918 devinfo.target, &tstate); 1919 pending_hscb = pending_scb->hscb; 1920 pending_hscb->control &= ~ULTRAENB; 1921 if ((tstate->ultraenb & devinfo.target_mask) != 0) 1922 pending_hscb->control |= ULTRAENB; 1923 pending_hscb->scsirate = tinfo->scsirate; 1924 pending_hscb->scsioffset = tinfo->curr.offset; 1925 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 1926 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 1927 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 1928 pending_hscb->control &= ~MK_MESSAGE; 1929 } 1930 pending_scb_count++; 1931 } 1932 1933 if (pending_scb_count == 0) 1934 return; 1935 1936 if (ahc_is_paused(ahc)) { 1937 paused = 1; 1938 } else { 1939 paused = 0; 1940 ahc_pause(ahc); 1941 } 1942 1943 saved_scbptr = ahc_inb(ahc, SCBPTR); 1944 /* Ensure that the hscbs down on the card match the new information */ 1945 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 1946 struct hardware_scb *pending_hscb; 1947 u_int control; 1948 u_int scb_tag; 1949 1950 ahc_outb(ahc, SCBPTR, i); 1951 scb_tag = ahc_inb(ahc, SCB_TAG); 1952 pending_scb = ahc_lookup_scb(ahc, scb_tag); 1953 if (pending_scb == NULL) 1954 continue; 1955 1956 pending_hscb = pending_scb->hscb; 1957 control = ahc_inb(ahc, SCB_CONTROL); 1958 control &= ~(ULTRAENB|MK_MESSAGE); 1959 control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); 1960 ahc_outb(ahc, SCB_CONTROL, control); 1961 ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); 1962 ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); 1963 } 1964 ahc_outb(ahc, SCBPTR, saved_scbptr); 1965 1966 if (paused == 0) 1967 ahc_unpause(ahc); 1968 } 1969 1970 /**************************** Pathing Information *****************************/ 1971 static void 1972 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 1973 { 1974 u_int saved_scsiid; 1975 role_t role; 1976 int our_id; 1977 1978 if (ahc_inb(ahc, SSTAT0) & TARGET) 1979 role = ROLE_TARGET; 1980 else 1981 role = ROLE_INITIATOR; 1982 1983 if (role == ROLE_TARGET 1984 && (ahc->features & AHC_MULTI_TID) != 0 1985 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { 1986 /* We were selected, so pull our id from TARGIDIN */ 1987 our_id = ahc_inb(ahc, TARGIDIN) & OID; 1988 } else if ((ahc->features & AHC_ULTRA2) != 0) 1989 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; 1990 else 1991 our_id = ahc_inb(ahc, SCSIID) & OID; 1992 1993 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1994 ahc_compile_devinfo(devinfo, 1995 our_id, 1996 SCSIID_TARGET(ahc, saved_scsiid), 1997 ahc_inb(ahc, SAVED_LUN), 1998 SCSIID_CHANNEL(ahc, saved_scsiid), 1999 role); 2000 } 2001 2002 struct ahc_phase_table_entry* 2003 ahc_lookup_phase_entry(int phase) 2004 { 2005 struct ahc_phase_table_entry *entry; 2006 struct ahc_phase_table_entry *last_entry; 2007 2008 /* 2009 * num_phases doesn't include the default entry which 2010 * will be returned if the phase doesn't match. 2011 */ 2012 last_entry = &ahc_phase_table[num_phases]; 2013 for (entry = ahc_phase_table; entry < last_entry; entry++) { 2014 if (phase == entry->phase) 2015 break; 2016 } 2017 return (entry); 2018 } 2019 2020 void 2021 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, 2022 u_int lun, char channel, role_t role) 2023 { 2024 devinfo->our_scsiid = our_id; 2025 devinfo->target = target; 2026 devinfo->lun = lun; 2027 devinfo->target_offset = target; 2028 devinfo->channel = channel; 2029 devinfo->role = role; 2030 if (channel == 'B') 2031 devinfo->target_offset += 8; 2032 devinfo->target_mask = (0x01 << devinfo->target_offset); 2033 } 2034 2035 static void 2036 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2037 struct scb *scb) 2038 { 2039 role_t role; 2040 int our_id; 2041 2042 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 2043 role = ROLE_INITIATOR; 2044 if ((scb->hscb->control & TARGET_SCB) != 0) 2045 role = ROLE_TARGET; 2046 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), 2047 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); 2048 } 2049 2050 2051 /************************ Message Phase Processing ****************************/ 2052 static void 2053 ahc_assert_atn(struct ahc_softc *ahc) 2054 { 2055 u_int scsisigo; 2056 2057 scsisigo = ATNO; 2058 if ((ahc->features & AHC_DT) == 0) 2059 scsisigo |= ahc_inb(ahc, SCSISIGI); 2060 ahc_outb(ahc, SCSISIGO, scsisigo); 2061 } 2062 2063 /* 2064 * When an initiator transaction with the MK_MESSAGE flag either reconnects 2065 * or enters the initial message out phase, we are interrupted. Fill our 2066 * outgoing message buffer with the appropriate message and beging handing 2067 * the message phase(s) manually. 2068 */ 2069 static void 2070 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2071 struct scb *scb) 2072 { 2073 /* 2074 * To facilitate adding multiple messages together, 2075 * each routine should increment the index and len 2076 * variables instead of setting them explicitly. 2077 */ 2078 ahc->msgout_index = 0; 2079 ahc->msgout_len = 0; 2080 2081 if ((scb->flags & SCB_DEVICE_RESET) == 0 2082 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { 2083 u_int identify_msg; 2084 2085 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); 2086 if ((scb->hscb->control & DISCENB) != 0) 2087 identify_msg |= MSG_IDENTIFY_DISCFLAG; 2088 ahc->msgout_buf[ahc->msgout_index++] = identify_msg; 2089 ahc->msgout_len++; 2090 2091 if ((scb->hscb->control & TAG_ENB) != 0) { 2092 ahc->msgout_buf[ahc->msgout_index++] = 2093 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); 2094 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; 2095 ahc->msgout_len += 2; 2096 } 2097 } 2098 2099 if (scb->flags & SCB_DEVICE_RESET) { 2100 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; 2101 ahc->msgout_len++; 2102 ahc_print_path(ahc, scb); 2103 printf("Bus Device Reset Message Sent\n"); 2104 /* 2105 * Clear our selection hardware in advance of 2106 * the busfree. We may have an entry in the waiting 2107 * Q for this target, and we don't want to go about 2108 * selecting while we handle the busfree and blow it 2109 * away. 2110 */ 2111 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2112 } else if ((scb->flags & SCB_ABORT) != 0) { 2113 if ((scb->hscb->control & TAG_ENB) != 0) 2114 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; 2115 else 2116 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; 2117 ahc->msgout_len++; 2118 ahc_print_path(ahc, scb); 2119 printf("Abort%s Message Sent\n", 2120 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 2121 /* 2122 * Clear our selection hardware in advance of 2123 * the busfree. We may have an entry in the waiting 2124 * Q for this target, and we don't want to go about 2125 * selecting while we handle the busfree and blow it 2126 * away. 2127 */ 2128 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2129 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { 2130 ahc_build_transfer_msg(ahc, devinfo); 2131 } else { 2132 printf("ahc_intr: AWAITING_MSG for an SCB that " 2133 "does not have a waiting message\n"); 2134 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 2135 devinfo->target_mask); 2136 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " 2137 "SCB flags = %x", scb->hscb->tag, scb->hscb->control, 2138 ahc_inb(ahc, MSG_OUT), scb->flags); 2139 } 2140 2141 /* 2142 * Clear the MK_MESSAGE flag from the SCB so we aren't 2143 * asked to send this message again. 2144 */ 2145 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); 2146 scb->hscb->control &= ~MK_MESSAGE; 2147 ahc->msgout_index = 0; 2148 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2149 } 2150 2151 /* 2152 * Build an appropriate transfer negotiation message for the 2153 * currently active target. 2154 */ 2155 static void 2156 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2157 { 2158 /* 2159 * We need to initiate transfer negotiations. 2160 * If our current and goal settings are identical, 2161 * we want to renegotiate due to a check condition. 2162 */ 2163 struct ahc_initiator_tinfo *tinfo; 2164 struct ahc_tmode_tstate *tstate; 2165 struct ahc_syncrate *rate; 2166 int dowide; 2167 int dosync; 2168 int doppr; 2169 int use_ppr; 2170 u_int period; 2171 u_int ppr_options; 2172 u_int offset; 2173 2174 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2175 devinfo->target, &tstate); 2176 /* 2177 * Filter our period based on the current connection. 2178 * If we can't perform DT transfers on this segment (not in LVD 2179 * mode for instance), then our decision to issue a PPR message 2180 * may change. 2181 */ 2182 period = tinfo->goal.period; 2183 ppr_options = tinfo->goal.ppr_options; 2184 rate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2185 &ppr_options, devinfo->role); 2186 dowide = tinfo->curr.width != tinfo->goal.width; 2187 dosync = tinfo->curr.period != period; 2188 doppr = tinfo->curr.ppr_options != ppr_options; 2189 2190 if (!dowide && !dosync && !doppr) { 2191 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 2192 dosync = tinfo->goal.period != 0; 2193 doppr = tinfo->goal.ppr_options != 0; 2194 } 2195 2196 if (!dowide && !dosync && !doppr) { 2197 panic("ahc_intr: AWAITING_MSG for negotiation, " 2198 "but no negotiation needed\n"); 2199 } 2200 2201 use_ppr = (tinfo->curr.transport_version >= 3) || doppr; 2202 /* Target initiated PPR is not allowed in the SCSI spec */ 2203 if (devinfo->role == ROLE_TARGET) 2204 use_ppr = 0; 2205 2206 /* 2207 * Both the PPR message and SDTR message require the 2208 * goal syncrate to be limited to what the target device 2209 * is capable of handling (based on whether an LVD->SE 2210 * expander is on the bus), so combine these two cases. 2211 * Regardless, guarantee that if we are using WDTR and SDTR 2212 * messages that WDTR comes first. 2213 */ 2214 if (use_ppr || (dosync && !dowide)) { 2215 2216 offset = tinfo->goal.offset; 2217 ahc_validate_offset(ahc, tinfo, rate, &offset, 2218 use_ppr ? tinfo->goal.width 2219 : tinfo->curr.width, 2220 devinfo->role); 2221 if (use_ppr) { 2222 ahc_construct_ppr(ahc, devinfo, period, offset, 2223 tinfo->goal.width, ppr_options); 2224 } else { 2225 ahc_construct_sdtr(ahc, devinfo, period, offset); 2226 } 2227 } else { 2228 ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); 2229 } 2230 } 2231 2232 /* 2233 * Build a synchronous negotiation message in our message 2234 * buffer based on the input parameters. 2235 */ 2236 static void 2237 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2238 u_int period, u_int offset) 2239 { 2240 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2241 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; 2242 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; 2243 ahc->msgout_buf[ahc->msgout_index++] = period; 2244 ahc->msgout_buf[ahc->msgout_index++] = offset; 2245 ahc->msgout_len += 5; 2246 if (bootverbose) { 2247 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 2248 ahc_name(ahc), devinfo->channel, devinfo->target, 2249 devinfo->lun, period, offset); 2250 } 2251 } 2252 2253 /* 2254 * Build a wide negotiateion message in our message 2255 * buffer based on the input parameters. 2256 */ 2257 static void 2258 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2259 u_int bus_width) 2260 { 2261 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2262 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; 2263 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; 2264 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2265 ahc->msgout_len += 4; 2266 if (bootverbose) { 2267 printf("(%s:%c:%d:%d): Sending WDTR %x\n", 2268 ahc_name(ahc), devinfo->channel, devinfo->target, 2269 devinfo->lun, bus_width); 2270 } 2271 } 2272 2273 /* 2274 * Build a parallel protocol request message in our message 2275 * buffer based on the input parameters. 2276 */ 2277 static void 2278 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2279 u_int period, u_int offset, u_int bus_width, 2280 u_int ppr_options) 2281 { 2282 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2283 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN; 2284 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR; 2285 ahc->msgout_buf[ahc->msgout_index++] = period; 2286 ahc->msgout_buf[ahc->msgout_index++] = 0; 2287 ahc->msgout_buf[ahc->msgout_index++] = offset; 2288 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2289 ahc->msgout_buf[ahc->msgout_index++] = ppr_options; 2290 ahc->msgout_len += 8; 2291 if (bootverbose) { 2292 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 2293 "offset %x, ppr_options %x\n", ahc_name(ahc), 2294 devinfo->channel, devinfo->target, devinfo->lun, 2295 bus_width, period, offset, ppr_options); 2296 } 2297 } 2298 2299 /* 2300 * Clear any active message state. 2301 */ 2302 static void 2303 ahc_clear_msg_state(struct ahc_softc *ahc) 2304 { 2305 ahc->msgout_len = 0; 2306 ahc->msgin_index = 0; 2307 ahc->msg_type = MSG_TYPE_NONE; 2308 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { 2309 /* 2310 * The target didn't care to respond to our 2311 * message request, so clear ATN. 2312 */ 2313 ahc_outb(ahc, CLRSINT1, CLRATNO); 2314 } 2315 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 2316 } 2317 2318 /* 2319 * Manual message loop handler. 2320 */ 2321 static void 2322 ahc_handle_message_phase(struct ahc_softc *ahc) 2323 { 2324 struct ahc_devinfo devinfo; 2325 u_int bus_phase; 2326 int end_session; 2327 2328 ahc_fetch_devinfo(ahc, &devinfo); 2329 end_session = FALSE; 2330 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2331 2332 reswitch: 2333 switch (ahc->msg_type) { 2334 case MSG_TYPE_INITIATOR_MSGOUT: 2335 { 2336 int lastbyte; 2337 int phasemis; 2338 int msgdone; 2339 2340 if (ahc->msgout_len == 0) 2341 panic("HOST_MSG_LOOP interrupt with no active message"); 2342 2343 phasemis = bus_phase != P_MESGOUT; 2344 if (phasemis) { 2345 if (bus_phase == P_MESGIN) { 2346 /* 2347 * Change gears and see if 2348 * this messages is of interest to 2349 * us or should be passed back to 2350 * the sequencer. 2351 */ 2352 ahc_outb(ahc, CLRSINT1, CLRATNO); 2353 ahc->send_msg_perror = FALSE; 2354 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 2355 ahc->msgin_index = 0; 2356 goto reswitch; 2357 } 2358 end_session = TRUE; 2359 break; 2360 } 2361 2362 if (ahc->send_msg_perror) { 2363 ahc_outb(ahc, CLRSINT1, CLRATNO); 2364 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2365 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 2366 break; 2367 } 2368 2369 msgdone = ahc->msgout_index == ahc->msgout_len; 2370 if (msgdone) { 2371 /* 2372 * The target has requested a retry. 2373 * Re-assert ATN, reset our message index to 2374 * 0, and try again. 2375 */ 2376 ahc->msgout_index = 0; 2377 ahc_assert_atn(ahc); 2378 } 2379 2380 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); 2381 if (lastbyte) { 2382 /* Last byte is signified by dropping ATN */ 2383 ahc_outb(ahc, CLRSINT1, CLRATNO); 2384 } 2385 2386 /* 2387 * Clear our interrupt status and present 2388 * the next byte on the bus. 2389 */ 2390 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2391 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2392 break; 2393 } 2394 case MSG_TYPE_INITIATOR_MSGIN: 2395 { 2396 int phasemis; 2397 int message_done; 2398 2399 phasemis = bus_phase != P_MESGIN; 2400 2401 if (phasemis) { 2402 ahc->msgin_index = 0; 2403 if (bus_phase == P_MESGOUT 2404 && (ahc->send_msg_perror == TRUE 2405 || (ahc->msgout_len != 0 2406 && ahc->msgout_index == 0))) { 2407 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2408 goto reswitch; 2409 } 2410 end_session = TRUE; 2411 break; 2412 } 2413 2414 /* Pull the byte in without acking it */ 2415 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 2416 2417 message_done = ahc_parse_msg(ahc, &devinfo); 2418 2419 if (message_done) { 2420 /* 2421 * Clear our incoming message buffer in case there 2422 * is another message following this one. 2423 */ 2424 ahc->msgin_index = 0; 2425 2426 /* 2427 * If this message illicited a response, 2428 * assert ATN so the target takes us to the 2429 * message out phase. 2430 */ 2431 if (ahc->msgout_len != 0) 2432 ahc_assert_atn(ahc); 2433 } else 2434 ahc->msgin_index++; 2435 2436 /* Ack the byte */ 2437 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2438 ahc_inb(ahc, SCSIDATL); 2439 break; 2440 } 2441 case MSG_TYPE_TARGET_MSGIN: 2442 { 2443 int msgdone; 2444 int msgout_request; 2445 2446 if (ahc->msgout_len == 0) 2447 panic("Target MSGIN with no active message"); 2448 2449 /* 2450 * If we interrupted a mesgout session, the initiator 2451 * will not know this until our first REQ. So, we 2452 * only honor mesgout requests after we've sent our 2453 * first byte. 2454 */ 2455 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 2456 && ahc->msgout_index > 0) 2457 msgout_request = TRUE; 2458 else 2459 msgout_request = FALSE; 2460 2461 if (msgout_request) { 2462 2463 /* 2464 * Change gears and see if 2465 * this messages is of interest to 2466 * us or should be passed back to 2467 * the sequencer. 2468 */ 2469 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; 2470 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); 2471 ahc->msgin_index = 0; 2472 /* Dummy read to REQ for first byte */ 2473 ahc_inb(ahc, SCSIDATL); 2474 ahc_outb(ahc, SXFRCTL0, 2475 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2476 break; 2477 } 2478 2479 msgdone = ahc->msgout_index == ahc->msgout_len; 2480 if (msgdone) { 2481 ahc_outb(ahc, SXFRCTL0, 2482 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 2483 end_session = TRUE; 2484 break; 2485 } 2486 2487 /* 2488 * Present the next byte on the bus. 2489 */ 2490 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2491 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2492 break; 2493 } 2494 case MSG_TYPE_TARGET_MSGOUT: 2495 { 2496 int lastbyte; 2497 int msgdone; 2498 2499 /* 2500 * The initiator signals that this is 2501 * the last byte by dropping ATN. 2502 */ 2503 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; 2504 2505 /* 2506 * Read the latched byte, but turn off SPIOEN first 2507 * so that we don't inadvertantly cause a REQ for the 2508 * next byte. 2509 */ 2510 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 2511 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); 2512 msgdone = ahc_parse_msg(ahc, &devinfo); 2513 if (msgdone == MSGLOOP_TERMINATED) { 2514 /* 2515 * The message is *really* done in that it caused 2516 * us to go to bus free. The sequencer has already 2517 * been reset at this point, so pull the ejection 2518 * handle. 2519 */ 2520 return; 2521 } 2522 2523 ahc->msgin_index++; 2524 2525 /* 2526 * XXX Read spec about initiator dropping ATN too soon 2527 * and use msgdone to detect it. 2528 */ 2529 if (msgdone == MSGLOOP_MSGCOMPLETE) { 2530 ahc->msgin_index = 0; 2531 2532 /* 2533 * If this message illicited a response, transition 2534 * to the Message in phase and send it. 2535 */ 2536 if (ahc->msgout_len != 0) { 2537 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); 2538 ahc_outb(ahc, SXFRCTL0, 2539 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2540 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 2541 ahc->msgin_index = 0; 2542 break; 2543 } 2544 } 2545 2546 if (lastbyte) 2547 end_session = TRUE; 2548 else { 2549 /* Ask for the next byte. */ 2550 ahc_outb(ahc, SXFRCTL0, 2551 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2552 } 2553 2554 break; 2555 } 2556 default: 2557 panic("Unknown REQINIT message type"); 2558 } 2559 2560 if (end_session) { 2561 ahc_clear_msg_state(ahc); 2562 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); 2563 } else 2564 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 2565 } 2566 2567 /* 2568 * See if we sent a particular extended message to the target. 2569 * If "full" is true, return true only if the target saw the full 2570 * message. If "full" is false, return true if the target saw at 2571 * least the first byte of the message. 2572 */ 2573 static int 2574 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) 2575 { 2576 int found; 2577 u_int index; 2578 2579 found = FALSE; 2580 index = 0; 2581 2582 while (index < ahc->msgout_len) { 2583 if (ahc->msgout_buf[index] == MSG_EXTENDED) { 2584 u_int end_index; 2585 2586 end_index = index + 1 + ahc->msgout_buf[index + 1]; 2587 if (ahc->msgout_buf[index+2] == msgval 2588 && type == AHCMSG_EXT) { 2589 2590 if (full) { 2591 if (ahc->msgout_index > end_index) 2592 found = TRUE; 2593 } else if (ahc->msgout_index > index) 2594 found = TRUE; 2595 } 2596 index = end_index; 2597 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK 2598 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 2599 2600 /* Skip tag type and tag id or residue param*/ 2601 index += 2; 2602 } else { 2603 /* Single byte message */ 2604 if (type == AHCMSG_1B 2605 && ahc->msgout_buf[index] == msgval 2606 && ahc->msgout_index > index) 2607 found = TRUE; 2608 index++; 2609 } 2610 2611 if (found) 2612 break; 2613 } 2614 return (found); 2615 } 2616 2617 /* 2618 * Wait for a complete incoming message, parse it, and respond accordingly. 2619 */ 2620 static int 2621 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2622 { 2623 struct ahc_initiator_tinfo *tinfo; 2624 struct ahc_tmode_tstate *tstate; 2625 int reject; 2626 int done; 2627 int response; 2628 u_int targ_scsirate; 2629 2630 done = MSGLOOP_IN_PROG; 2631 response = FALSE; 2632 reject = FALSE; 2633 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2634 devinfo->target, &tstate); 2635 targ_scsirate = tinfo->scsirate; 2636 2637 /* 2638 * Parse as much of the message as is availible, 2639 * rejecting it if we don't support it. When 2640 * the entire message is availible and has been 2641 * handled, return MSGLOOP_MSGCOMPLETE, indicating 2642 * that we have parsed an entire message. 2643 * 2644 * In the case of extended messages, we accept the length 2645 * byte outright and perform more checking once we know the 2646 * extended message type. 2647 */ 2648 switch (ahc->msgin_buf[0]) { 2649 case MSG_MESSAGE_REJECT: 2650 response = ahc_handle_msg_reject(ahc, devinfo); 2651 /* FALLTHROUGH */ 2652 case MSG_NOOP: 2653 done = MSGLOOP_MSGCOMPLETE; 2654 break; 2655 case MSG_EXTENDED: 2656 { 2657 /* Wait for enough of the message to begin validation */ 2658 if (ahc->msgin_index < 2) 2659 break; 2660 switch (ahc->msgin_buf[2]) { 2661 case MSG_EXT_SDTR: 2662 { 2663 struct ahc_syncrate *syncrate; 2664 u_int period; 2665 u_int ppr_options; 2666 u_int offset; 2667 u_int saved_offset; 2668 2669 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 2670 reject = TRUE; 2671 break; 2672 } 2673 2674 /* 2675 * Wait until we have both args before validating 2676 * and acting on this message. 2677 * 2678 * Add one to MSG_EXT_SDTR_LEN to account for 2679 * the extended message preamble. 2680 */ 2681 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 2682 break; 2683 2684 period = ahc->msgin_buf[3]; 2685 ppr_options = 0; 2686 saved_offset = offset = ahc->msgin_buf[4]; 2687 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2688 &ppr_options, 2689 devinfo->role); 2690 ahc_validate_offset(ahc, tinfo, syncrate, &offset, 2691 targ_scsirate & WIDEXFER, 2692 devinfo->role); 2693 if (bootverbose) { 2694 printf("(%s:%c:%d:%d): Received " 2695 "SDTR period %x, offset %x\n\t" 2696 "Filtered to period %x, offset %x\n", 2697 ahc_name(ahc), devinfo->channel, 2698 devinfo->target, devinfo->lun, 2699 ahc->msgin_buf[3], saved_offset, 2700 period, offset); 2701 } 2702 ahc_set_syncrate(ahc, devinfo, 2703 syncrate, period, 2704 offset, ppr_options, 2705 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2706 /*paused*/TRUE); 2707 2708 /* 2709 * See if we initiated Sync Negotiation 2710 * and didn't have to fall down to async 2711 * transfers. 2712 */ 2713 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { 2714 /* We started it */ 2715 if (saved_offset != offset) { 2716 /* Went too low - force async */ 2717 reject = TRUE; 2718 } 2719 } else { 2720 /* 2721 * Send our own SDTR in reply 2722 */ 2723 if (bootverbose 2724 && devinfo->role == ROLE_INITIATOR) { 2725 printf("(%s:%c:%d:%d): Target " 2726 "Initiated SDTR\n", 2727 ahc_name(ahc), devinfo->channel, 2728 devinfo->target, devinfo->lun); 2729 } 2730 ahc->msgout_index = 0; 2731 ahc->msgout_len = 0; 2732 ahc_construct_sdtr(ahc, devinfo, 2733 period, offset); 2734 ahc->msgout_index = 0; 2735 response = TRUE; 2736 } 2737 done = MSGLOOP_MSGCOMPLETE; 2738 break; 2739 } 2740 case MSG_EXT_WDTR: 2741 { 2742 u_int bus_width; 2743 u_int saved_width; 2744 u_int sending_reply; 2745 2746 sending_reply = FALSE; 2747 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 2748 reject = TRUE; 2749 break; 2750 } 2751 2752 /* 2753 * Wait until we have our arg before validating 2754 * and acting on this message. 2755 * 2756 * Add one to MSG_EXT_WDTR_LEN to account for 2757 * the extended message preamble. 2758 */ 2759 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 2760 break; 2761 2762 bus_width = ahc->msgin_buf[3]; 2763 saved_width = bus_width; 2764 ahc_validate_width(ahc, tinfo, &bus_width, 2765 devinfo->role); 2766 if (bootverbose) { 2767 printf("(%s:%c:%d:%d): Received WDTR " 2768 "%x filtered to %x\n", 2769 ahc_name(ahc), devinfo->channel, 2770 devinfo->target, devinfo->lun, 2771 saved_width, bus_width); 2772 } 2773 2774 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { 2775 /* 2776 * Don't send a WDTR back to the 2777 * target, since we asked first. 2778 * If the width went higher than our 2779 * request, reject it. 2780 */ 2781 if (saved_width > bus_width) { 2782 reject = TRUE; 2783 printf("(%s:%c:%d:%d): requested %dBit " 2784 "transfers. Rejecting...\n", 2785 ahc_name(ahc), devinfo->channel, 2786 devinfo->target, devinfo->lun, 2787 8 * (0x01 << bus_width)); 2788 bus_width = 0; 2789 } 2790 } else { 2791 /* 2792 * Send our own WDTR in reply 2793 */ 2794 if (bootverbose 2795 && devinfo->role == ROLE_INITIATOR) { 2796 printf("(%s:%c:%d:%d): Target " 2797 "Initiated WDTR\n", 2798 ahc_name(ahc), devinfo->channel, 2799 devinfo->target, devinfo->lun); 2800 } 2801 ahc->msgout_index = 0; 2802 ahc->msgout_len = 0; 2803 ahc_construct_wdtr(ahc, devinfo, bus_width); 2804 ahc->msgout_index = 0; 2805 response = TRUE; 2806 sending_reply = TRUE; 2807 } 2808 ahc_set_width(ahc, devinfo, bus_width, 2809 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2810 /*paused*/TRUE); 2811 /* After a wide message, we are async */ 2812 ahc_set_syncrate(ahc, devinfo, 2813 /*syncrate*/NULL, /*period*/0, 2814 /*offset*/0, /*ppr_options*/0, 2815 AHC_TRANS_ACTIVE, /*paused*/TRUE); 2816 if (sending_reply == FALSE && reject == FALSE) { 2817 2818 if (tinfo->goal.period) { 2819 ahc->msgout_index = 0; 2820 ahc->msgout_len = 0; 2821 ahc_build_transfer_msg(ahc, devinfo); 2822 ahc->msgout_index = 0; 2823 response = TRUE; 2824 } 2825 } 2826 done = MSGLOOP_MSGCOMPLETE; 2827 break; 2828 } 2829 case MSG_EXT_PPR: 2830 { 2831 struct ahc_syncrate *syncrate; 2832 u_int period; 2833 u_int offset; 2834 u_int bus_width; 2835 u_int ppr_options; 2836 u_int saved_width; 2837 u_int saved_offset; 2838 u_int saved_ppr_options; 2839 2840 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { 2841 reject = TRUE; 2842 break; 2843 } 2844 2845 /* 2846 * Wait until we have all args before validating 2847 * and acting on this message. 2848 * 2849 * Add one to MSG_EXT_PPR_LEN to account for 2850 * the extended message preamble. 2851 */ 2852 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) 2853 break; 2854 2855 period = ahc->msgin_buf[3]; 2856 offset = ahc->msgin_buf[5]; 2857 bus_width = ahc->msgin_buf[6]; 2858 saved_width = bus_width; 2859 ppr_options = ahc->msgin_buf[7]; 2860 /* 2861 * According to the spec, a DT only 2862 * period factor with no DT option 2863 * set implies async. 2864 */ 2865 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 2866 && period == 9) 2867 offset = 0; 2868 saved_ppr_options = ppr_options; 2869 saved_offset = offset; 2870 2871 /* 2872 * Mask out any options we don't support 2873 * on any controller. Transfer options are 2874 * only available if we are negotiating wide. 2875 */ 2876 ppr_options &= MSG_EXT_PPR_DT_REQ; 2877 if (bus_width == 0) 2878 ppr_options = 0; 2879 2880 ahc_validate_width(ahc, tinfo, &bus_width, 2881 devinfo->role); 2882 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2883 &ppr_options, 2884 devinfo->role); 2885 ahc_validate_offset(ahc, tinfo, syncrate, 2886 &offset, bus_width, 2887 devinfo->role); 2888 2889 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { 2890 /* 2891 * If we are unable to do any of the 2892 * requested options (we went too low), 2893 * then we'll have to reject the message. 2894 */ 2895 if (saved_width > bus_width 2896 || saved_offset != offset 2897 || saved_ppr_options != ppr_options) { 2898 reject = TRUE; 2899 period = 0; 2900 offset = 0; 2901 bus_width = 0; 2902 ppr_options = 0; 2903 syncrate = NULL; 2904 } 2905 } else { 2906 if (devinfo->role != ROLE_TARGET) 2907 printf("(%s:%c:%d:%d): Target " 2908 "Initiated PPR\n", 2909 ahc_name(ahc), devinfo->channel, 2910 devinfo->target, devinfo->lun); 2911 else 2912 printf("(%s:%c:%d:%d): Initiator " 2913 "Initiated PPR\n", 2914 ahc_name(ahc), devinfo->channel, 2915 devinfo->target, devinfo->lun); 2916 ahc->msgout_index = 0; 2917 ahc->msgout_len = 0; 2918 ahc_construct_ppr(ahc, devinfo, period, offset, 2919 bus_width, ppr_options); 2920 ahc->msgout_index = 0; 2921 response = TRUE; 2922 } 2923 if (bootverbose) { 2924 printf("(%s:%c:%d:%d): Received PPR width %x, " 2925 "period %x, offset %x,options %x\n" 2926 "\tFiltered to width %x, period %x, " 2927 "offset %x, options %x\n", 2928 ahc_name(ahc), devinfo->channel, 2929 devinfo->target, devinfo->lun, 2930 saved_width, ahc->msgin_buf[3], 2931 saved_offset, saved_ppr_options, 2932 bus_width, period, offset, ppr_options); 2933 } 2934 ahc_set_width(ahc, devinfo, bus_width, 2935 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2936 /*paused*/TRUE); 2937 ahc_set_syncrate(ahc, devinfo, 2938 syncrate, period, 2939 offset, ppr_options, 2940 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2941 /*paused*/TRUE); 2942 done = MSGLOOP_MSGCOMPLETE; 2943 break; 2944 } 2945 default: 2946 /* Unknown extended message. Reject it. */ 2947 reject = TRUE; 2948 break; 2949 } 2950 break; 2951 } 2952 case MSG_BUS_DEV_RESET: 2953 ahc_handle_devreset(ahc, devinfo, 2954 CAM_BDR_SENT, 2955 "Bus Device Reset Received", 2956 /*verbose_level*/0); 2957 ahc_restart(ahc); 2958 done = MSGLOOP_TERMINATED; 2959 break; 2960 case MSG_ABORT_TAG: 2961 case MSG_ABORT: 2962 case MSG_CLEAR_QUEUE: 2963 #ifdef AHC_TARGET_MODE 2964 /* Target mode messages */ 2965 if (devinfo->role != ROLE_TARGET) { 2966 reject = TRUE; 2967 break; 2968 } 2969 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 2970 devinfo->lun, 2971 ahc->msgin_buf[0] == MSG_ABORT_TAG 2972 ? SCB_LIST_NULL 2973 : ahc_inb(ahc, INITIATOR_TAG), 2974 ROLE_TARGET, CAM_REQ_ABORTED); 2975 2976 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 2977 if (tstate != NULL) { 2978 struct ahc_tmode_lstate* lstate; 2979 2980 lstate = tstate->enabled_luns[devinfo->lun]; 2981 if (lstate != NULL) { 2982 ahc_queue_lstate_event(ahc, lstate, 2983 devinfo->our_scsiid, 2984 ahc->msgin_buf[0], 2985 /*arg*/0); 2986 ahc_send_lstate_events(ahc, lstate); 2987 } 2988 } 2989 done = MSGLOOP_MSGCOMPLETE; 2990 break; 2991 #endif 2992 case MSG_TERM_IO_PROC: 2993 default: 2994 reject = TRUE; 2995 break; 2996 } 2997 2998 if (reject) { 2999 /* 3000 * Setup to reject the message. 3001 */ 3002 ahc->msgout_index = 0; 3003 ahc->msgout_len = 1; 3004 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 3005 done = MSGLOOP_MSGCOMPLETE; 3006 response = TRUE; 3007 } 3008 3009 if (done != MSGLOOP_IN_PROG && !response) 3010 /* Clear the outgoing message buffer */ 3011 ahc->msgout_len = 0; 3012 3013 return (done); 3014 } 3015 3016 /* 3017 * Process a message reject message. 3018 */ 3019 static int 3020 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3021 { 3022 /* 3023 * What we care about here is if we had an 3024 * outstanding SDTR or WDTR message for this 3025 * target. If we did, this is a signal that 3026 * the target is refusing negotiation. 3027 */ 3028 struct scb *scb; 3029 struct ahc_initiator_tinfo *tinfo; 3030 struct ahc_tmode_tstate *tstate; 3031 u_int scb_index; 3032 u_int last_msg; 3033 int response = 0; 3034 3035 scb_index = ahc_inb(ahc, SCB_TAG); 3036 scb = ahc_lookup_scb(ahc, scb_index); 3037 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3038 devinfo->our_scsiid, 3039 devinfo->target, &tstate); 3040 /* Might be necessary */ 3041 last_msg = ahc_inb(ahc, LAST_MSG); 3042 3043 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 3044 /* 3045 * Target does not support the PPR message. 3046 * Attempt to negotiate SPI-2 style. 3047 */ 3048 if (bootverbose) { 3049 printf("(%s:%c:%d:%d): PPR Rejected. " 3050 "Trying WDTR/SDTR\n", 3051 ahc_name(ahc), devinfo->channel, 3052 devinfo->target, devinfo->lun); 3053 } 3054 tinfo->goal.ppr_options = 0; 3055 tinfo->curr.transport_version = 2; 3056 tinfo->goal.transport_version = 2; 3057 ahc->msgout_index = 0; 3058 ahc->msgout_len = 0; 3059 ahc_build_transfer_msg(ahc, devinfo); 3060 ahc->msgout_index = 0; 3061 response = 1; 3062 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 3063 3064 /* note 8bit xfers */ 3065 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 3066 "8bit transfers\n", ahc_name(ahc), 3067 devinfo->channel, devinfo->target, devinfo->lun); 3068 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3069 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3070 /*paused*/TRUE); 3071 /* 3072 * No need to clear the sync rate. If the target 3073 * did not accept the command, our syncrate is 3074 * unaffected. If the target started the negotiation, 3075 * but rejected our response, we already cleared the 3076 * sync rate before sending our WDTR. 3077 */ 3078 if (tinfo->goal.period) { 3079 3080 /* Start the sync negotiation */ 3081 ahc->msgout_index = 0; 3082 ahc->msgout_len = 0; 3083 ahc_build_transfer_msg(ahc, devinfo); 3084 ahc->msgout_index = 0; 3085 response = 1; 3086 } 3087 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 3088 /* note asynch xfers and clear flag */ 3089 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, 3090 /*offset*/0, /*ppr_options*/0, 3091 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3092 /*paused*/TRUE); 3093 printf("(%s:%c:%d:%d): refuses synchronous negotiation. " 3094 "Using asynchronous transfers\n", 3095 ahc_name(ahc), devinfo->channel, 3096 devinfo->target, devinfo->lun); 3097 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 3098 int tag_type; 3099 int mask; 3100 3101 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 3102 3103 if (tag_type == MSG_SIMPLE_TASK) { 3104 printf("(%s:%c:%d:%d): refuses tagged commands. " 3105 "Performing non-tagged I/O\n", ahc_name(ahc), 3106 devinfo->channel, devinfo->target, devinfo->lun); 3107 ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); 3108 mask = ~0x23; 3109 } else { 3110 printf("(%s:%c:%d:%d): refuses %s tagged commands. " 3111 "Performing simple queue tagged I/O only\n", 3112 ahc_name(ahc), devinfo->channel, devinfo->target, 3113 devinfo->lun, tag_type == MSG_ORDERED_TASK 3114 ? "ordered" : "head of queue"); 3115 ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); 3116 mask = ~0x03; 3117 } 3118 3119 /* 3120 * Resend the identify for this CCB as the target 3121 * may believe that the selection is invalid otherwise. 3122 */ 3123 ahc_outb(ahc, SCB_CONTROL, 3124 ahc_inb(ahc, SCB_CONTROL) & mask); 3125 scb->hscb->control &= mask; 3126 ahc_set_transaction_tag(scb, /*enabled*/FALSE, 3127 /*type*/MSG_SIMPLE_TASK); 3128 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); 3129 ahc_assert_atn(ahc); 3130 3131 /* 3132 * This transaction is now at the head of 3133 * the untagged queue for this target. 3134 */ 3135 if ((ahc->flags & AHC_SCB_BTT) == 0) { 3136 struct scb_tailq *untagged_q; 3137 3138 untagged_q = 3139 &(ahc->untagged_queues[devinfo->target_offset]); 3140 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); 3141 scb->flags |= SCB_UNTAGGEDQ; 3142 } 3143 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), 3144 scb->hscb->tag); 3145 3146 /* 3147 * Requeue all tagged commands for this target 3148 * currently in our posession so they can be 3149 * converted to untagged commands. 3150 */ 3151 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 3152 SCB_GET_CHANNEL(ahc, scb), 3153 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, 3154 ROLE_INITIATOR, CAM_REQUEUE_REQ, 3155 SEARCH_COMPLETE); 3156 } else { 3157 /* 3158 * Otherwise, we ignore it. 3159 */ 3160 printf("%s:%c:%d: Message reject for %x -- ignored\n", 3161 ahc_name(ahc), devinfo->channel, devinfo->target, 3162 last_msg); 3163 } 3164 return (response); 3165 } 3166 3167 /* 3168 * Process an ingnore wide residue message. 3169 */ 3170 static void 3171 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3172 { 3173 u_int scb_index; 3174 struct scb *scb; 3175 3176 scb_index = ahc_inb(ahc, SCB_TAG); 3177 scb = ahc_lookup_scb(ahc, scb_index); 3178 /* 3179 * XXX Actually check data direction in the sequencer? 3180 * Perhaps add datadir to some spare bits in the hscb? 3181 */ 3182 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 3183 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) { 3184 /* 3185 * Ignore the message if we haven't 3186 * seen an appropriate data phase yet. 3187 */ 3188 } else { 3189 /* 3190 * If the residual occurred on the last 3191 * transfer and the transfer request was 3192 * expected to end on an odd count, do 3193 * nothing. Otherwise, subtract a byte 3194 * and update the residual count accordingly. 3195 */ 3196 uint32_t sgptr; 3197 3198 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3199 if ((sgptr & SG_LIST_NULL) != 0 3200 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) { 3201 /* 3202 * If the residual occurred on the last 3203 * transfer and the transfer request was 3204 * expected to end on an odd count, do 3205 * nothing. 3206 */ 3207 } else { 3208 struct ahc_dma_seg *sg; 3209 uint32_t data_cnt; 3210 uint32_t data_addr; 3211 3212 /* Pull in the rest of the sgptr */ 3213 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3214 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3215 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8); 3216 sgptr &= SG_PTR_MASK; 3217 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16) 3218 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8) 3219 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT)); 3220 3221 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24) 3222 | (ahc_inb(ahc, SHADDR + 2) << 16) 3223 | (ahc_inb(ahc, SHADDR + 1) << 8) 3224 | (ahc_inb(ahc, SHADDR)); 3225 3226 data_cnt += 1; 3227 data_addr -= 1; 3228 3229 sg = ahc_sg_bus_to_virt(scb, sgptr); 3230 /* 3231 * The residual sg ptr points to the next S/G 3232 * to load so we must go back one. 3233 */ 3234 sg--; 3235 if (sg != scb->sg_list 3236 && (sg->len & AHC_SG_LEN_MASK) < data_cnt) { 3237 3238 sg--; 3239 data_cnt = 1 | (sg->len & AHC_DMA_LAST_SEG); 3240 data_addr = sg->addr 3241 + (sg->len & AHC_SG_LEN_MASK) - 1; 3242 3243 /* 3244 * Increment sg so it points to the 3245 * "next" sg. 3246 */ 3247 sg++; 3248 sgptr = ahc_sg_virt_to_bus(scb, sg); 3249 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3, 3250 sgptr >> 24); 3251 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2, 3252 sgptr >> 16); 3253 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1, 3254 sgptr >> 8); 3255 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr); 3256 } 3257 3258 /* XXX What about high address byte??? */ 3259 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24); 3260 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16); 3261 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8); 3262 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt); 3263 3264 /* XXX Perhaps better to just keep the saved address in sram */ 3265 if ((ahc->features & AHC_ULTRA2) != 0) { 3266 ahc_outb(ahc, HADDR + 3, data_addr >> 24); 3267 ahc_outb(ahc, HADDR + 2, data_addr >> 16); 3268 ahc_outb(ahc, HADDR + 1, data_addr >> 8); 3269 ahc_outb(ahc, HADDR, data_addr); 3270 ahc_outb(ahc, DFCNTRL, PRELOADEN); 3271 ahc_outb(ahc, SXFRCTL0, 3272 ahc_inb(ahc, SXFRCTL0) | CLRCHN); 3273 } else { 3274 ahc_outb(ahc, HADDR + 3, data_addr >> 24); 3275 ahc_outb(ahc, HADDR + 2, data_addr >> 16); 3276 ahc_outb(ahc, HADDR + 1, data_addr >> 8); 3277 ahc_outb(ahc, HADDR, data_addr); 3278 } 3279 } 3280 } 3281 } 3282 3283 /* 3284 * Handle the effects of issuing a bus device reset message. 3285 */ 3286 static void 3287 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3288 cam_status status, char *message, int verbose_level) 3289 { 3290 #ifdef AHC_TARGET_MODE 3291 struct ahc_tmode_tstate* tstate; 3292 u_int lun; 3293 #endif 3294 int found; 3295 3296 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3297 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 3298 status); 3299 3300 #ifdef AHC_TARGET_MODE 3301 /* 3302 * Send an immediate notify ccb to all target mord peripheral 3303 * drivers affected by this action. 3304 */ 3305 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3306 if (tstate != NULL) { 3307 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 3308 struct ahc_tmode_lstate* lstate; 3309 3310 lstate = tstate->enabled_luns[lun]; 3311 if (lstate == NULL) 3312 continue; 3313 3314 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3315 MSG_BUS_DEV_RESET, /*arg*/0); 3316 ahc_send_lstate_events(ahc, lstate); 3317 } 3318 } 3319 #endif 3320 3321 /* 3322 * Go back to async/narrow transfers and renegotiate. 3323 */ 3324 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3325 AHC_TRANS_CUR, /*paused*/TRUE); 3326 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, 3327 /*period*/0, /*offset*/0, /*ppr_options*/0, 3328 AHC_TRANS_CUR, /*paused*/TRUE); 3329 3330 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3331 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3332 3333 if (message != NULL 3334 && (verbose_level <= bootverbose)) 3335 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 3336 message, devinfo->channel, devinfo->target, found); 3337 } 3338 3339 #ifdef AHC_TARGET_MODE 3340 static void 3341 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3342 struct scb *scb) 3343 { 3344 3345 /* 3346 * To facilitate adding multiple messages together, 3347 * each routine should increment the index and len 3348 * variables instead of setting them explicitly. 3349 */ 3350 ahc->msgout_index = 0; 3351 ahc->msgout_len = 0; 3352 3353 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 3354 ahc_build_transfer_msg(ahc, devinfo); 3355 else 3356 panic("ahc_intr: AWAITING target message with no message"); 3357 3358 ahc->msgout_index = 0; 3359 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3360 } 3361 #endif 3362 /**************************** Initialization **********************************/ 3363 /* 3364 * Allocate a controller structure for a new device 3365 * and perform initial initializion. 3366 */ 3367 struct ahc_softc * 3368 ahc_alloc(void *platform_arg, char *name) 3369 { 3370 struct ahc_softc *ahc; 3371 int i; 3372 3373 #ifndef __FreeBSD__ 3374 ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT); 3375 if (!ahc) { 3376 printf("aic7xxx: cannot malloc softc!\n"); 3377 free(name, M_DEVBUF); 3378 return NULL; 3379 } 3380 #else 3381 ahc = device_get_softc((device_t)platform_arg); 3382 #endif 3383 memset(ahc, 0, sizeof(*ahc)); 3384 LIST_INIT(&ahc->pending_scbs); 3385 /* We don't know our unit number until the OSM sets it */ 3386 ahc->name = name; 3387 ahc->unit = -1; 3388 for (i = 0; i < 16; i++) 3389 TAILQ_INIT(&ahc->untagged_queues[i]); 3390 if (ahc_platform_alloc(ahc, platform_arg) != 0) { 3391 ahc_free(ahc); 3392 ahc = NULL; 3393 } 3394 return (ahc); 3395 } 3396 3397 int 3398 ahc_softc_init(struct ahc_softc *ahc, struct ahc_probe_config *config) 3399 { 3400 3401 ahc->chip = config->chip; 3402 ahc->features = config->features; 3403 ahc->bugs = config->bugs; 3404 ahc->flags = config->flags; 3405 ahc->channel = config->channel; 3406 ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS); 3407 ahc->description = config->description; 3408 /* The IRQMS bit is only valid on VL and EISA chips */ 3409 if ((ahc->chip & AHC_PCI) != 0) 3410 ahc->unpause &= ~IRQMS; 3411 ahc->pause = ahc->unpause | PAUSE; 3412 /* XXX The shared scb data stuff should be deprecated */ 3413 if (ahc->scb_data == NULL) { 3414 ahc->scb_data = malloc(sizeof(*ahc->scb_data), 3415 M_DEVBUF, M_NOWAIT); 3416 if (ahc->scb_data == NULL) 3417 return (ENOMEM); 3418 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); 3419 } 3420 3421 return (0); 3422 } 3423 3424 void 3425 ahc_softc_insert(struct ahc_softc *ahc) 3426 { 3427 struct ahc_softc *list_ahc; 3428 3429 #if AHC_PCI_CONFIG > 0 3430 /* 3431 * Second Function PCI devices need to inherit some 3432 * settings from function 0. 3433 */ 3434 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI 3435 && (ahc->features & AHC_MULTI_FUNC) != 0) { 3436 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3437 ahc_dev_softc_t list_pci; 3438 ahc_dev_softc_t pci; 3439 3440 list_pci = list_ahc->dev_softc; 3441 pci = ahc->dev_softc; 3442 if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci) 3443 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) { 3444 struct ahc_softc *master; 3445 struct ahc_softc *slave; 3446 3447 if (ahc_get_pci_function(list_pci) == 0) { 3448 master = list_ahc; 3449 slave = ahc; 3450 } else { 3451 master = ahc; 3452 slave = list_ahc; 3453 } 3454 slave->flags &= ~AHC_BIOS_ENABLED; 3455 slave->flags |= 3456 master->flags & AHC_BIOS_ENABLED; 3457 slave->flags &= ~AHC_PRIMARY_CHANNEL; 3458 slave->flags |= 3459 master->flags & AHC_PRIMARY_CHANNEL; 3460 break; 3461 } 3462 } 3463 } 3464 #endif 3465 3466 /* 3467 * Insertion sort into our list of softcs. 3468 */ 3469 list_ahc = TAILQ_FIRST(&ahc_tailq); 3470 while (list_ahc != NULL 3471 && ahc_softc_comp(list_ahc, ahc) <= 0) 3472 list_ahc = TAILQ_NEXT(list_ahc, links); 3473 if (list_ahc != NULL) 3474 TAILQ_INSERT_BEFORE(list_ahc, ahc, links); 3475 else 3476 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); 3477 ahc->init_level++; 3478 } 3479 3480 void 3481 ahc_set_unit(struct ahc_softc *ahc, int unit) 3482 { 3483 ahc->unit = unit; 3484 } 3485 3486 void 3487 ahc_set_name(struct ahc_softc *ahc, char *name) 3488 { 3489 if (ahc->name != NULL) 3490 free(ahc->name, M_DEVBUF); 3491 ahc->name = name; 3492 } 3493 3494 void 3495 ahc_free(struct ahc_softc *ahc) 3496 { 3497 int i; 3498 3499 ahc_fini_scbdata(ahc); 3500 switch (ahc->init_level) { 3501 default: 3502 case 5: 3503 ahc_shutdown(ahc); 3504 TAILQ_REMOVE(&ahc_tailq, ahc, links); 3505 /* FALLTHROUGH */ 3506 case 4: 3507 ahc_dmamap_unload(ahc, ahc->shared_data_dmat, 3508 ahc->shared_data_dmamap); 3509 /* FALLTHROUGH */ 3510 case 3: 3511 ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, 3512 ahc->shared_data_dmamap); 3513 ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, 3514 ahc->shared_data_dmamap); 3515 /* FALLTHROUGH */ 3516 case 2: 3517 ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); 3518 case 1: 3519 #ifndef __linux__ 3520 ahc_dma_tag_destroy(ahc, ahc->buffer_dmat); 3521 #endif 3522 break; 3523 case 0: 3524 break; 3525 } 3526 3527 #ifndef __linux__ 3528 ahc_dma_tag_destroy(ahc, ahc->parent_dmat); 3529 #endif 3530 ahc_platform_free(ahc); 3531 for (i = 0; i < AHC_NUM_TARGETS; i++) { 3532 struct ahc_tmode_tstate *tstate; 3533 3534 tstate = ahc->enabled_targets[i]; 3535 if (tstate != NULL) { 3536 #if AHC_TARGET_MODE 3537 int j; 3538 3539 for (j = 0; j < AHC_NUM_LUNS; j++) { 3540 struct ahc_tmode_lstate *lstate; 3541 3542 lstate = tstate->enabled_luns[j]; 3543 if (lstate != NULL) { 3544 xpt_free_path(lstate->path); 3545 free(lstate, M_DEVBUF); 3546 } 3547 } 3548 #endif 3549 free(tstate, M_DEVBUF); 3550 } 3551 } 3552 #if AHC_TARGET_MODE 3553 if (ahc->black_hole != NULL) { 3554 xpt_free_path(ahc->black_hole->path); 3555 free(ahc->black_hole, M_DEVBUF); 3556 } 3557 #endif 3558 if (ahc->name != NULL) 3559 free(ahc->name, M_DEVBUF); 3560 #ifndef __FreeBSD__ 3561 free(ahc, M_DEVBUF); 3562 #endif 3563 return; 3564 } 3565 3566 void 3567 ahc_shutdown(void *arg) 3568 { 3569 struct ahc_softc *ahc; 3570 int i; 3571 3572 ahc = (struct ahc_softc *)arg; 3573 3574 /* This will reset most registers to 0, but not all */ 3575 ahc_reset(ahc); 3576 ahc_outb(ahc, SCSISEQ, 0); 3577 ahc_outb(ahc, SXFRCTL0, 0); 3578 ahc_outb(ahc, DSPCISTATUS, 0); 3579 3580 for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++) 3581 ahc_outb(ahc, i, 0); 3582 } 3583 3584 /* 3585 * Reset the controller and record some information about it 3586 * that is only availabel just after a reset. 3587 */ 3588 int 3589 ahc_reset(struct ahc_softc *ahc) 3590 { 3591 u_int sblkctl; 3592 u_int sxfrctl1_a, sxfrctl1_b; 3593 int wait; 3594 3595 /* 3596 * Preserve the value of the SXFRCTL1 register for all channels. 3597 * It contains settings that affect termination and we don't want 3598 * to disturb the integrity of the bus. 3599 */ 3600 ahc_pause(ahc); 3601 sxfrctl1_b = 0; 3602 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 3603 u_int sblkctl; 3604 3605 /* 3606 * Save channel B's settings in case this chip 3607 * is setup for TWIN channel operation. 3608 */ 3609 sblkctl = ahc_inb(ahc, SBLKCTL); 3610 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 3611 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 3612 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 3613 } 3614 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 3615 3616 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 3617 3618 /* 3619 * Ensure that the reset has finished 3620 */ 3621 wait = 1000; 3622 do { 3623 ahc_delay(1000); 3624 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 3625 3626 if (wait == 0) { 3627 printf("%s: WARNING - Failed chip reset! " 3628 "Trying to initialize anyway.\n", ahc_name(ahc)); 3629 } 3630 ahc_outb(ahc, HCNTRL, ahc->pause); 3631 3632 /* Determine channel configuration */ 3633 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 3634 /* No Twin Channel PCI cards */ 3635 if ((ahc->chip & AHC_PCI) != 0) 3636 sblkctl &= ~SELBUSB; 3637 switch (sblkctl) { 3638 case 0: 3639 /* Single Narrow Channel */ 3640 break; 3641 case 2: 3642 /* Wide Channel */ 3643 ahc->features |= AHC_WIDE; 3644 break; 3645 case 8: 3646 /* Twin Channel */ 3647 ahc->features |= AHC_TWIN; 3648 break; 3649 default: 3650 printf(" Unsupported adapter type. Ignoring\n"); 3651 return(-1); 3652 } 3653 3654 /* 3655 * Reload sxfrctl1. 3656 * 3657 * We must always initialize STPWEN to 1 before we 3658 * restore the saved values. STPWEN is initialized 3659 * to a tri-state condition which can only be cleared 3660 * by turning it on. 3661 */ 3662 if ((ahc->features & AHC_TWIN) != 0) { 3663 u_int sblkctl; 3664 3665 sblkctl = ahc_inb(ahc, SBLKCTL); 3666 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 3667 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 3668 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 3669 } 3670 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 3671 3672 #ifdef AHC_DUMP_SEQ 3673 if (ahc->init_level == 0) 3674 ahc_dumpseq(ahc); 3675 #endif 3676 3677 return (0); 3678 } 3679 3680 /* 3681 * Determine the number of SCBs available on the controller 3682 */ 3683 int 3684 ahc_probe_scbs(struct ahc_softc *ahc) { 3685 int i; 3686 3687 for (i = 0; i < AHC_SCB_MAX; i++) { 3688 3689 ahc_outb(ahc, SCBPTR, i); 3690 ahc_outb(ahc, SCB_BASE, i); 3691 if (ahc_inb(ahc, SCB_BASE) != i) 3692 break; 3693 ahc_outb(ahc, SCBPTR, 0); 3694 if (ahc_inb(ahc, SCB_BASE) != 0) 3695 break; 3696 } 3697 return (i); 3698 } 3699 3700 void 3701 ahc_init_probe_config(struct ahc_probe_config *probe_config) 3702 { 3703 probe_config->description = NULL; 3704 probe_config->channel = 'A'; 3705 probe_config->channel_b = 'B'; 3706 probe_config->chip = AHC_NONE; 3707 probe_config->features = AHC_FENONE; 3708 probe_config->bugs = AHC_BUGNONE; 3709 probe_config->flags = AHC_FNONE; 3710 } 3711 3712 static void 3713 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3714 { 3715 bus_addr_t *baddr; 3716 3717 baddr = (bus_addr_t *)arg; 3718 *baddr = segs->ds_addr; 3719 } 3720 3721 static void 3722 ahc_build_free_scb_list(struct ahc_softc *ahc) 3723 { 3724 int i; 3725 3726 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 3727 ahc_outb(ahc, SCBPTR, i); 3728 3729 /* Clear the control byte. */ 3730 ahc_outb(ahc, SCB_CONTROL, 0); 3731 3732 /* Set the next pointer */ 3733 if ((ahc->flags & AHC_PAGESCBS) != 0) 3734 ahc_outb(ahc, SCB_NEXT, i+1); 3735 else 3736 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 3737 3738 /* Make the tag number invalid */ 3739 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 3740 } 3741 3742 /* Make sure that the last SCB terminates the free list */ 3743 ahc_outb(ahc, SCBPTR, i-1); 3744 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 3745 3746 /* Ensure we clear the 0 SCB's control byte. */ 3747 ahc_outb(ahc, SCBPTR, 0); 3748 ahc_outb(ahc, SCB_CONTROL, 0); 3749 } 3750 3751 static int 3752 ahc_init_scbdata(struct ahc_softc *ahc) 3753 { 3754 struct scb_data *scb_data; 3755 3756 scb_data = ahc->scb_data; 3757 SLIST_INIT(&scb_data->free_scbs); 3758 SLIST_INIT(&scb_data->sg_maps); 3759 3760 /* Allocate SCB resources */ 3761 scb_data->scbarray = 3762 (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX, 3763 M_DEVBUF, M_NOWAIT); 3764 if (scb_data->scbarray == NULL) 3765 return (ENOMEM); 3766 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX); 3767 3768 /* Determine the number of hardware SCBs and initialize them */ 3769 3770 scb_data->maxhscbs = ahc_probe_scbs(ahc); 3771 if ((ahc->flags & AHC_PAGESCBS) != 0) { 3772 /* SCB 0 heads the free list */ 3773 ahc_outb(ahc, FREE_SCBH, 0); 3774 } else { 3775 ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); 3776 } 3777 3778 if (ahc->scb_data->maxhscbs == 0) { 3779 printf("%s: No SCB space found\n", ahc_name(ahc)); 3780 return (ENXIO); 3781 } 3782 3783 ahc_build_free_scb_list(ahc); 3784 3785 /* 3786 * Create our DMA tags. These tags define the kinds of device 3787 * accessible memory allocations and memory mappings we will 3788 * need to perform during normal operation. 3789 * 3790 * Unless we need to further restrict the allocation, we rely 3791 * on the restrictions of the parent dmat, hence the common 3792 * use of MAXADDR and MAXSIZE. 3793 */ 3794 3795 /* DMA tag for our hardware scb structures */ 3796 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 3797 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 3798 /*highaddr*/BUS_SPACE_MAXADDR, 3799 /*filter*/NULL, /*filterarg*/NULL, 3800 AHC_SCB_MAX * sizeof(struct hardware_scb), 3801 /*nsegments*/1, 3802 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 3803 /*flags*/0, &scb_data->hscb_dmat) != 0) { 3804 goto error_exit; 3805 } 3806 3807 scb_data->init_level++; 3808 3809 /* Allocation for our ccbs */ 3810 if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat, 3811 (void **)&scb_data->hscbs, 3812 BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { 3813 goto error_exit; 3814 } 3815 3816 scb_data->init_level++; 3817 3818 /* And permanently map them */ 3819 ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, 3820 scb_data->hscbs, 3821 AHC_SCB_MAX * sizeof(struct hardware_scb), 3822 ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); 3823 3824 scb_data->init_level++; 3825 3826 /* DMA tag for our sense buffers */ 3827 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 3828 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 3829 /*highaddr*/BUS_SPACE_MAXADDR, 3830 /*filter*/NULL, /*filterarg*/NULL, 3831 AHC_SCB_MAX * sizeof(struct scsi_sense_data), 3832 /*nsegments*/1, 3833 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 3834 /*flags*/0, &scb_data->sense_dmat) != 0) { 3835 goto error_exit; 3836 } 3837 3838 scb_data->init_level++; 3839 3840 /* Allocate them */ 3841 if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat, 3842 (void **)&scb_data->sense, 3843 BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { 3844 goto error_exit; 3845 } 3846 3847 scb_data->init_level++; 3848 3849 /* And permanently map them */ 3850 ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, 3851 scb_data->sense, 3852 AHC_SCB_MAX * sizeof(struct scsi_sense_data), 3853 ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0); 3854 3855 scb_data->init_level++; 3856 3857 /* DMA tag for our S/G structures. We allocate in page sized chunks */ 3858 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 3859 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 3860 /*highaddr*/BUS_SPACE_MAXADDR, 3861 /*filter*/NULL, /*filterarg*/NULL, 3862 PAGE_SIZE, /*nsegments*/1, 3863 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 3864 /*flags*/0, &scb_data->sg_dmat) != 0) { 3865 goto error_exit; 3866 } 3867 3868 scb_data->init_level++; 3869 3870 /* Perform initial CCB allocation */ 3871 memset(scb_data->hscbs, 0, AHC_SCB_MAX * sizeof(struct hardware_scb)); 3872 ahc_alloc_scbs(ahc); 3873 3874 if (scb_data->numscbs == 0) { 3875 printf("%s: ahc_init_scbdata - " 3876 "Unable to allocate initial scbs\n", 3877 ahc_name(ahc)); 3878 goto error_exit; 3879 } 3880 3881 /* 3882 * Tell the sequencer which SCB will be the next one it receives. 3883 */ 3884 ahc->next_queued_scb = ahc_get_scb(ahc); 3885 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 3886 3887 /* 3888 * Note that we were successfull 3889 */ 3890 return (0); 3891 3892 error_exit: 3893 3894 return (ENOMEM); 3895 } 3896 3897 static void 3898 ahc_fini_scbdata(struct ahc_softc *ahc) 3899 { 3900 struct scb_data *scb_data; 3901 3902 scb_data = ahc->scb_data; 3903 if (scb_data == NULL) 3904 return; 3905 3906 switch (scb_data->init_level) { 3907 default: 3908 case 7: 3909 { 3910 struct sg_map_node *sg_map; 3911 3912 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { 3913 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 3914 ahc_dmamap_unload(ahc, scb_data->sg_dmat, 3915 sg_map->sg_dmamap); 3916 ahc_dmamem_free(ahc, scb_data->sg_dmat, 3917 sg_map->sg_vaddr, 3918 sg_map->sg_dmamap); 3919 free(sg_map, M_DEVBUF); 3920 } 3921 ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); 3922 } 3923 case 6: 3924 ahc_dmamap_unload(ahc, scb_data->sense_dmat, 3925 scb_data->sense_dmamap); 3926 case 5: 3927 ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, 3928 scb_data->sense_dmamap); 3929 ahc_dmamap_destroy(ahc, scb_data->sense_dmat, 3930 scb_data->sense_dmamap); 3931 case 4: 3932 ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); 3933 case 3: 3934 ahc_dmamap_unload(ahc, scb_data->hscb_dmat, 3935 scb_data->hscb_dmamap); 3936 case 2: 3937 ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, 3938 scb_data->hscb_dmamap); 3939 ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, 3940 scb_data->hscb_dmamap); 3941 case 1: 3942 ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); 3943 break; 3944 case 0: 3945 break; 3946 } 3947 if (scb_data->scbarray != NULL) 3948 free(scb_data->scbarray, M_DEVBUF); 3949 } 3950 3951 void 3952 ahc_alloc_scbs(struct ahc_softc *ahc) 3953 { 3954 struct scb_data *scb_data; 3955 struct scb *next_scb; 3956 struct sg_map_node *sg_map; 3957 bus_addr_t physaddr; 3958 struct ahc_dma_seg *segs; 3959 int newcount; 3960 int i; 3961 3962 scb_data = ahc->scb_data; 3963 if (scb_data->numscbs >= AHC_SCB_MAX) 3964 /* Can't allocate any more */ 3965 return; 3966 3967 next_scb = &scb_data->scbarray[scb_data->numscbs]; 3968 3969 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 3970 3971 if (sg_map == NULL) 3972 return; 3973 3974 /* Allocate S/G space for the next batch of SCBS */ 3975 if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, 3976 (void **)&sg_map->sg_vaddr, 3977 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 3978 free(sg_map, M_DEVBUF); 3979 return; 3980 } 3981 3982 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 3983 3984 ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, 3985 sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb, 3986 &sg_map->sg_physaddr, /*flags*/0); 3987 3988 segs = sg_map->sg_vaddr; 3989 physaddr = sg_map->sg_physaddr; 3990 3991 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); 3992 for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) { 3993 struct scb_platform_data *pdata; 3994 #ifndef __linux__ 3995 int error; 3996 #endif 3997 pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), 3998 M_DEVBUF, M_NOWAIT); 3999 if (pdata == NULL) 4000 break; 4001 next_scb->platform_data = pdata; 4002 next_scb->sg_list = segs; 4003 /* 4004 * The sequencer always starts with the second entry. 4005 * The first entry is embedded in the scb. 4006 */ 4007 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); 4008 next_scb->ahc_softc = ahc; 4009 next_scb->flags = SCB_FREE; 4010 #ifndef __linux__ 4011 error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0, 4012 &next_scb->dmamap); 4013 if (error != 0) 4014 break; 4015 #endif 4016 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 4017 next_scb->hscb->tag = ahc->scb_data->numscbs; 4018 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, 4019 next_scb, links.sle); 4020 segs += AHC_NSEG; 4021 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); 4022 next_scb++; 4023 ahc->scb_data->numscbs++; 4024 } 4025 } 4026 4027 void 4028 ahc_controller_info(struct ahc_softc *ahc, char *buf) 4029 { 4030 int len; 4031 4032 len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); 4033 buf += len; 4034 if ((ahc->features & AHC_TWIN) != 0) 4035 len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " 4036 "B SCSI Id=%d, primary %c, ", 4037 ahc->our_id, ahc->our_id_b, 4038 (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); 4039 else { 4040 const char *speed; 4041 const char *type; 4042 4043 speed = ""; 4044 if ((ahc->features & AHC_ULTRA) != 0) { 4045 speed = "Ultra "; 4046 } else if ((ahc->features & AHC_DT) != 0) { 4047 speed = "Ultra160 "; 4048 } else if ((ahc->features & AHC_ULTRA2) != 0) { 4049 speed = "Ultra2 "; 4050 } 4051 if ((ahc->features & AHC_WIDE) != 0) { 4052 type = "Wide"; 4053 } else { 4054 type = "Single"; 4055 } 4056 len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ", 4057 speed, type, ahc->channel, ahc->our_id); 4058 } 4059 buf += len; 4060 4061 if ((ahc->flags & AHC_PAGESCBS) != 0) 4062 sprintf(buf, "%d/%d SCBs", 4063 ahc->scb_data->maxhscbs, AHC_SCB_MAX); 4064 else 4065 sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); 4066 } 4067 4068 /* 4069 * Start the board, ready for normal operation 4070 */ 4071 int 4072 ahc_init(struct ahc_softc *ahc) 4073 { 4074 int max_targ; 4075 int i; 4076 int term; 4077 u_int scsi_conf; 4078 u_int scsiseq_template; 4079 u_int ultraenb; 4080 u_int discenable; 4081 u_int tagenable; 4082 size_t driver_data_size; 4083 uint32_t physaddr; 4084 4085 #ifdef AHC_DEBUG_SEQUENCER 4086 ahc->flags |= AHC_SEQUENCER_DEBUG; 4087 #endif 4088 4089 #ifdef AHC_PRINT_SRAM 4090 printf("Scratch Ram:"); 4091 for (i = 0x20; i < 0x5f; i++) { 4092 if (((i % 8) == 0) && (i != 0)) { 4093 printf ("\n "); 4094 } 4095 printf (" 0x%x", ahc_inb(ahc, i)); 4096 } 4097 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4098 for (i = 0x70; i < 0x7f; i++) { 4099 if (((i % 8) == 0) && (i != 0)) { 4100 printf ("\n "); 4101 } 4102 printf (" 0x%x", ahc_inb(ahc, i)); 4103 } 4104 } 4105 printf ("\n"); 4106 #endif 4107 max_targ = 15; 4108 4109 /* 4110 * Assume we have a board at this stage and it has been reset. 4111 */ 4112 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4113 ahc->our_id = ahc->our_id_b = 7; 4114 4115 /* 4116 * Default to allowing initiator operations. 4117 */ 4118 ahc->flags |= AHC_INITIATORROLE; 4119 4120 /* 4121 * Only allow target mode features if this unit has them enabled. 4122 */ 4123 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4124 ahc->features &= ~AHC_TARGETMODE; 4125 4126 #ifndef __linux__ 4127 /* DMA tag for mapping buffers into device visible space. */ 4128 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4129 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 4130 /*highaddr*/BUS_SPACE_MAXADDR, 4131 /*filter*/NULL, /*filterarg*/NULL, 4132 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG, 4133 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 4134 /*flags*/BUS_DMA_ALLOCNOW, 4135 &ahc->buffer_dmat) != 0) { 4136 return (ENOMEM); 4137 } 4138 #endif 4139 4140 ahc->init_level++; 4141 4142 /* 4143 * DMA tag for our command fifos and other data in system memory 4144 * the card's sequencer must be able to access. For initiator 4145 * roles, we need to allocate space for the the qinfifo and qoutfifo. 4146 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4147 * When providing for the target mode role, we must additionally 4148 * provide space for the incoming target command fifo and an extra 4149 * byte to deal with a dma bug in some chip versions. 4150 */ 4151 driver_data_size = 2 * 256 * sizeof(uint8_t); 4152 if ((ahc->features & AHC_TARGETMODE) != 0) 4153 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4154 + /*DMA WideOdd Bug Buffer*/1; 4155 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4156 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 4157 /*highaddr*/BUS_SPACE_MAXADDR, 4158 /*filter*/NULL, /*filterarg*/NULL, 4159 driver_data_size, 4160 /*nsegments*/1, 4161 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4162 /*flags*/0, &ahc->shared_data_dmat) != 0) { 4163 return (ENOMEM); 4164 } 4165 4166 ahc->init_level++; 4167 4168 /* Allocation of driver data */ 4169 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, 4170 (void **)&ahc->qoutfifo, 4171 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 4172 return (ENOMEM); 4173 } 4174 4175 ahc->init_level++; 4176 4177 /* And permanently map it in */ 4178 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 4179 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, 4180 &ahc->shared_data_busaddr, /*flags*/0); 4181 4182 if ((ahc->features & AHC_TARGETMODE) != 0) { 4183 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4184 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4185 ahc->dma_bug_buf = ahc->shared_data_busaddr 4186 + driver_data_size - 1; 4187 /* All target command blocks start out invalid. */ 4188 for (i = 0; i < AHC_TMODE_CMDS; i++) 4189 ahc->targetcmds[i].cmd_valid = 0; 4190 ahc->tqinfifonext = 1; 4191 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4192 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4193 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4194 } 4195 ahc->qinfifo = &ahc->qoutfifo[256]; 4196 4197 ahc->init_level++; 4198 4199 /* Allocate SCB data now that buffer_dmat is initialized */ 4200 if (ahc->scb_data->maxhscbs == 0) 4201 if (ahc_init_scbdata(ahc) != 0) 4202 return (ENOMEM); 4203 4204 /* 4205 * Allocate a tstate to house information for our 4206 * initiator presence on the bus as well as the user 4207 * data for any target mode initiator. 4208 */ 4209 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4210 printf("%s: unable to allocate ahc_tmode_tstate. " 4211 "Failing attach\n", ahc_name(ahc)); 4212 return (ENOMEM); 4213 } 4214 4215 if ((ahc->features & AHC_TWIN) != 0) { 4216 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4217 printf("%s: unable to allocate ahc_tmode_tstate. " 4218 "Failing attach\n", ahc_name(ahc)); 4219 return (ENOMEM); 4220 } 4221 } 4222 4223 ahc_outb(ahc, SEQ_FLAGS, 0); 4224 ahc_outb(ahc, SEQ_FLAGS2, 0); 4225 4226 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) { 4227 ahc->flags |= AHC_PAGESCBS; 4228 } else { 4229 ahc->flags &= ~AHC_PAGESCBS; 4230 } 4231 4232 #ifdef AHC_DEBUG 4233 if (ahc_debug & AHC_SHOWMISC) { 4234 printf("%s: hardware scb %d bytes; kernel scb %d bytes; " 4235 "ahc_dma %d bytes\n", 4236 ahc_name(ahc), 4237 sizeof(struct hardware_scb), 4238 sizeof(struct scb), 4239 sizeof(struct ahc_dma_seg)); 4240 } 4241 #endif /* AHC_DEBUG */ 4242 4243 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4244 if (ahc->features & AHC_TWIN) { 4245 4246 /* 4247 * The device is gated to channel B after a chip reset, 4248 * so set those values first 4249 */ 4250 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4251 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4252 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4253 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4254 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4255 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); 4256 if ((ahc->features & AHC_ULTRA2) != 0) 4257 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4258 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4259 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4260 4261 if ((scsi_conf & RESET_SCSI) != 0 4262 && (ahc->flags & AHC_INITIATORROLE) != 0) 4263 ahc->flags |= AHC_RESET_BUS_B; 4264 4265 /* Select Channel A */ 4266 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4267 } 4268 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; 4269 if ((ahc->features & AHC_ULTRA2) != 0) 4270 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4271 else 4272 ahc_outb(ahc, SCSIID, ahc->our_id); 4273 scsi_conf = ahc_inb(ahc, SCSICONF); 4274 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4275 |term|ahc->seltime 4276 |ENSTIMER|ACTNEGEN); 4277 if ((ahc->features & AHC_ULTRA2) != 0) 4278 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4279 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4280 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4281 4282 if ((scsi_conf & RESET_SCSI) != 0 4283 && (ahc->flags & AHC_INITIATORROLE) != 0) 4284 ahc->flags |= AHC_RESET_BUS_A; 4285 4286 /* 4287 * Look at the information that board initialization or 4288 * the board bios has left us. 4289 */ 4290 ultraenb = 0; 4291 tagenable = ALL_TARGETS_MASK; 4292 4293 /* Grab the disconnection disable table and invert it for our needs */ 4294 if (ahc->flags & AHC_USEDEFAULTS) { 4295 printf("%s: Host Adapter Bios disabled. Using default SCSI " 4296 "device parameters\n", ahc_name(ahc)); 4297 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4298 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4299 discenable = ALL_TARGETS_MASK; 4300 if ((ahc->features & AHC_ULTRA) != 0) 4301 ultraenb = ALL_TARGETS_MASK; 4302 } else { 4303 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4304 | ahc_inb(ahc, DISC_DSB)); 4305 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4306 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4307 | ahc_inb(ahc, ULTRA_ENB); 4308 } 4309 4310 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4311 max_targ = 7; 4312 4313 for (i = 0; i <= max_targ; i++) { 4314 struct ahc_initiator_tinfo *tinfo; 4315 struct ahc_tmode_tstate *tstate; 4316 u_int our_id; 4317 u_int target_id; 4318 char channel; 4319 4320 channel = 'A'; 4321 our_id = ahc->our_id; 4322 target_id = i; 4323 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4324 channel = 'B'; 4325 our_id = ahc->our_id_b; 4326 target_id = i % 8; 4327 } 4328 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4329 target_id, &tstate); 4330 /* Default to async narrow across the board */ 4331 memset(tinfo, 0, sizeof(*tinfo)); 4332 if (ahc->flags & AHC_USEDEFAULTS) { 4333 if ((ahc->features & AHC_WIDE) != 0) 4334 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4335 4336 /* 4337 * These will be truncated when we determine the 4338 * connection type we have with the target. 4339 */ 4340 tinfo->user.period = ahc_syncrates->period; 4341 tinfo->user.offset = ~0; 4342 } else { 4343 u_int scsirate; 4344 uint16_t mask; 4345 4346 /* Take the settings leftover in scratch RAM. */ 4347 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4348 mask = (0x01 << i); 4349 if ((ahc->features & AHC_ULTRA2) != 0) { 4350 u_int offset; 4351 u_int maxsync; 4352 4353 if ((scsirate & SOFS) == 0x0F) { 4354 /* 4355 * Haven't negotiated yet, 4356 * so the format is different. 4357 */ 4358 scsirate = (scsirate & SXFR) >> 4 4359 | (ultraenb & mask) 4360 ? 0x08 : 0x0 4361 | (scsirate & WIDEXFER); 4362 offset = MAX_OFFSET_ULTRA2; 4363 } else 4364 offset = ahc_inb(ahc, TARG_OFFSET + i); 4365 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 4366 /* Set to the lowest sync rate, 5MHz */ 4367 scsirate |= 0x1c; 4368 maxsync = AHC_SYNCRATE_ULTRA2; 4369 if ((ahc->features & AHC_DT) != 0) 4370 maxsync = AHC_SYNCRATE_DT; 4371 tinfo->user.period = 4372 ahc_find_period(ahc, scsirate, maxsync); 4373 if (offset == 0) 4374 tinfo->user.period = 0; 4375 else 4376 tinfo->user.offset = ~0; 4377 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 4378 && (ahc->features & AHC_DT) != 0) 4379 tinfo->user.ppr_options = 4380 MSG_EXT_PPR_DT_REQ; 4381 } else if ((scsirate & SOFS) != 0) { 4382 if ((scsirate & SXFR) == 0x40 4383 && (ultraenb & mask) != 0) { 4384 /* Treat 10MHz as a non-ultra speed */ 4385 scsirate &= ~SXFR; 4386 ultraenb &= ~mask; 4387 } 4388 tinfo->user.period = 4389 ahc_find_period(ahc, scsirate, 4390 (ultraenb & mask) 4391 ? AHC_SYNCRATE_ULTRA 4392 : AHC_SYNCRATE_FAST); 4393 if (tinfo->user.period != 0) 4394 tinfo->user.offset = ~0; 4395 } 4396 if (tinfo->user.period == 0) 4397 tinfo->user.offset = 0; 4398 if ((scsirate & WIDEXFER) != 0 4399 && (ahc->features & AHC_WIDE) != 0) 4400 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4401 tinfo->user.protocol_version = 4; 4402 if ((ahc->features & AHC_DT) != 0) 4403 tinfo->user.transport_version = 3; 4404 else 4405 tinfo->user.transport_version = 2; 4406 tinfo->goal.protocol_version = 2; 4407 tinfo->goal.transport_version = 2; 4408 tinfo->curr.protocol_version = 2; 4409 tinfo->curr.transport_version = 2; 4410 } 4411 tstate->ultraenb = ultraenb; 4412 } 4413 ahc->user_discenable = discenable; 4414 ahc->user_tagenable = tagenable; 4415 4416 /* There are no untagged SCBs active yet. */ 4417 for (i = 0; i < 16; i++) { 4418 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); 4419 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4420 int lun; 4421 4422 /* 4423 * The SCB based BTT allows an entry per 4424 * target and lun pair. 4425 */ 4426 for (lun = 1; lun < AHC_NUM_LUNS; lun++) 4427 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); 4428 } 4429 } 4430 4431 /* All of our queues are empty */ 4432 for (i = 0; i < 256; i++) 4433 ahc->qoutfifo[i] = SCB_LIST_NULL; 4434 4435 for (i = 0; i < 256; i++) 4436 ahc->qinfifo[i] = SCB_LIST_NULL; 4437 4438 if ((ahc->features & AHC_MULTI_TID) != 0) { 4439 ahc_outb(ahc, TARGID, 0); 4440 ahc_outb(ahc, TARGID + 1, 0); 4441 } 4442 4443 /* 4444 * Tell the sequencer where it can find our arrays in memory. 4445 */ 4446 physaddr = ahc->scb_data->hscb_busaddr; 4447 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4448 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4449 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4450 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4451 4452 physaddr = ahc->shared_data_busaddr; 4453 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4454 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4455 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4456 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4457 4458 /* 4459 * Initialize the group code to command length table. 4460 * This overrides the values in TARG_SCSIRATE, so only 4461 * setup the table after we have processed that information. 4462 */ 4463 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4464 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4465 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4466 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4467 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4468 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4469 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4470 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4471 4472 /* Tell the sequencer of our initial queue positions */ 4473 ahc_outb(ahc, KERNEL_QINPOS, 0); 4474 ahc_outb(ahc, QINPOS, 0); 4475 ahc_outb(ahc, QOUTPOS, 0); 4476 4477 /* 4478 * Use the built in queue management registers 4479 * if they are available. 4480 */ 4481 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4482 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4483 ahc_outb(ahc, SDSCB_QOFF, 0); 4484 ahc_outb(ahc, SNSCB_QOFF, 0); 4485 ahc_outb(ahc, HNSCB_QOFF, 0); 4486 } 4487 4488 4489 /* We don't have any waiting selections */ 4490 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4491 4492 /* Our disconnection list is empty too */ 4493 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4494 4495 /* Message out buffer starts empty */ 4496 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4497 4498 /* 4499 * Setup the allowed SCSI Sequences based on operational mode. 4500 * If we are a target, we'll enalbe select in operations once 4501 * we've had a lun enabled. 4502 */ 4503 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4504 if ((ahc->flags & AHC_INITIATORROLE) != 0) 4505 scsiseq_template |= ENRSELI; 4506 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4507 4508 /* 4509 * Load the Sequencer program and Enable the adapter 4510 * in "fast" mode. 4511 */ 4512 if (bootverbose) 4513 printf("%s: Downloading Sequencer Program...", 4514 ahc_name(ahc)); 4515 4516 ahc_loadseq(ahc); 4517 4518 if ((ahc->features & AHC_ULTRA2) != 0) { 4519 int wait; 4520 4521 /* 4522 * Wait for up to 500ms for our transceivers 4523 * to settle. If the adapter does not have 4524 * a cable attached, the tranceivers may 4525 * never settle, so don't complain if we 4526 * fail here. 4527 */ 4528 ahc_pause(ahc); 4529 for (wait = 5000; 4530 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 4531 wait--) 4532 ahc_delay(100); 4533 ahc_unpause(ahc); 4534 } 4535 return (0); 4536 } 4537 4538 void 4539 ahc_intr_enable(struct ahc_softc *ahc, int enable) 4540 { 4541 u_int hcntrl; 4542 4543 hcntrl = ahc_inb(ahc, HCNTRL); 4544 hcntrl &= ~INTEN; 4545 ahc->pause &= ~INTEN; 4546 ahc->unpause &= ~INTEN; 4547 if (enable) { 4548 hcntrl |= INTEN; 4549 ahc->pause |= INTEN; 4550 ahc->unpause |= INTEN; 4551 } 4552 ahc_outb(ahc, HCNTRL, hcntrl); 4553 } 4554 4555 /* 4556 * Ensure that the card is paused in a location 4557 * outside of all critical sections and that all 4558 * pending work is completed prior to returning. 4559 * This routine should only be called from outside 4560 * an interrupt context. 4561 */ 4562 void 4563 ahc_pause_and_flushwork(struct ahc_softc *ahc) 4564 { 4565 int intstat; 4566 int maxloops; 4567 4568 maxloops = 1000; 4569 ahc->flags |= AHC_ALL_INTERRUPTS; 4570 intstat = 0; 4571 do { 4572 ahc_intr(ahc); 4573 ahc_pause(ahc); 4574 ahc_clear_critical_section(ahc); 4575 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) 4576 break; 4577 maxloops--; 4578 } while (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) && --maxloops); 4579 if (maxloops == 0) { 4580 printf("Infinite interrupt loop, INTSTAT = %x", 4581 ahc_inb(ahc, INTSTAT)); 4582 } 4583 ahc_platform_flushwork(ahc); 4584 ahc->flags &= ~AHC_ALL_INTERRUPTS; 4585 } 4586 4587 int 4588 ahc_suspend(struct ahc_softc *ahc) 4589 { 4590 uint8_t *ptr; 4591 int i; 4592 4593 ahc_pause_and_flushwork(ahc); 4594 4595 if (LIST_FIRST(&ahc->pending_scbs) != NULL) 4596 return (EBUSY); 4597 4598 #if AHC_TARGET_MODE 4599 /* 4600 * XXX What about ATIOs that have not yet been serviced? 4601 * Perhaps we should just refuse to be suspended if we 4602 * are acting in a target role. 4603 */ 4604 if (ahc->pending_device != NULL) 4605 return (EBUSY); 4606 #endif 4607 4608 /* Save volatile registers */ 4609 if ((ahc->features & AHC_TWIN) != 0) { 4610 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4611 ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ); 4612 ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4613 ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4614 ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0); 4615 ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1); 4616 ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER); 4617 ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL); 4618 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4619 } 4620 ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ); 4621 ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4622 ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4623 ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0); 4624 ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1); 4625 ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER); 4626 ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL); 4627 4628 if ((ahc->chip & AHC_PCI) != 0) { 4629 ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0); 4630 ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS); 4631 } 4632 4633 if ((ahc->features & AHC_DT) != 0) { 4634 u_int sfunct; 4635 4636 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 4637 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 4638 ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE); 4639 ahc_outb(ahc, SFUNCT, sfunct); 4640 ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1); 4641 } 4642 4643 if ((ahc->features & AHC_MULTI_FUNC) != 0) 4644 ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR); 4645 4646 if ((ahc->features & AHC_ULTRA2) != 0) 4647 ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH); 4648 4649 ptr = ahc->suspend_state.scratch_ram; 4650 for (i = 0; i < 64; i++) 4651 *ptr++ = ahc_inb(ahc, SRAM_BASE + i); 4652 4653 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4654 for (i = 0; i < 16; i++) 4655 *ptr++ = ahc_inb(ahc, TARG_OFFSET + i); 4656 } 4657 4658 ptr = ahc->suspend_state.btt; 4659 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4660 for (i = 0;i < AHC_NUM_TARGETS; i++) { 4661 int j; 4662 4663 for (j = 0;j < AHC_NUM_LUNS; j++) { 4664 u_int tcl; 4665 4666 tcl = BUILD_TCL(i << 4, j); 4667 *ptr = ahc_index_busy_tcl(ahc, tcl); 4668 } 4669 } 4670 } 4671 ahc_shutdown(ahc); 4672 return (0); 4673 } 4674 4675 int 4676 ahc_resume(struct ahc_softc *ahc) 4677 { 4678 uint8_t *ptr; 4679 int i; 4680 4681 ahc_reset(ahc); 4682 4683 ahc_build_free_scb_list(ahc); 4684 4685 /* Restore volatile registers */ 4686 if ((ahc->features & AHC_TWIN) != 0) { 4687 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4688 ahc_outb(ahc, SCSIID, ahc->our_id); 4689 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq); 4690 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0); 4691 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1); 4692 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0); 4693 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1); 4694 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer); 4695 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl); 4696 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4697 } 4698 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq); 4699 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0); 4700 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1); 4701 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0); 4702 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1); 4703 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer); 4704 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl); 4705 if ((ahc->features & AHC_ULTRA2) != 0) 4706 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4707 else 4708 ahc_outb(ahc, SCSIID, ahc->our_id); 4709 4710 if ((ahc->chip & AHC_PCI) != 0) { 4711 ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0); 4712 ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus); 4713 } 4714 4715 if ((ahc->features & AHC_DT) != 0) { 4716 u_int sfunct; 4717 4718 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 4719 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 4720 ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode); 4721 ahc_outb(ahc, SFUNCT, sfunct); 4722 ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1); 4723 } 4724 4725 if ((ahc->features & AHC_MULTI_FUNC) != 0) 4726 ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr); 4727 4728 if ((ahc->features & AHC_ULTRA2) != 0) 4729 ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh); 4730 4731 ptr = ahc->suspend_state.scratch_ram; 4732 for (i = 0; i < 64; i++) 4733 ahc_outb(ahc, SRAM_BASE + i, *ptr++); 4734 4735 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4736 for (i = 0; i < 16; i++) 4737 ahc_outb(ahc, TARG_OFFSET + i, *ptr++); 4738 } 4739 4740 ptr = ahc->suspend_state.btt; 4741 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4742 for (i = 0;i < AHC_NUM_TARGETS; i++) { 4743 int j; 4744 4745 for (j = 0;j < AHC_NUM_LUNS; j++) { 4746 u_int tcl; 4747 4748 tcl = BUILD_TCL(i << 4, j); 4749 ahc_busy_tcl(ahc, tcl, *ptr); 4750 } 4751 } 4752 } 4753 return (0); 4754 } 4755 4756 /************************** Busy Target Table *********************************/ 4757 /* 4758 * Return the untagged transaction id for a given target/channel lun. 4759 * Optionally, clear the entry. 4760 */ 4761 u_int 4762 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) 4763 { 4764 u_int scbid; 4765 u_int target_offset; 4766 4767 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4768 u_int saved_scbptr; 4769 4770 saved_scbptr = ahc_inb(ahc, SCBPTR); 4771 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 4772 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); 4773 ahc_outb(ahc, SCBPTR, saved_scbptr); 4774 } else { 4775 target_offset = TCL_TARGET_OFFSET(tcl); 4776 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); 4777 } 4778 4779 return (scbid); 4780 } 4781 4782 void 4783 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) 4784 { 4785 u_int target_offset; 4786 4787 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4788 u_int saved_scbptr; 4789 4790 saved_scbptr = ahc_inb(ahc, SCBPTR); 4791 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 4792 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); 4793 ahc_outb(ahc, SCBPTR, saved_scbptr); 4794 } else { 4795 target_offset = TCL_TARGET_OFFSET(tcl); 4796 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); 4797 } 4798 } 4799 4800 void 4801 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) 4802 { 4803 u_int target_offset; 4804 4805 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4806 u_int saved_scbptr; 4807 4808 saved_scbptr = ahc_inb(ahc, SCBPTR); 4809 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 4810 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); 4811 ahc_outb(ahc, SCBPTR, saved_scbptr); 4812 } else { 4813 target_offset = TCL_TARGET_OFFSET(tcl); 4814 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); 4815 } 4816 } 4817 4818 /************************** SCB and SCB queue management **********************/ 4819 int 4820 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, 4821 char channel, int lun, u_int tag, role_t role) 4822 { 4823 int targ = SCB_GET_TARGET(ahc, scb); 4824 char chan = SCB_GET_CHANNEL(ahc, scb); 4825 int slun = SCB_GET_LUN(scb); 4826 int match; 4827 4828 match = ((chan == channel) || (channel == ALL_CHANNELS)); 4829 if (match != 0) 4830 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 4831 if (match != 0) 4832 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 4833 if (match != 0) { 4834 #if AHC_TARGET_MODE 4835 int group; 4836 4837 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); 4838 if (role == ROLE_INITIATOR) { 4839 match = (group != XPT_FC_GROUP_TMODE) 4840 && ((tag == scb->hscb->tag) 4841 || (tag == SCB_LIST_NULL)); 4842 } else if (role == ROLE_TARGET) { 4843 match = (group == XPT_FC_GROUP_TMODE) 4844 && ((tag == scb->io_ctx->csio.tag_id) 4845 || (tag == SCB_LIST_NULL)); 4846 } 4847 #else /* !AHC_TARGET_MODE */ 4848 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); 4849 #endif /* AHC_TARGET_MODE */ 4850 } 4851 4852 return match; 4853 } 4854 4855 void 4856 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) 4857 { 4858 int target; 4859 char channel; 4860 int lun; 4861 4862 target = SCB_GET_TARGET(ahc, scb); 4863 lun = SCB_GET_LUN(scb); 4864 channel = SCB_GET_CHANNEL(ahc, scb); 4865 4866 ahc_search_qinfifo(ahc, target, channel, lun, 4867 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 4868 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 4869 4870 ahc_platform_freeze_devq(ahc, scb); 4871 } 4872 4873 void 4874 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) 4875 { 4876 struct scb *prev_scb; 4877 4878 prev_scb = NULL; 4879 if (ahc_qinfifo_count(ahc) != 0) { 4880 u_int prev_tag; 4881 uint8_t prev_pos; 4882 4883 prev_pos = ahc->qinfifonext - 1; 4884 prev_tag = ahc->qinfifo[prev_pos]; 4885 prev_scb = ahc_lookup_scb(ahc, prev_tag); 4886 } 4887 ahc_qinfifo_requeue(ahc, prev_scb, scb); 4888 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4889 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 4890 } else { 4891 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 4892 } 4893 } 4894 4895 static void 4896 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, 4897 struct scb *scb) 4898 { 4899 if (prev_scb == NULL) 4900 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 4901 else 4902 prev_scb->hscb->next = scb->hscb->tag; 4903 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 4904 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 4905 } 4906 4907 static int 4908 ahc_qinfifo_count(struct ahc_softc *ahc) 4909 { 4910 u_int8_t qinpos; 4911 u_int8_t diff; 4912 4913 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4914 qinpos = ahc_inb(ahc, SNSCB_QOFF); 4915 ahc_outb(ahc, SNSCB_QOFF, qinpos); 4916 } else 4917 qinpos = ahc_inb(ahc, QINPOS); 4918 diff = ahc->qinfifonext - qinpos; 4919 return (diff); 4920 } 4921 4922 int 4923 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, 4924 int lun, u_int tag, role_t role, uint32_t status, 4925 ahc_search_action action) 4926 { 4927 struct scb *scb; 4928 struct scb *prev_scb; 4929 uint8_t qinstart; 4930 uint8_t qinpos; 4931 uint8_t qintail; 4932 uint8_t next, prev; 4933 uint8_t curscbptr; 4934 int found; 4935 int maxtarget; 4936 int i; 4937 int have_qregs; 4938 4939 qintail = ahc->qinfifonext; 4940 have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; 4941 if (have_qregs) { 4942 qinstart = ahc_inb(ahc, SNSCB_QOFF); 4943 ahc_outb(ahc, SNSCB_QOFF, qinstart); 4944 } else 4945 qinstart = ahc_inb(ahc, QINPOS); 4946 qinpos = qinstart; 4947 next = ahc_inb(ahc, NEXT_QUEUED_SCB); 4948 found = 0; 4949 prev_scb = NULL; 4950 4951 if (action == SEARCH_COMPLETE) { 4952 /* 4953 * Don't attempt to run any queued untagged transactions 4954 * until we are done with the abort process. 4955 */ 4956 ahc_freeze_untagged_queues(ahc); 4957 } 4958 4959 /* 4960 * Start with an empty queue. Entries that are not chosen 4961 * for removal will be re-added to the queue as we go. 4962 */ 4963 ahc->qinfifonext = qinpos; 4964 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 4965 4966 while (qinpos != qintail) { 4967 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); 4968 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { 4969 /* 4970 * We found an scb that needs to be acted on. 4971 */ 4972 found++; 4973 switch (action) { 4974 case SEARCH_COMPLETE: 4975 { 4976 cam_status ostat; 4977 cam_status cstat; 4978 4979 ostat = ahc_get_transaction_status(scb); 4980 if (ostat == CAM_REQ_INPROG) 4981 ahc_set_transaction_status(scb, 4982 status); 4983 cstat = ahc_get_transaction_status(scb); 4984 if (cstat != CAM_REQ_CMP) 4985 ahc_freeze_scb(scb); 4986 if ((scb->flags & SCB_ACTIVE) == 0) 4987 printf("Inactive SCB in qinfifo\n"); 4988 ahc_done(ahc, scb); 4989 4990 /* FALLTHROUGH */ 4991 case SEARCH_REMOVE: 4992 break; 4993 } 4994 case SEARCH_COUNT: 4995 ahc_qinfifo_requeue(ahc, prev_scb, scb); 4996 prev_scb = scb; 4997 break; 4998 } 4999 } else { 5000 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5001 prev_scb = scb; 5002 } 5003 qinpos++; 5004 } 5005 5006 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5007 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5008 } else { 5009 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5010 } 5011 5012 if (action != SEARCH_COUNT 5013 && (found != 0) 5014 && (qinstart != ahc->qinfifonext)) { 5015 /* 5016 * The sequencer may be in the process of dmaing 5017 * down the SCB at the beginning of the queue. 5018 * This could be problematic if either the first, 5019 * or the second SCB is removed from the queue 5020 * (the first SCB includes a pointer to the "next" 5021 * SCB to dma). If we have removed any entries, swap 5022 * the first element in the queue with the next HSCB 5023 * so the sequencer will notice that NEXT_QUEUED_SCB 5024 * has changed during its dma attempt and will retry 5025 * the DMA. 5026 */ 5027 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); 5028 5029 /* 5030 * ahc_swap_with_next_hscb forces our next pointer to 5031 * point to the reserved SCB for future commands. Save 5032 * and restore our original next pointer to maintain 5033 * queue integrity. 5034 */ 5035 next = scb->hscb->next; 5036 ahc->scb_data->scbindex[scb->hscb->tag] = NULL; 5037 ahc_swap_with_next_hscb(ahc, scb); 5038 scb->hscb->next = next; 5039 ahc->qinfifo[qinstart] = scb->hscb->tag; 5040 5041 /* Tell the card about the new head of the qinfifo. */ 5042 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5043 5044 /* Fixup the tail "next" pointer. */ 5045 qintail = ahc->qinfifonext - 1; 5046 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); 5047 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5048 } 5049 5050 /* 5051 * Search waiting for selection list. 5052 */ 5053 curscbptr = ahc_inb(ahc, SCBPTR); 5054 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ 5055 prev = SCB_LIST_NULL; 5056 5057 while (next != SCB_LIST_NULL) { 5058 uint8_t scb_index; 5059 5060 ahc_outb(ahc, SCBPTR, next); 5061 scb_index = ahc_inb(ahc, SCB_TAG); 5062 if (scb_index >= ahc->scb_data->numscbs) { 5063 printf("Waiting List inconsistency. " 5064 "SCB index == %d, yet numscbs == %d.", 5065 scb_index, ahc->scb_data->numscbs); 5066 ahc_dump_card_state(ahc); 5067 panic("for safety"); 5068 } 5069 scb = ahc_lookup_scb(ahc, scb_index); 5070 if (ahc_match_scb(ahc, scb, target, channel, 5071 lun, SCB_LIST_NULL, role)) { 5072 /* 5073 * We found an scb that needs to be acted on. 5074 */ 5075 found++; 5076 switch (action) { 5077 case SEARCH_COMPLETE: 5078 { 5079 cam_status ostat; 5080 cam_status cstat; 5081 5082 ostat = ahc_get_transaction_status(scb); 5083 if (ostat == CAM_REQ_INPROG) 5084 ahc_set_transaction_status(scb, 5085 status); 5086 cstat = ahc_get_transaction_status(scb); 5087 if (cstat != CAM_REQ_CMP) 5088 ahc_freeze_scb(scb); 5089 if ((scb->flags & SCB_ACTIVE) == 0) 5090 printf("Inactive SCB in Waiting List\n"); 5091 ahc_done(ahc, scb); 5092 /* FALLTHROUGH */ 5093 } 5094 case SEARCH_REMOVE: 5095 next = ahc_rem_wscb(ahc, next, prev); 5096 break; 5097 case SEARCH_COUNT: 5098 prev = next; 5099 next = ahc_inb(ahc, SCB_NEXT); 5100 break; 5101 } 5102 } else { 5103 5104 prev = next; 5105 next = ahc_inb(ahc, SCB_NEXT); 5106 } 5107 } 5108 ahc_outb(ahc, SCBPTR, curscbptr); 5109 5110 /* 5111 * And lastly, the untagged holding queues. 5112 */ 5113 i = 0; 5114 if ((ahc->flags & AHC_SCB_BTT) == 0) { 5115 5116 maxtarget = 16; 5117 if (target != CAM_TARGET_WILDCARD) { 5118 5119 i = target; 5120 if (channel == 'B') 5121 i += 8; 5122 maxtarget = i + 1; 5123 } 5124 } else { 5125 maxtarget = 0; 5126 } 5127 5128 for (; i < maxtarget; i++) { 5129 struct scb_tailq *untagged_q; 5130 struct scb *next_scb; 5131 5132 untagged_q = &(ahc->untagged_queues[i]); 5133 next_scb = TAILQ_FIRST(untagged_q); 5134 while (next_scb != NULL) { 5135 5136 scb = next_scb; 5137 next_scb = TAILQ_NEXT(scb, links.tqe); 5138 5139 /* 5140 * The head of the list may be the currently 5141 * active untagged command for a device. 5142 * We're only searching for commands that 5143 * have not been started. A transaction 5144 * marked active but still in the qinfifo 5145 * is removed by the qinfifo scanning code 5146 * above. 5147 */ 5148 if ((scb->flags & SCB_ACTIVE) != 0) 5149 continue; 5150 5151 if (ahc_match_scb(ahc, scb, target, channel, 5152 lun, SCB_LIST_NULL, role)) { 5153 /* 5154 * We found an scb that needs to be acted on. 5155 */ 5156 found++; 5157 switch (action) { 5158 case SEARCH_COMPLETE: 5159 { 5160 cam_status ostat; 5161 cam_status cstat; 5162 5163 ostat = ahc_get_transaction_status(scb); 5164 if (ostat == CAM_REQ_INPROG) 5165 ahc_set_transaction_status(scb, 5166 status); 5167 cstat = ahc_get_transaction_status(scb); 5168 if (cstat != CAM_REQ_CMP) 5169 ahc_freeze_scb(scb); 5170 if ((scb->flags & SCB_ACTIVE) == 0) 5171 printf("Inactive SCB in untaggedQ\n"); 5172 ahc_done(ahc, scb); 5173 break; 5174 } 5175 case SEARCH_REMOVE: 5176 TAILQ_REMOVE(untagged_q, scb, 5177 links.tqe); 5178 break; 5179 case SEARCH_COUNT: 5180 break; 5181 } 5182 } 5183 } 5184 } 5185 5186 if (action == SEARCH_COMPLETE) 5187 ahc_release_untagged_queues(ahc); 5188 return (found); 5189 } 5190 5191 int 5192 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, 5193 int lun, u_int tag, int stop_on_first, int remove, 5194 int save_state) 5195 { 5196 struct scb *scbp; 5197 u_int next; 5198 u_int prev; 5199 u_int count; 5200 u_int active_scb; 5201 5202 count = 0; 5203 next = ahc_inb(ahc, DISCONNECTED_SCBH); 5204 prev = SCB_LIST_NULL; 5205 5206 if (save_state) { 5207 /* restore this when we're done */ 5208 active_scb = ahc_inb(ahc, SCBPTR); 5209 } else 5210 /* Silence compiler */ 5211 active_scb = SCB_LIST_NULL; 5212 5213 while (next != SCB_LIST_NULL) { 5214 u_int scb_index; 5215 5216 ahc_outb(ahc, SCBPTR, next); 5217 scb_index = ahc_inb(ahc, SCB_TAG); 5218 if (scb_index >= ahc->scb_data->numscbs) { 5219 printf("Disconnected List inconsistency. " 5220 "SCB index == %d, yet numscbs == %d.", 5221 scb_index, ahc->scb_data->numscbs); 5222 ahc_dump_card_state(ahc); 5223 panic("for safety"); 5224 } 5225 5226 if (next == prev) { 5227 panic("Disconnected List Loop. " 5228 "cur SCBPTR == %x, prev SCBPTR == %x.", 5229 next, prev); 5230 } 5231 scbp = ahc_lookup_scb(ahc, scb_index); 5232 if (ahc_match_scb(ahc, scbp, target, channel, lun, 5233 tag, ROLE_INITIATOR)) { 5234 count++; 5235 if (remove) { 5236 next = 5237 ahc_rem_scb_from_disc_list(ahc, prev, next); 5238 } else { 5239 prev = next; 5240 next = ahc_inb(ahc, SCB_NEXT); 5241 } 5242 if (stop_on_first) 5243 break; 5244 } else { 5245 prev = next; 5246 next = ahc_inb(ahc, SCB_NEXT); 5247 } 5248 } 5249 if (save_state) 5250 ahc_outb(ahc, SCBPTR, active_scb); 5251 return (count); 5252 } 5253 5254 /* 5255 * Remove an SCB from the on chip list of disconnected transactions. 5256 * This is empty/unused if we are not performing SCB paging. 5257 */ 5258 static u_int 5259 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 5260 { 5261 u_int next; 5262 5263 ahc_outb(ahc, SCBPTR, scbptr); 5264 next = ahc_inb(ahc, SCB_NEXT); 5265 5266 ahc_outb(ahc, SCB_CONTROL, 0); 5267 5268 ahc_add_curscb_to_free_list(ahc); 5269 5270 if (prev != SCB_LIST_NULL) { 5271 ahc_outb(ahc, SCBPTR, prev); 5272 ahc_outb(ahc, SCB_NEXT, next); 5273 } else 5274 ahc_outb(ahc, DISCONNECTED_SCBH, next); 5275 5276 return (next); 5277 } 5278 5279 /* 5280 * Add the SCB as selected by SCBPTR onto the on chip list of 5281 * free hardware SCBs. This list is empty/unused if we are not 5282 * performing SCB paging. 5283 */ 5284 static void 5285 ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 5286 { 5287 /* 5288 * Invalidate the tag so that our abort 5289 * routines don't think it's active. 5290 */ 5291 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 5292 5293 if ((ahc->flags & AHC_PAGESCBS) != 0) { 5294 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 5295 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 5296 } 5297 } 5298 5299 /* 5300 * Manipulate the waiting for selection list and return the 5301 * scb that follows the one that we remove. 5302 */ 5303 static u_int 5304 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 5305 { 5306 u_int curscb, next; 5307 5308 /* 5309 * Select the SCB we want to abort and 5310 * pull the next pointer out of it. 5311 */ 5312 curscb = ahc_inb(ahc, SCBPTR); 5313 ahc_outb(ahc, SCBPTR, scbpos); 5314 next = ahc_inb(ahc, SCB_NEXT); 5315 5316 /* Clear the necessary fields */ 5317 ahc_outb(ahc, SCB_CONTROL, 0); 5318 5319 ahc_add_curscb_to_free_list(ahc); 5320 5321 /* update the waiting list */ 5322 if (prev == SCB_LIST_NULL) { 5323 /* First in the list */ 5324 ahc_outb(ahc, WAITING_SCBH, next); 5325 5326 /* 5327 * Ensure we aren't attempting to perform 5328 * selection for this entry. 5329 */ 5330 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 5331 } else { 5332 /* 5333 * Select the scb that pointed to us 5334 * and update its next pointer. 5335 */ 5336 ahc_outb(ahc, SCBPTR, prev); 5337 ahc_outb(ahc, SCB_NEXT, next); 5338 } 5339 5340 /* 5341 * Point us back at the original scb position. 5342 */ 5343 ahc_outb(ahc, SCBPTR, curscb); 5344 return next; 5345 } 5346 5347 /******************************** Error Handling ******************************/ 5348 /* 5349 * Abort all SCBs that match the given description (target/channel/lun/tag), 5350 * setting their status to the passed in status if the status has not already 5351 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 5352 * is paused before it is called. 5353 */ 5354 int 5355 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 5356 int lun, u_int tag, role_t role, uint32_t status) 5357 { 5358 struct scb *scbp; 5359 struct scb *scbp_next; 5360 u_int active_scb; 5361 int i, j; 5362 int maxtarget; 5363 int minlun; 5364 int maxlun; 5365 5366 int found; 5367 5368 /* 5369 * Don't attempt to run any queued untagged transactions 5370 * until we are done with the abort process. 5371 */ 5372 ahc_freeze_untagged_queues(ahc); 5373 5374 /* restore this when we're done */ 5375 active_scb = ahc_inb(ahc, SCBPTR); 5376 5377 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, 5378 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5379 5380 /* 5381 * Clean out the busy target table for any untagged commands. 5382 */ 5383 i = 0; 5384 maxtarget = 16; 5385 if (target != CAM_TARGET_WILDCARD) { 5386 i = target; 5387 if (channel == 'B') 5388 i += 8; 5389 maxtarget = i + 1; 5390 } 5391 5392 if (lun == CAM_LUN_WILDCARD) { 5393 5394 /* 5395 * Unless we are using an SCB based 5396 * busy targets table, there is only 5397 * one table entry for all luns of 5398 * a target. 5399 */ 5400 minlun = 0; 5401 maxlun = 1; 5402 if ((ahc->flags & AHC_SCB_BTT) != 0) 5403 maxlun = AHC_NUM_LUNS; 5404 } else { 5405 minlun = lun; 5406 maxlun = lun + 1; 5407 } 5408 5409 for (;i < maxtarget; i++) { 5410 for (j = minlun;j < maxlun; j++) 5411 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); 5412 } 5413 5414 /* 5415 * Go through the disconnected list and remove any entries we 5416 * have queued for completion, 0'ing their control byte too. 5417 * We save the active SCB and restore it ourselves, so there 5418 * is no reason for this search to restore it too. 5419 */ 5420 ahc_search_disc_list(ahc, target, channel, lun, tag, 5421 /*stop_on_first*/FALSE, /*remove*/TRUE, 5422 /*save_state*/FALSE); 5423 5424 /* 5425 * Go through the hardware SCB array looking for commands that 5426 * were active but not on any list. 5427 */ 5428 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 5429 u_int scbid; 5430 5431 ahc_outb(ahc, SCBPTR, i); 5432 scbid = ahc_inb(ahc, SCB_TAG); 5433 scbp = ahc_lookup_scb(ahc, scbid); 5434 if (scbp != NULL 5435 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) 5436 ahc_add_curscb_to_free_list(ahc); 5437 } 5438 5439 /* 5440 * Go through the pending CCB list and look for 5441 * commands for this target that are still active. 5442 * These are other tagged commands that were 5443 * disconnected when the reset occurred. 5444 */ 5445 scbp_next = LIST_FIRST(&ahc->pending_scbs); 5446 while (scbp_next != NULL) { 5447 scbp = scbp_next; 5448 scbp_next = LIST_NEXT(scbp, pending_links); 5449 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { 5450 cam_status ostat; 5451 5452 ostat = ahc_get_transaction_status(scbp); 5453 if (ostat == CAM_REQ_INPROG) 5454 ahc_set_transaction_status(scbp, status); 5455 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP) 5456 ahc_freeze_scb(scbp); 5457 if ((scbp->flags & SCB_ACTIVE) == 0) 5458 printf("Inactive SCB on pending list\n"); 5459 ahc_done(ahc, scbp); 5460 found++; 5461 } 5462 } 5463 ahc_outb(ahc, SCBPTR, active_scb); 5464 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); 5465 ahc_release_untagged_queues(ahc); 5466 return found; 5467 } 5468 5469 static void 5470 ahc_reset_current_bus(struct ahc_softc *ahc) 5471 { 5472 uint8_t scsiseq; 5473 5474 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); 5475 scsiseq = ahc_inb(ahc, SCSISEQ); 5476 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); 5477 ahc_delay(AHC_BUSRESET_DELAY); 5478 /* Turn off the bus reset */ 5479 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); 5480 5481 ahc_clear_intstat(ahc); 5482 5483 /* Re-enable reset interrupts */ 5484 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); 5485 } 5486 5487 int 5488 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) 5489 { 5490 struct ahc_devinfo devinfo; 5491 u_int initiator, target, max_scsiid; 5492 u_int sblkctl; 5493 int found; 5494 int restart_needed; 5495 char cur_channel; 5496 5497 ahc->pending_device = NULL; 5498 5499 ahc_compile_devinfo(&devinfo, 5500 CAM_TARGET_WILDCARD, 5501 CAM_TARGET_WILDCARD, 5502 CAM_LUN_WILDCARD, 5503 channel, ROLE_UNKNOWN); 5504 ahc_pause(ahc); 5505 5506 /* Make sure the sequencer is in a safe location. */ 5507 ahc_clear_critical_section(ahc); 5508 5509 /* 5510 * Run our command complete fifos to ensure that we perform 5511 * completion processing on any commands that 'completed' 5512 * before the reset occurred. 5513 */ 5514 ahc_run_qoutfifo(ahc); 5515 #if AHC_TARGET_MODE 5516 if ((ahc->flags & AHC_TARGETROLE) != 0) { 5517 ahc_run_tqinfifo(ahc, /*paused*/TRUE); 5518 } 5519 #endif 5520 5521 /* 5522 * Reset the bus if we are initiating this reset 5523 */ 5524 sblkctl = ahc_inb(ahc, SBLKCTL); 5525 cur_channel = 'A'; 5526 if ((ahc->features & AHC_TWIN) != 0 5527 && ((sblkctl & SELBUSB) != 0)) 5528 cur_channel = 'B'; 5529 if (cur_channel != channel) { 5530 /* Case 1: Command for another bus is active 5531 * Stealthily reset the other bus without 5532 * upsetting the current bus. 5533 */ 5534 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); 5535 ahc_outb(ahc, SIMODE1, 5536 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST)); 5537 ahc_outb(ahc, SCSISEQ, 5538 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 5539 if (initiate_reset) 5540 ahc_reset_current_bus(ahc); 5541 ahc_clear_intstat(ahc); 5542 ahc_outb(ahc, SBLKCTL, sblkctl); 5543 restart_needed = FALSE; 5544 } else { 5545 /* Case 2: A command from this bus is active or we're idle */ 5546 ahc_clear_msg_state(ahc); 5547 ahc_outb(ahc, SIMODE1, 5548 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST)); 5549 ahc_outb(ahc, SCSISEQ, 5550 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 5551 if (initiate_reset) 5552 ahc_reset_current_bus(ahc); 5553 ahc_clear_intstat(ahc); 5554 restart_needed = TRUE; 5555 } 5556 5557 /* 5558 * Clean up all the state information for the 5559 * pending transactions on this bus. 5560 */ 5561 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, 5562 CAM_LUN_WILDCARD, SCB_LIST_NULL, 5563 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 5564 5565 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; 5566 5567 #ifdef AHC_TARGET_MODE 5568 /* 5569 * Send an immediate notify ccb to all target more peripheral 5570 * drivers affected by this action. 5571 */ 5572 for (target = 0; target <= max_scsiid; target++) { 5573 struct ahc_tmode_tstate* tstate; 5574 u_int lun; 5575 5576 tstate = ahc->enabled_targets[target]; 5577 if (tstate == NULL) 5578 continue; 5579 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 5580 struct ahc_tmode_lstate* lstate; 5581 5582 lstate = tstate->enabled_luns[lun]; 5583 if (lstate == NULL) 5584 continue; 5585 5586 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, 5587 EVENT_TYPE_BUS_RESET, /*arg*/0); 5588 ahc_send_lstate_events(ahc, lstate); 5589 } 5590 } 5591 #endif 5592 /* Notify the XPT that a bus reset occurred */ 5593 ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, 5594 CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); 5595 5596 /* 5597 * Revert to async/narrow transfers until we renegotiate. 5598 */ 5599 for (target = 0; target <= max_scsiid; target++) { 5600 5601 if (ahc->enabled_targets[target] == NULL) 5602 continue; 5603 for (initiator = 0; initiator <= max_scsiid; initiator++) { 5604 struct ahc_devinfo devinfo; 5605 5606 ahc_compile_devinfo(&devinfo, target, initiator, 5607 CAM_LUN_WILDCARD, 5608 channel, ROLE_UNKNOWN); 5609 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 5610 AHC_TRANS_CUR, /*paused*/TRUE); 5611 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, 5612 /*period*/0, /*offset*/0, 5613 /*ppr_options*/0, AHC_TRANS_CUR, 5614 /*paused*/TRUE); 5615 } 5616 } 5617 5618 if (restart_needed) 5619 ahc_restart(ahc); 5620 else 5621 ahc_unpause(ahc); 5622 return found; 5623 } 5624 5625 5626 /***************************** Residual Processing ****************************/ 5627 /* 5628 * Calculate the residual for a just completed SCB. 5629 */ 5630 void 5631 ahc_calc_residual(struct scb *scb) 5632 { 5633 struct hardware_scb *hscb; 5634 struct status_pkt *spkt; 5635 uint32_t sgptr; 5636 uint32_t resid_sgptr; 5637 uint32_t resid; 5638 5639 /* 5640 * 5 cases. 5641 * 1) No residual. 5642 * SG_RESID_VALID clear in sgptr. 5643 * 2) Transferless command 5644 * 3) Never performed any transfers. 5645 * sgptr has SG_FULL_RESID set. 5646 * 4) No residual but target did not 5647 * save data pointers after the 5648 * last transfer, so sgptr was 5649 * never updated. 5650 * 5) We have a partial residual. 5651 * Use residual_sgptr to determine 5652 * where we are. 5653 */ 5654 5655 hscb = scb->hscb; 5656 sgptr = ahc_le32toh(hscb->sgptr); 5657 if ((sgptr & SG_RESID_VALID) == 0) 5658 /* Case 1 */ 5659 return; 5660 sgptr &= ~SG_RESID_VALID; 5661 5662 if ((sgptr & SG_LIST_NULL) != 0) 5663 /* Case 2 */ 5664 return; 5665 5666 spkt = &hscb->shared_data.status; 5667 resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); 5668 if ((sgptr & SG_FULL_RESID) != 0) { 5669 /* Case 3 */ 5670 resid = ahc_get_transfer_length(scb); 5671 } else if ((resid_sgptr & SG_LIST_NULL) != 0) { 5672 /* Case 4 */ 5673 return; 5674 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { 5675 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); 5676 } else { 5677 struct ahc_dma_seg *sg; 5678 5679 /* 5680 * Remainder of the SG where the transfer 5681 * stopped. 5682 */ 5683 resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; 5684 sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); 5685 5686 /* The residual sg_ptr always points to the next sg */ 5687 sg--; 5688 5689 /* 5690 * Add up the contents of all residual 5691 * SG segments that are after the SG where 5692 * the transfer stopped. 5693 */ 5694 while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { 5695 sg++; 5696 resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 5697 } 5698 } 5699 if ((scb->flags & SCB_SENSE) == 0) 5700 ahc_set_residual(scb, resid); 5701 else 5702 ahc_set_sense_residual(scb, resid); 5703 5704 #ifdef AHC_DEBUG 5705 if (ahc_debug & AHC_SHOWMISC) { 5706 ahc_print_path(ahc, scb); 5707 printf("Handled Residual of %d bytes\n", resid); 5708 } 5709 #endif 5710 } 5711 5712 /******************************* Target Mode **********************************/ 5713 #ifdef AHC_TARGET_MODE 5714 /* 5715 * Add a target mode event to this lun's queue 5716 */ 5717 static void 5718 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, 5719 u_int initiator_id, u_int event_type, u_int event_arg) 5720 { 5721 struct ahc_tmode_event *event; 5722 int pending; 5723 5724 xpt_freeze_devq(lstate->path, /*count*/1); 5725 if (lstate->event_w_idx >= lstate->event_r_idx) 5726 pending = lstate->event_w_idx - lstate->event_r_idx; 5727 else 5728 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 5729 - (lstate->event_r_idx - lstate->event_w_idx); 5730 5731 if (event_type == EVENT_TYPE_BUS_RESET 5732 || event_type == MSG_BUS_DEV_RESET) { 5733 /* 5734 * Any earlier events are irrelevant, so reset our buffer. 5735 * This has the effect of allowing us to deal with reset 5736 * floods (an external device holding down the reset line) 5737 * without losing the event that is really interesting. 5738 */ 5739 lstate->event_r_idx = 0; 5740 lstate->event_w_idx = 0; 5741 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 5742 } 5743 5744 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { 5745 xpt_print_path(lstate->path); 5746 printf("immediate event %x:%x lost\n", 5747 lstate->event_buffer[lstate->event_r_idx].event_type, 5748 lstate->event_buffer[lstate->event_r_idx].event_arg); 5749 lstate->event_r_idx++; 5750 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 5751 lstate->event_r_idx = 0; 5752 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 5753 } 5754 5755 event = &lstate->event_buffer[lstate->event_w_idx]; 5756 event->initiator_id = initiator_id; 5757 event->event_type = event_type; 5758 event->event_arg = event_arg; 5759 lstate->event_w_idx++; 5760 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 5761 lstate->event_w_idx = 0; 5762 } 5763 5764 /* 5765 * Send any target mode events queued up waiting 5766 * for immediate notify resources. 5767 */ 5768 void 5769 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) 5770 { 5771 struct ccb_hdr *ccbh; 5772 struct ccb_immed_notify *inot; 5773 5774 while (lstate->event_r_idx != lstate->event_w_idx 5775 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 5776 struct ahc_tmode_event *event; 5777 5778 event = &lstate->event_buffer[lstate->event_r_idx]; 5779 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 5780 inot = (struct ccb_immed_notify *)ccbh; 5781 switch (event->event_type) { 5782 case EVENT_TYPE_BUS_RESET: 5783 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 5784 break; 5785 default: 5786 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 5787 inot->message_args[0] = event->event_type; 5788 inot->message_args[1] = event->event_arg; 5789 break; 5790 } 5791 inot->initiator_id = event->initiator_id; 5792 inot->sense_len = 0; 5793 xpt_done((union ccb *)inot); 5794 lstate->event_r_idx++; 5795 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 5796 lstate->event_r_idx = 0; 5797 } 5798 } 5799 #endif 5800 5801 /******************** Sequencer Program Patching/Download *********************/ 5802 5803 #ifdef AHC_DUMP_SEQ 5804 void 5805 ahc_dumpseq(struct ahc_softc* ahc) 5806 { 5807 int i; 5808 int max_prog; 5809 5810 if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI) 5811 max_prog = 448; 5812 else if ((ahc->features & AHC_ULTRA2) != 0) 5813 max_prog = 768; 5814 else 5815 max_prog = 512; 5816 5817 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 5818 ahc_outb(ahc, SEQADDR0, 0); 5819 ahc_outb(ahc, SEQADDR1, 0); 5820 for (i = 0; i < max_prog; i++) { 5821 uint8_t ins_bytes[4]; 5822 5823 ahc_insb(ahc, SEQRAM, ins_bytes, 4); 5824 printf("0x%08x\n", ins_bytes[0] << 24 5825 | ins_bytes[1] << 16 5826 | ins_bytes[2] << 8 5827 | ins_bytes[3]); 5828 } 5829 } 5830 #endif 5831 5832 static void 5833 ahc_loadseq(struct ahc_softc *ahc) 5834 { 5835 struct cs cs_table[num_critical_sections]; 5836 u_int begin_set[num_critical_sections]; 5837 u_int end_set[num_critical_sections]; 5838 struct patch *cur_patch; 5839 u_int cs_count; 5840 u_int cur_cs; 5841 u_int i; 5842 int downloaded; 5843 u_int skip_addr; 5844 u_int sg_prefetch_cnt; 5845 uint8_t download_consts[7]; 5846 5847 /* 5848 * Start out with 0 critical sections 5849 * that apply to this firmware load. 5850 */ 5851 cs_count = 0; 5852 cur_cs = 0; 5853 memset(begin_set, 0, sizeof(begin_set)); 5854 memset(end_set, 0, sizeof(end_set)); 5855 5856 /* Setup downloadable constant table */ 5857 download_consts[QOUTFIFO_OFFSET] = 0; 5858 if (ahc->targetcmds != NULL) 5859 download_consts[QOUTFIFO_OFFSET] += 32; 5860 download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1; 5861 download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1; 5862 download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1); 5863 sg_prefetch_cnt = ahc->pci_cachesize; 5864 if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg))) 5865 sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg); 5866 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; 5867 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1); 5868 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1); 5869 5870 cur_patch = patches; 5871 downloaded = 0; 5872 skip_addr = 0; 5873 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 5874 ahc_outb(ahc, SEQADDR0, 0); 5875 ahc_outb(ahc, SEQADDR1, 0); 5876 5877 for (i = 0; i < sizeof(seqprog)/4; i++) { 5878 if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { 5879 /* 5880 * Don't download this instruction as it 5881 * is in a patch that was removed. 5882 */ 5883 continue; 5884 } 5885 /* 5886 * Move through the CS table until we find a CS 5887 * that might apply to this instruction. 5888 */ 5889 for (; cur_cs < num_critical_sections; cur_cs++) { 5890 if (critical_sections[cur_cs].end <= i) { 5891 if (begin_set[cs_count] == TRUE 5892 && end_set[cs_count] == FALSE) { 5893 cs_table[cs_count].end = downloaded; 5894 end_set[cs_count] = TRUE; 5895 cs_count++; 5896 } 5897 continue; 5898 } 5899 if (critical_sections[cur_cs].begin <= i 5900 && begin_set[cs_count] == FALSE) { 5901 cs_table[cs_count].begin = downloaded; 5902 begin_set[cs_count] = TRUE; 5903 } 5904 break; 5905 } 5906 ahc_download_instr(ahc, i, download_consts); 5907 downloaded++; 5908 } 5909 5910 ahc->num_critical_sections = cs_count; 5911 if (cs_count != 0) { 5912 5913 cs_count *= sizeof(struct cs); 5914 ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); 5915 if (ahc->critical_sections == NULL) 5916 panic("ahc_loadseq: Could not malloc"); 5917 memcpy(ahc->critical_sections, cs_table, cs_count); 5918 } 5919 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); 5920 ahc_restart(ahc); 5921 5922 if (bootverbose) 5923 printf(" %d instructions downloaded\n", downloaded); 5924 } 5925 5926 static int 5927 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, 5928 u_int start_instr, u_int *skip_addr) 5929 { 5930 struct patch *cur_patch; 5931 struct patch *last_patch; 5932 u_int num_patches; 5933 5934 num_patches = sizeof(patches)/sizeof(struct patch); 5935 last_patch = &patches[num_patches]; 5936 cur_patch = *start_patch; 5937 5938 while (cur_patch < last_patch && start_instr == cur_patch->begin) { 5939 5940 if (cur_patch->patch_func(ahc) == 0) { 5941 5942 /* Start rejecting code */ 5943 *skip_addr = start_instr + cur_patch->skip_instr; 5944 cur_patch += cur_patch->skip_patch; 5945 } else { 5946 /* Accepted this patch. Advance to the next 5947 * one and wait for our intruction pointer to 5948 * hit this point. 5949 */ 5950 cur_patch++; 5951 } 5952 } 5953 5954 *start_patch = cur_patch; 5955 if (start_instr < *skip_addr) 5956 /* Still skipping */ 5957 return (0); 5958 5959 return (1); 5960 } 5961 5962 static void 5963 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) 5964 { 5965 union ins_formats instr; 5966 struct ins_format1 *fmt1_ins; 5967 struct ins_format3 *fmt3_ins; 5968 u_int opcode; 5969 5970 /* 5971 * The firmware is always compiled into a little endian format. 5972 */ 5973 instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); 5974 5975 fmt1_ins = &instr.format1; 5976 fmt3_ins = NULL; 5977 5978 /* Pull the opcode */ 5979 opcode = instr.format1.opcode; 5980 switch (opcode) { 5981 case AIC_OP_JMP: 5982 case AIC_OP_JC: 5983 case AIC_OP_JNC: 5984 case AIC_OP_CALL: 5985 case AIC_OP_JNE: 5986 case AIC_OP_JNZ: 5987 case AIC_OP_JE: 5988 case AIC_OP_JZ: 5989 { 5990 struct patch *cur_patch; 5991 int address_offset; 5992 u_int address; 5993 u_int skip_addr; 5994 u_int i; 5995 5996 fmt3_ins = &instr.format3; 5997 address_offset = 0; 5998 address = fmt3_ins->address; 5999 cur_patch = patches; 6000 skip_addr = 0; 6001 6002 for (i = 0; i < address;) { 6003 6004 ahc_check_patch(ahc, &cur_patch, i, &skip_addr); 6005 6006 if (skip_addr > i) { 6007 int end_addr; 6008 6009 end_addr = MIN(address, skip_addr); 6010 address_offset += end_addr - i; 6011 i = skip_addr; 6012 } else { 6013 i++; 6014 } 6015 } 6016 address -= address_offset; 6017 fmt3_ins->address = address; 6018 /* FALLTHROUGH */ 6019 } 6020 case AIC_OP_OR: 6021 case AIC_OP_AND: 6022 case AIC_OP_XOR: 6023 case AIC_OP_ADD: 6024 case AIC_OP_ADC: 6025 case AIC_OP_BMOV: 6026 if (fmt1_ins->parity != 0) { 6027 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 6028 } 6029 fmt1_ins->parity = 0; 6030 if ((ahc->features & AHC_CMD_CHAN) == 0 6031 && opcode == AIC_OP_BMOV) { 6032 /* 6033 * Block move was added at the same time 6034 * as the command channel. Verify that 6035 * this is only a move of a single element 6036 * and convert the BMOV to a MOV 6037 * (AND with an immediate of FF). 6038 */ 6039 if (fmt1_ins->immediate != 1) 6040 panic("%s: BMOV not supported\n", 6041 ahc_name(ahc)); 6042 fmt1_ins->opcode = AIC_OP_AND; 6043 fmt1_ins->immediate = 0xff; 6044 } 6045 /* FALLTHROUGH */ 6046 case AIC_OP_ROL: 6047 if ((ahc->features & AHC_ULTRA2) != 0) { 6048 int i, count; 6049 6050 /* Calculate odd parity for the instruction */ 6051 for (i = 0, count = 0; i < 31; i++) { 6052 uint32_t mask; 6053 6054 mask = 0x01 << i; 6055 if ((instr.integer & mask) != 0) 6056 count++; 6057 } 6058 if ((count & 0x01) == 0) 6059 instr.format1.parity = 1; 6060 } else { 6061 /* Compress the instruction for older sequencers */ 6062 if (fmt3_ins != NULL) { 6063 instr.integer = 6064 fmt3_ins->immediate 6065 | (fmt3_ins->source << 8) 6066 | (fmt3_ins->address << 16) 6067 | (fmt3_ins->opcode << 25); 6068 } else { 6069 instr.integer = 6070 fmt1_ins->immediate 6071 | (fmt1_ins->source << 8) 6072 | (fmt1_ins->destination << 16) 6073 | (fmt1_ins->ret << 24) 6074 | (fmt1_ins->opcode << 25); 6075 } 6076 } 6077 /* The sequencer is a little endian cpu */ 6078 instr.integer = ahc_htole32(instr.integer); 6079 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 6080 break; 6081 default: 6082 panic("Unknown opcode encountered in seq program"); 6083 break; 6084 } 6085 } 6086 6087 void 6088 ahc_dump_card_state(struct ahc_softc *ahc) 6089 { 6090 struct scb *scb; 6091 struct scb_tailq *untagged_q; 6092 int target; 6093 int maxtarget; 6094 int i; 6095 uint8_t last_phase; 6096 uint8_t qinpos; 6097 uint8_t qintail; 6098 uint8_t qoutpos; 6099 uint8_t scb_index; 6100 uint8_t saved_scbptr; 6101 6102 saved_scbptr = ahc_inb(ahc, SCBPTR); 6103 6104 last_phase = ahc_inb(ahc, LASTPHASE); 6105 printf("%s: Dumping Card State %s, at SEQADDR 0x%x\n", 6106 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, 6107 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 6108 printf("SCSISEQ = 0x%x, SBLKCTL = 0x%x\n", 6109 ahc_inb(ahc, SCSISEQ), ahc_inb(ahc, SBLKCTL)); 6110 printf(" DFCNTRL = 0x%x, DFSTATUS = 0x%x\n", 6111 ahc_inb(ahc, DFCNTRL), ahc_inb(ahc, DFSTATUS)); 6112 printf("LASTPHASE = 0x%x, SCSISIGI = 0x%x, SXFRCTL0 = 0x%x\n", 6113 last_phase, ahc_inb(ahc, SCSISIGI), ahc_inb(ahc, SXFRCTL0)); 6114 printf("SSTAT0 = 0x%x, SSTAT1 = 0x%x\n", 6115 ahc_inb(ahc, SSTAT0), ahc_inb(ahc, SSTAT1)); 6116 if ((ahc->features & AHC_DT) != 0) 6117 printf("SCSIPHASE = 0x%x\n", ahc_inb(ahc, SCSIPHASE)); 6118 printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n", 6119 ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8), 6120 ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8), 6121 ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8), 6122 ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8)); 6123 printf("SCB count = %d\n", ahc->scb_data->numscbs); 6124 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); 6125 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); 6126 /* QINFIFO */ 6127 printf("QINFIFO entries: "); 6128 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 6129 qinpos = ahc_inb(ahc, SNSCB_QOFF); 6130 ahc_outb(ahc, SNSCB_QOFF, qinpos); 6131 } else 6132 qinpos = ahc_inb(ahc, QINPOS); 6133 qintail = ahc->qinfifonext; 6134 while (qinpos != qintail) { 6135 printf("%d ", ahc->qinfifo[qinpos]); 6136 qinpos++; 6137 } 6138 printf("\n"); 6139 6140 printf("Waiting Queue entries: "); 6141 scb_index = ahc_inb(ahc, WAITING_SCBH); 6142 i = 0; 6143 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6144 ahc_outb(ahc, SCBPTR, scb_index); 6145 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6146 scb_index = ahc_inb(ahc, SCB_NEXT); 6147 } 6148 printf("\n"); 6149 6150 printf("Disconnected Queue entries: "); 6151 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); 6152 i = 0; 6153 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6154 ahc_outb(ahc, SCBPTR, scb_index); 6155 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6156 scb_index = ahc_inb(ahc, SCB_NEXT); 6157 } 6158 printf("\n"); 6159 6160 printf("QOUTFIFO entries: "); 6161 qoutpos = ahc->qoutfifonext; 6162 i = 0; 6163 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { 6164 printf("%d ", ahc->qoutfifo[qoutpos]); 6165 qoutpos++; 6166 } 6167 printf("\n"); 6168 6169 printf("Sequencer Free SCB List: "); 6170 scb_index = ahc_inb(ahc, FREE_SCBH); 6171 i = 0; 6172 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6173 ahc_outb(ahc, SCBPTR, scb_index); 6174 printf("%d ", scb_index); 6175 scb_index = ahc_inb(ahc, SCB_NEXT); 6176 } 6177 printf("\n"); 6178 6179 printf("Pending list: "); 6180 i = 0; 6181 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 6182 if (i++ > 256) 6183 break; 6184 printf("%d ", scb->hscb->tag); 6185 } 6186 printf("\n"); 6187 6188 printf("Kernel Free SCB list: "); 6189 i = 0; 6190 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { 6191 if (i++ > 256) 6192 break; 6193 printf("%d ", scb->hscb->tag); 6194 } 6195 printf("\n"); 6196 6197 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7; 6198 for (target = 0; target <= maxtarget; target++) { 6199 untagged_q = &ahc->untagged_queues[target]; 6200 if (TAILQ_FIRST(untagged_q) == NULL) 6201 continue; 6202 printf("Untagged Q(%d): ", target); 6203 i = 0; 6204 TAILQ_FOREACH(scb, untagged_q, links.tqe) { 6205 if (i++ > 256) 6206 break; 6207 printf("%d ", scb->hscb->tag); 6208 } 6209 printf("\n"); 6210 } 6211 6212 ahc_platform_dump_card_state(ahc); 6213 ahc_outb(ahc, SCBPTR, saved_scbptr); 6214 } 6215 6216 /************************* Target Mode ****************************************/ 6217 #ifdef AHC_TARGET_MODE 6218 cam_status 6219 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 6220 struct ahc_tmode_tstate **tstate, 6221 struct ahc_tmode_lstate **lstate, 6222 int notfound_failure) 6223 { 6224 6225 if ((ahc->features & AHC_TARGETMODE) == 0) 6226 return (CAM_REQ_INVALID); 6227 6228 /* 6229 * Handle the 'black hole' device that sucks up 6230 * requests to unattached luns on enabled targets. 6231 */ 6232 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 6233 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 6234 *tstate = NULL; 6235 *lstate = ahc->black_hole; 6236 } else { 6237 u_int max_id; 6238 6239 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 6240 if (ccb->ccb_h.target_id > max_id) 6241 return (CAM_TID_INVALID); 6242 6243 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) 6244 return (CAM_LUN_INVALID); 6245 6246 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 6247 *lstate = NULL; 6248 if (*tstate != NULL) 6249 *lstate = 6250 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 6251 } 6252 6253 if (notfound_failure != 0 && *lstate == NULL) 6254 return (CAM_PATH_INVALID); 6255 6256 return (CAM_REQ_CMP); 6257 } 6258 6259 void 6260 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 6261 { 6262 struct ahc_tmode_tstate *tstate; 6263 struct ahc_tmode_lstate *lstate; 6264 struct ccb_en_lun *cel; 6265 cam_status status; 6266 u_int target; 6267 u_int lun; 6268 u_int target_mask; 6269 u_long s; 6270 char channel; 6271 6272 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 6273 /*notfound_failure*/FALSE); 6274 6275 if (status != CAM_REQ_CMP) { 6276 ccb->ccb_h.status = status; 6277 return; 6278 } 6279 6280 if ((ahc->features & AHC_MULTIROLE) != 0) { 6281 u_int our_id; 6282 6283 if (cam_sim_bus(sim) == 0) 6284 our_id = ahc->our_id; 6285 else 6286 our_id = ahc->our_id_b; 6287 6288 if (ccb->ccb_h.target_id != our_id) { 6289 if ((ahc->features & AHC_MULTI_TID) != 0 6290 && (ahc->flags & AHC_INITIATORROLE) != 0) { 6291 /* 6292 * Only allow additional targets if 6293 * the initiator role is disabled. 6294 * The hardware cannot handle a re-select-in 6295 * on the initiator id during a re-select-out 6296 * on a different target id. 6297 */ 6298 status = CAM_TID_INVALID; 6299 } else if ((ahc->flags & AHC_INITIATORROLE) != 0 6300 || ahc->enabled_luns > 0) { 6301 /* 6302 * Only allow our target id to change 6303 * if the initiator role is not configured 6304 * and there are no enabled luns which 6305 * are attached to the currently registered 6306 * scsi id. 6307 */ 6308 status = CAM_TID_INVALID; 6309 } 6310 } 6311 } 6312 6313 if (status != CAM_REQ_CMP) { 6314 ccb->ccb_h.status = status; 6315 return; 6316 } 6317 6318 /* 6319 * We now have an id that is valid. 6320 * If we aren't in target mode, switch modes. 6321 */ 6322 if ((ahc->flags & AHC_TARGETROLE) == 0 6323 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 6324 u_long s; 6325 6326 printf("Configuring Target Mode\n"); 6327 ahc_lock(ahc, &s); 6328 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 6329 ccb->ccb_h.status = CAM_BUSY; 6330 ahc_unlock(ahc, &s); 6331 return; 6332 } 6333 ahc->flags |= AHC_TARGETROLE; 6334 if ((ahc->features & AHC_MULTIROLE) == 0) 6335 ahc->flags &= ~AHC_INITIATORROLE; 6336 ahc_pause(ahc); 6337 ahc_loadseq(ahc); 6338 ahc_unlock(ahc, &s); 6339 } 6340 cel = &ccb->cel; 6341 target = ccb->ccb_h.target_id; 6342 lun = ccb->ccb_h.target_lun; 6343 channel = SIM_CHANNEL(ahc, sim); 6344 target_mask = 0x01 << target; 6345 if (channel == 'B') 6346 target_mask <<= 8; 6347 6348 if (cel->enable != 0) { 6349 u_int scsiseq; 6350 6351 /* Are we already enabled?? */ 6352 if (lstate != NULL) { 6353 xpt_print_path(ccb->ccb_h.path); 6354 printf("Lun already enabled\n"); 6355 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 6356 return; 6357 } 6358 6359 if (cel->grp6_len != 0 6360 || cel->grp7_len != 0) { 6361 /* 6362 * Don't (yet?) support vendor 6363 * specific commands. 6364 */ 6365 ccb->ccb_h.status = CAM_REQ_INVALID; 6366 printf("Non-zero Group Codes\n"); 6367 return; 6368 } 6369 6370 /* 6371 * Seems to be okay. 6372 * Setup our data structures. 6373 */ 6374 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 6375 tstate = ahc_alloc_tstate(ahc, target, channel); 6376 if (tstate == NULL) { 6377 xpt_print_path(ccb->ccb_h.path); 6378 printf("Couldn't allocate tstate\n"); 6379 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6380 return; 6381 } 6382 } 6383 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 6384 if (lstate == NULL) { 6385 xpt_print_path(ccb->ccb_h.path); 6386 printf("Couldn't allocate lstate\n"); 6387 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6388 return; 6389 } 6390 memset(lstate, 0, sizeof(*lstate)); 6391 status = xpt_create_path(&lstate->path, /*periph*/NULL, 6392 xpt_path_path_id(ccb->ccb_h.path), 6393 xpt_path_target_id(ccb->ccb_h.path), 6394 xpt_path_lun_id(ccb->ccb_h.path)); 6395 if (status != CAM_REQ_CMP) { 6396 free(lstate, M_DEVBUF); 6397 xpt_print_path(ccb->ccb_h.path); 6398 printf("Couldn't allocate path\n"); 6399 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6400 return; 6401 } 6402 SLIST_INIT(&lstate->accept_tios); 6403 SLIST_INIT(&lstate->immed_notifies); 6404 ahc_lock(ahc, &s); 6405 ahc_pause(ahc); 6406 if (target != CAM_TARGET_WILDCARD) { 6407 tstate->enabled_luns[lun] = lstate; 6408 ahc->enabled_luns++; 6409 6410 if ((ahc->features & AHC_MULTI_TID) != 0) { 6411 u_int targid_mask; 6412 6413 targid_mask = ahc_inb(ahc, TARGID) 6414 | (ahc_inb(ahc, TARGID + 1) << 8); 6415 6416 targid_mask |= target_mask; 6417 ahc_outb(ahc, TARGID, targid_mask); 6418 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 6419 6420 ahc_update_scsiid(ahc, targid_mask); 6421 } else { 6422 u_int our_id; 6423 char channel; 6424 6425 channel = SIM_CHANNEL(ahc, sim); 6426 our_id = SIM_SCSI_ID(ahc, sim); 6427 6428 /* 6429 * This can only happen if selections 6430 * are not enabled 6431 */ 6432 if (target != our_id) { 6433 u_int sblkctl; 6434 char cur_channel; 6435 int swap; 6436 6437 sblkctl = ahc_inb(ahc, SBLKCTL); 6438 cur_channel = (sblkctl & SELBUSB) 6439 ? 'B' : 'A'; 6440 if ((ahc->features & AHC_TWIN) == 0) 6441 cur_channel = 'A'; 6442 swap = cur_channel != channel; 6443 if (channel == 'A') 6444 ahc->our_id = target; 6445 else 6446 ahc->our_id_b = target; 6447 6448 if (swap) 6449 ahc_outb(ahc, SBLKCTL, 6450 sblkctl ^ SELBUSB); 6451 6452 ahc_outb(ahc, SCSIID, target); 6453 6454 if (swap) 6455 ahc_outb(ahc, SBLKCTL, sblkctl); 6456 } 6457 } 6458 } else 6459 ahc->black_hole = lstate; 6460 /* Allow select-in operations */ 6461 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 6462 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 6463 scsiseq |= ENSELI; 6464 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 6465 scsiseq = ahc_inb(ahc, SCSISEQ); 6466 scsiseq |= ENSELI; 6467 ahc_outb(ahc, SCSISEQ, scsiseq); 6468 } 6469 ahc_unpause(ahc); 6470 ahc_unlock(ahc, &s); 6471 ccb->ccb_h.status = CAM_REQ_CMP; 6472 xpt_print_path(ccb->ccb_h.path); 6473 printf("Lun now enabled for target mode\n"); 6474 } else { 6475 struct scb *scb; 6476 int i, empty; 6477 6478 if (lstate == NULL) { 6479 ccb->ccb_h.status = CAM_LUN_INVALID; 6480 return; 6481 } 6482 6483 ahc_lock(ahc, &s); 6484 6485 ccb->ccb_h.status = CAM_REQ_CMP; 6486 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 6487 struct ccb_hdr *ccbh; 6488 6489 ccbh = &scb->io_ctx->ccb_h; 6490 if (ccbh->func_code == XPT_CONT_TARGET_IO 6491 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 6492 printf("CTIO pending\n"); 6493 ccb->ccb_h.status = CAM_REQ_INVALID; 6494 ahc_unlock(ahc, &s); 6495 return; 6496 } 6497 } 6498 6499 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 6500 printf("ATIOs pending\n"); 6501 ccb->ccb_h.status = CAM_REQ_INVALID; 6502 } 6503 6504 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 6505 printf("INOTs pending\n"); 6506 ccb->ccb_h.status = CAM_REQ_INVALID; 6507 } 6508 6509 if (ccb->ccb_h.status != CAM_REQ_CMP) { 6510 ahc_unlock(ahc, &s); 6511 return; 6512 } 6513 6514 xpt_print_path(ccb->ccb_h.path); 6515 printf("Target mode disabled\n"); 6516 xpt_free_path(lstate->path); 6517 free(lstate, M_DEVBUF); 6518 6519 ahc_pause(ahc); 6520 /* Can we clean up the target too? */ 6521 if (target != CAM_TARGET_WILDCARD) { 6522 tstate->enabled_luns[lun] = NULL; 6523 ahc->enabled_luns--; 6524 for (empty = 1, i = 0; i < 8; i++) 6525 if (tstate->enabled_luns[i] != NULL) { 6526 empty = 0; 6527 break; 6528 } 6529 6530 if (empty) { 6531 ahc_free_tstate(ahc, target, channel, 6532 /*force*/FALSE); 6533 if (ahc->features & AHC_MULTI_TID) { 6534 u_int targid_mask; 6535 6536 targid_mask = ahc_inb(ahc, TARGID) 6537 | (ahc_inb(ahc, TARGID + 1) 6538 << 8); 6539 6540 targid_mask &= ~target_mask; 6541 ahc_outb(ahc, TARGID, targid_mask); 6542 ahc_outb(ahc, TARGID+1, 6543 (targid_mask >> 8)); 6544 ahc_update_scsiid(ahc, targid_mask); 6545 } 6546 } 6547 } else { 6548 6549 ahc->black_hole = NULL; 6550 6551 /* 6552 * We can't allow selections without 6553 * our black hole device. 6554 */ 6555 empty = TRUE; 6556 } 6557 if (ahc->enabled_luns == 0) { 6558 /* Disallow select-in */ 6559 u_int scsiseq; 6560 6561 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 6562 scsiseq &= ~ENSELI; 6563 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 6564 scsiseq = ahc_inb(ahc, SCSISEQ); 6565 scsiseq &= ~ENSELI; 6566 ahc_outb(ahc, SCSISEQ, scsiseq); 6567 6568 if ((ahc->features & AHC_MULTIROLE) == 0) { 6569 printf("Configuring Initiator Mode\n"); 6570 ahc->flags &= ~AHC_TARGETROLE; 6571 ahc->flags |= AHC_INITIATORROLE; 6572 ahc_pause(ahc); 6573 ahc_loadseq(ahc); 6574 } 6575 } 6576 ahc_unpause(ahc); 6577 ahc_unlock(ahc, &s); 6578 } 6579 } 6580 6581 static void 6582 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) 6583 { 6584 u_int scsiid_mask; 6585 u_int scsiid; 6586 6587 if ((ahc->features & AHC_MULTI_TID) == 0) 6588 panic("ahc_update_scsiid called on non-multitid unit\n"); 6589 6590 /* 6591 * Since we will rely on the the TARGID mask 6592 * for selection enables, ensure that OID 6593 * in SCSIID is not set to some other ID 6594 * that we don't want to allow selections on. 6595 */ 6596 if ((ahc->features & AHC_ULTRA2) != 0) 6597 scsiid = ahc_inb(ahc, SCSIID_ULTRA2); 6598 else 6599 scsiid = ahc_inb(ahc, SCSIID); 6600 scsiid_mask = 0x1 << (scsiid & OID); 6601 if ((targid_mask & scsiid_mask) == 0) { 6602 u_int our_id; 6603 6604 /* ffs counts from 1 */ 6605 our_id = ffs(targid_mask); 6606 if (our_id == 0) 6607 our_id = ahc->our_id; 6608 else 6609 our_id--; 6610 scsiid &= TID; 6611 scsiid |= our_id; 6612 } 6613 if ((ahc->features & AHC_ULTRA2) != 0) 6614 ahc_outb(ahc, SCSIID_ULTRA2, scsiid); 6615 else 6616 ahc_outb(ahc, SCSIID, scsiid); 6617 } 6618 6619 void 6620 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 6621 { 6622 struct target_cmd *cmd; 6623 6624 /* 6625 * If the card supports auto-access pause, 6626 * we can access the card directly regardless 6627 * of whether it is paused or not. 6628 */ 6629 if ((ahc->features & AHC_AUTOPAUSE) != 0) 6630 paused = TRUE; 6631 6632 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 6633 6634 /* 6635 * Only advance through the queue if we 6636 * have the resources to process the command. 6637 */ 6638 if (ahc_handle_target_cmd(ahc, cmd) != 0) 6639 break; 6640 6641 ahc->tqinfifonext++; 6642 cmd->cmd_valid = 0; 6643 6644 /* 6645 * Lazily update our position in the target mode incoming 6646 * command queue as seen by the sequencer. 6647 */ 6648 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 6649 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 6650 u_int hs_mailbox; 6651 6652 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 6653 hs_mailbox &= ~HOST_TQINPOS; 6654 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 6655 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 6656 } else { 6657 if (!paused) 6658 ahc_pause(ahc); 6659 ahc_outb(ahc, KERNEL_TQINPOS, 6660 ahc->tqinfifonext & HOST_TQINPOS); 6661 if (!paused) 6662 ahc_unpause(ahc); 6663 } 6664 } 6665 } 6666 } 6667 6668 static int 6669 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 6670 { 6671 struct ahc_tmode_tstate *tstate; 6672 struct ahc_tmode_lstate *lstate; 6673 struct ccb_accept_tio *atio; 6674 uint8_t *byte; 6675 int initiator; 6676 int target; 6677 int lun; 6678 6679 initiator = SCSIID_TARGET(ahc, cmd->scsiid); 6680 target = SCSIID_OUR_ID(cmd->scsiid); 6681 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 6682 6683 byte = cmd->bytes; 6684 tstate = ahc->enabled_targets[target]; 6685 lstate = NULL; 6686 if (tstate != NULL) 6687 lstate = tstate->enabled_luns[lun]; 6688 6689 /* 6690 * Commands for disabled luns go to the black hole driver. 6691 */ 6692 if (lstate == NULL) 6693 lstate = ahc->black_hole; 6694 6695 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 6696 if (atio == NULL) { 6697 ahc->flags |= AHC_TQINFIFO_BLOCKED; 6698 /* 6699 * Wait for more ATIOs from the peripheral driver for this lun. 6700 */ 6701 return (1); 6702 } else 6703 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 6704 #if 0 6705 printf("Incoming command from %d for %d:%d%s\n", 6706 initiator, target, lun, 6707 lstate == ahc->black_hole ? "(Black Holed)" : ""); 6708 #endif 6709 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 6710 6711 if (lstate == ahc->black_hole) { 6712 /* Fill in the wildcards */ 6713 atio->ccb_h.target_id = target; 6714 atio->ccb_h.target_lun = lun; 6715 } 6716 6717 /* 6718 * Package it up and send it off to 6719 * whomever has this lun enabled. 6720 */ 6721 atio->sense_len = 0; 6722 atio->init_id = initiator; 6723 if (byte[0] != 0xFF) { 6724 /* Tag was included */ 6725 atio->tag_action = *byte++; 6726 atio->tag_id = *byte++; 6727 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 6728 } else { 6729 atio->ccb_h.flags = 0; 6730 } 6731 byte++; 6732 6733 /* Okay. Now determine the cdb size based on the command code */ 6734 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 6735 case 0: 6736 atio->cdb_len = 6; 6737 break; 6738 case 1: 6739 case 2: 6740 atio->cdb_len = 10; 6741 break; 6742 case 4: 6743 atio->cdb_len = 16; 6744 break; 6745 case 5: 6746 atio->cdb_len = 12; 6747 break; 6748 case 3: 6749 default: 6750 /* Only copy the opcode. */ 6751 atio->cdb_len = 1; 6752 printf("Reserved or VU command code type encountered\n"); 6753 break; 6754 } 6755 6756 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 6757 6758 atio->ccb_h.status |= CAM_CDB_RECVD; 6759 6760 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 6761 /* 6762 * We weren't allowed to disconnect. 6763 * We're hanging on the bus until a 6764 * continue target I/O comes in response 6765 * to this accept tio. 6766 */ 6767 #if 0 6768 printf("Received Immediate Command %d:%d:%d - %p\n", 6769 initiator, target, lun, ahc->pending_device); 6770 #endif 6771 ahc->pending_device = lstate; 6772 ahc_freeze_ccb((union ccb *)atio); 6773 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 6774 } 6775 xpt_done((union ccb*)atio); 6776 return (0); 6777 } 6778 6779 #endif 6780