1 /* 2 * Core routines and tables shareable across OS platforms. 3 * 4 * Copyright (c) 1994-2001 Justin T. Gibbs. 5 * Copyright (c) 2000-2001 Adaptec Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * substantially similar to the "NO WARRANTY" disclaimer below 16 * ("Disclaimer") and any redistribution must be conditioned upon 17 * including a substantially similar Disclaimer requirement for further 18 * binary redistribution. 19 * 3. Neither the names of the above-listed copyright holders nor the names 20 * of any contributors may be used to endorse or promote products derived 21 * from this software without specific prior written permission. 22 * 23 * Alternatively, this software may be distributed under the terms of the 24 * GNU General Public License ("GPL") version 2 as published by the Free 25 * Software Foundation. 26 * 27 * NO WARRANTY 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#59 $
 *
 * $FreeBSD$
 */

/*
 * OS-specific glue, register accessors, and the sequencer instruction
 * format are pulled from per-platform headers: local paths on Linux,
 * the dev/aic7xxx tree on FreeBSD.
 */
#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"
#else
#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
#endif

/****************************** Softc Data ************************************/
/* Global list of all controller instances managed by this core. */
struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);

/***************************** Lookup Tables **********************************/
/*
 * Human readable chip names, indexed by the softc's chip type.
 * Entry 0 is the "no chip detected" placeholder.
 */
char *ahc_chip_names[] =
{
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7895",
	"aic7895C",
	"aic7890/91",
	"aic7896/97",
	"aic7892",
	"aic7899"
};
static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);

/*
 * Hardware error codes.
80 */ 81 struct ahc_hard_error_entry { 82 uint8_t errno; 83 char *errmesg; 84 }; 85 86 static struct ahc_hard_error_entry ahc_hard_errors[] = { 87 { ILLHADDR, "Illegal Host Access" }, 88 { ILLSADDR, "Illegal Sequencer Address referrenced" }, 89 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 90 { SQPARERR, "Sequencer Parity Error" }, 91 { DPARERR, "Data-path Parity Error" }, 92 { MPARERR, "Scratch or SCB Memory Parity Error" }, 93 { PCIERRSTAT, "PCI Error detected" }, 94 { CIOPARERR, "CIOBUS Parity Error" }, 95 }; 96 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors); 97 98 static struct ahc_phase_table_entry ahc_phase_table[] = 99 { 100 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 101 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 102 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, 103 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, 104 { P_COMMAND, MSG_NOOP, "in Command phase" }, 105 { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, 106 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, 107 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, 108 { P_BUSFREE, MSG_NOOP, "while idle" }, 109 { 0, MSG_NOOP, "in unknown phase" } 110 }; 111 112 /* 113 * In most cases we only wish to itterate over real phases, so 114 * exclude the last element from the count. 115 */ 116 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1; 117 118 /* 119 * Valid SCSIRATE values. (p. 3-17) 120 * Provides a mapping of tranfer periods in ns to the proper value to 121 * stick in the scsixfer reg. 
 */
/*
 * Each entry gives the Ultra2 SCSIRATE encoding, the fast/ultra SCSIRATE
 * encoding, the period factor, and the transfer rate as a printable
 * string.  The table is terminated by an all-zero entry with a NULL name.
 */
static struct ahc_syncrate ahc_syncrates[] =
{
      /* ultra2    fast/ultra  period     rate */
	{ 0x42,      0x000,      9,      "80.0" },
	{ 0x03,      0x000,     10,      "40.0" },
	{ 0x04,      0x000,     11,      "33.0" },
	{ 0x05,      0x100,     12,      "20.0" },
	{ 0x06,      0x110,     15,      "16.0" },
	{ 0x07,      0x120,     18,      "13.4" },
	{ 0x08,      0x000,     25,      "10.0" },
	{ 0x19,      0x010,     31,      "8.0"  },
	{ 0x1a,      0x020,     37,      "6.67" },
	{ 0x1b,      0x030,     43,      "5.7"  },
	{ 0x1c,      0x040,     50,      "5.0"  },
	{ 0x00,      0x050,     56,      "4.4"  },
	{ 0x00,      0x060,     62,      "4.0"  },
	{ 0x00,      0x070,     68,      "3.6"  },
	{ 0x00,      0x000,      0,      NULL   }
};

/* Our Sequencer Program */
#include "aic7xxx_seq.h"

/**************************** Function Declarations ***************************/
static void		ahc_force_renegotiation(struct ahc_softc *ahc);
static struct ahc_tmode_tstate*
			ahc_alloc_tstate(struct ahc_softc *ahc,
					 u_int scsi_id, char channel);
#ifdef AHC_TARGET_MODE
static void		ahc_free_tstate(struct ahc_softc *ahc,
					u_int scsi_id, char channel, int force);
#endif
static struct ahc_syncrate*
			ahc_devlimited_syncrate(struct ahc_softc *ahc,
						struct ahc_initiator_tinfo *,
						u_int *period,
						u_int *ppr_options,
						role_t role);
static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
					  struct ahc_devinfo *devinfo);
static void		ahc_scb_devinfo(struct ahc_softc *ahc,
					struct ahc_devinfo *devinfo,
					struct scb *scb);
static void		ahc_assert_atn(struct ahc_softc *ahc);
static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
						   struct ahc_devinfo *devinfo,
						   struct scb *scb);
static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
					       struct ahc_devinfo *devinfo);
static void		ahc_construct_sdtr(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   u_int period, u_int offset);
static void		ahc_construct_wdtr(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   u_int bus_width);
static void		ahc_construct_ppr(struct ahc_softc *ahc,
					  struct ahc_devinfo *devinfo,
					  u_int period, u_int offset,
					  u_int bus_width, u_int ppr_options);
static void		ahc_clear_msg_state(struct ahc_softc *ahc);
static void		ahc_handle_message_phase(struct ahc_softc *ahc);
/* Message classes used when scanning the outgoing message buffer. */
typedef enum {
	AHCMSG_1B,
	AHCMSG_2B,
	AHCMSG_EXT
} ahc_msgtype;
static int		ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
				     u_int msgval, int full);
static int		ahc_parse_msg(struct ahc_softc *ahc,
				      struct ahc_devinfo *devinfo);
static int		ahc_handle_msg_reject(struct ahc_softc *ahc,
					      struct ahc_devinfo *devinfo);
static void		ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
						    struct ahc_devinfo *devinfo);
static void		ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
static void		ahc_handle_devreset(struct ahc_softc *ahc,
					    struct ahc_devinfo *devinfo,
					    cam_status status, char *message,
					    int verbose_level);
/*
 * NOTE(review): tested with #if here but #ifdef elsewhere in this file.
 * An undefined AHC_TARGET_MODE evaluates to 0 under #if, so behavior
 * matches, but the inconsistency is worth cleaning up.
 */
#if AHC_TARGET_MODE
static void		ahc_setup_target_msgin(struct ahc_softc *ahc,
					       struct ahc_devinfo *devinfo,
					       struct scb *scb);
#endif

static bus_dmamap_callback_t	ahc_dmamap_cb;
static void		ahc_build_free_scb_list(struct ahc_softc *ahc);
static int		ahc_init_scbdata(struct ahc_softc *ahc);
static void		ahc_fini_scbdata(struct ahc_softc *ahc);
static void		ahc_qinfifo_requeue(struct ahc_softc *ahc,
					    struct scb *prev_scb,
					    struct scb *scb);
static int		ahc_qinfifo_count(struct ahc_softc *ahc);
static u_int		ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
						   u_int prev, u_int scbptr);
static void		ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
static u_int		ahc_rem_wscb(struct ahc_softc *ahc,
				     u_int scbpos, u_int prev);
static void		ahc_reset_current_bus(struct ahc_softc *ahc);
#ifdef AHC_DUMP_SEQ
static void		ahc_dumpseq(struct ahc_softc *ahc);
#endif
static void		ahc_loadseq(struct ahc_softc *ahc);
static int		ahc_check_patch(struct ahc_softc *ahc,
					struct patch **start_patch,
					u_int start_instr, u_int *skip_addr);
static void		ahc_download_instr(struct ahc_softc *ahc,
					   u_int instrptr, uint8_t *dconsts);
#ifdef AHC_TARGET_MODE
static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
					       struct ahc_tmode_lstate *lstate,
					       u_int initiator_id,
					       u_int event_type,
					       u_int event_arg);
static void		ahc_update_scsiid(struct ahc_softc *ahc,
					  u_int targid_mask);
static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
					      struct target_cmd *cmd);
#endif
/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero.
 * Pauses the chip, discards any in-flight message and DMA state,
 * re-enables reselection, and resumes execution at SEQADDR 0.
 */
void
ahc_restart(struct ahc_softc *ahc)
{

	ahc_pause(ahc);

	/* No more pending messages. */
	ahc_clear_msg_state(ahc);

	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
	/* Invalidate the remembered connection identity. */
	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
	ahc_outb(ahc, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);

	/* Always allow reselection */
	ahc_outb(ahc, SCSISEQ,
		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	if ((ahc->features & AHC_CMD_CHAN) != 0) {
		/* Ensure that no DMA operations are in progress */
		ahc_outb(ahc, CCSCBCNT, 0);
		ahc_outb(ahc, CCSGCTL, 0);
		ahc_outb(ahc, CCSCBCTL, 0);
	}
	/*
	 * If we were in the process of DMA'ing SCB data into
	 * an SCB, replace that SCB on the free list.  This prevents
	 * an SCB leak.
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
		ahc_add_curscb_to_free_list(ahc);
		ahc_outb(ahc, SEQ_FLAGS2,
			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
	}
	ahc_outb(ahc, MWI_RESIDUAL, 0);
	ahc_outb(ahc, SEQCTL, FASTMODE);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	ahc_unpause(ahc);
}

/************************* Input/Output Queues ********************************/
/*
 * Drain the shared-memory qoutfifo of completed SCB tags, completing
 * each corresponding transaction.
 */
void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int  scb_index;

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {

		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
		if ((ahc->qoutfifonext & 0x03) == 0x03) {
			u_int modnext;

			/*
			 * Clear 32bits of QOUTFIFO at a time
			 * so that we don't clobber an incoming
			 * byte DMA to the array on architectures
			 * that only support 32bit load and store
			 * operations.
			 */
			modnext = ahc->qoutfifonext & ~0x3;
			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
			ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
					ahc->shared_data_dmamap,
					/*offset*/modnext, /*len*/4,
					BUS_DMASYNC_PREREAD);
		}
		ahc->qoutfifonext++;

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahc_name(ahc), scb_index,
			       ahc->qoutfifonext - 1);
			continue;
		}

		/*
		 * Save off the residual
		 * if there is one.
340 */ 341 ahc_update_residual(ahc, scb); 342 ahc_done(ahc, scb); 343 } 344 } 345 346 void 347 ahc_run_untagged_queues(struct ahc_softc *ahc) 348 { 349 int i; 350 351 for (i = 0; i < 16; i++) 352 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 353 } 354 355 void 356 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 357 { 358 struct scb *scb; 359 360 if (ahc->untagged_queue_lock != 0) 361 return; 362 363 if ((scb = TAILQ_FIRST(queue)) != NULL 364 && (scb->flags & SCB_ACTIVE) == 0) { 365 scb->flags |= SCB_ACTIVE; 366 ahc_queue_scb(ahc, scb); 367 } 368 } 369 370 /************************* Interrupt Handling *********************************/ 371 void 372 ahc_handle_brkadrint(struct ahc_softc *ahc) 373 { 374 /* 375 * We upset the sequencer :-( 376 * Lookup the error message 377 */ 378 int i; 379 int error; 380 381 error = ahc_inb(ahc, ERROR); 382 for (i = 0; error != 1 && i < num_errors; i++) 383 error >>= 1; 384 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 385 ahc_name(ahc), ahc_hard_errors[i].errmesg, 386 ahc_inb(ahc, SEQADDR0) | 387 (ahc_inb(ahc, SEQADDR1) << 8)); 388 389 ahc_dump_card_state(ahc); 390 391 /* Tell everyone that this HBA is no longer availible */ 392 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 393 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 394 CAM_NO_HBA); 395 396 /* Disable all interrupt sources by resetting the controller */ 397 ahc_shutdown(ahc); 398 } 399 400 void 401 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 402 { 403 struct scb *scb; 404 struct ahc_devinfo devinfo; 405 406 ahc_fetch_devinfo(ahc, &devinfo); 407 408 /* 409 * Clear the upper byte that holds SEQINT status 410 * codes and clear the SEQINT bit. We will unpause 411 * the sequencer, if appropriate, after servicing 412 * the request. 
413 */ 414 ahc_outb(ahc, CLRINT, CLRSEQINT); 415 switch (intstat & SEQINT_MASK) { 416 case BAD_STATUS: 417 { 418 u_int scb_index; 419 struct hardware_scb *hscb; 420 421 /* 422 * Set the default return value to 0 (don't 423 * send sense). The sense code will change 424 * this if needed. 425 */ 426 ahc_outb(ahc, RETURN_1, 0); 427 428 /* 429 * The sequencer will notify us when a command 430 * has an error that would be of interest to 431 * the kernel. This allows us to leave the sequencer 432 * running in the common case of command completes 433 * without error. The sequencer will already have 434 * dma'd the SCB back up to us, so we can reference 435 * the in kernel copy directly. 436 */ 437 scb_index = ahc_inb(ahc, SCB_TAG); 438 scb = ahc_lookup_scb(ahc, scb_index); 439 if (scb == NULL) { 440 printf("%s:%c:%d: ahc_intr - referenced scb " 441 "not valid during seqint 0x%x scb(%d)\n", 442 ahc_name(ahc), devinfo.channel, 443 devinfo.target, intstat, scb_index); 444 ahc_dump_card_state(ahc); 445 panic("for safety"); 446 goto unpause; 447 } 448 449 hscb = scb->hscb; 450 451 /* Don't want to clobber the original sense code */ 452 if ((scb->flags & SCB_SENSE) != 0) { 453 /* 454 * Clear the SCB_SENSE Flag and have 455 * the sequencer do a normal command 456 * complete. 457 */ 458 scb->flags &= ~SCB_SENSE; 459 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 460 break; 461 } 462 ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 463 /* Freeze the queue until the client sees the error. 
*/ 464 ahc_freeze_devq(ahc, scb); 465 ahc_freeze_scb(scb); 466 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 467 switch (hscb->shared_data.status.scsi_status) { 468 case SCSI_STATUS_OK: 469 printf("%s: Interrupted for staus of 0???\n", 470 ahc_name(ahc)); 471 break; 472 case SCSI_STATUS_CMD_TERMINATED: 473 case SCSI_STATUS_CHECK_COND: 474 { 475 struct ahc_dma_seg *sg; 476 struct scsi_sense *sc; 477 struct ahc_initiator_tinfo *targ_info; 478 struct ahc_tmode_tstate *tstate; 479 struct ahc_transinfo *tinfo; 480 #ifdef AHC_DEBUG 481 if (ahc_debug & AHC_SHOWSENSE) { 482 ahc_print_path(ahc, scb); 483 printf("SCB %d: requests Check Status\n", 484 scb->hscb->tag); 485 } 486 #endif 487 488 if (ahc_perform_autosense(scb) == 0) 489 break; 490 491 targ_info = ahc_fetch_transinfo(ahc, 492 devinfo.channel, 493 devinfo.our_scsiid, 494 devinfo.target, 495 &tstate); 496 tinfo = &targ_info->curr; 497 sg = scb->sg_list; 498 sc = (struct scsi_sense *)(&hscb->shared_data.cdb); 499 /* 500 * Save off the residual if there is one. 501 */ 502 ahc_update_residual(ahc, scb); 503 #ifdef AHC_DEBUG 504 if (ahc_debug & AHC_SHOWSENSE) { 505 ahc_print_path(ahc, scb); 506 printf("Sending Sense\n"); 507 } 508 #endif 509 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 510 sg->len = ahc_get_sense_bufsize(ahc, scb); 511 sg->len |= AHC_DMA_LAST_SEG; 512 513 /* Fixup byte order */ 514 sg->addr = ahc_htole32(sg->addr); 515 sg->len = ahc_htole32(sg->len); 516 517 sc->opcode = REQUEST_SENSE; 518 sc->byte2 = 0; 519 if (tinfo->protocol_version <= SCSI_REV_2 520 && SCB_GET_LUN(scb) < 8) 521 sc->byte2 = SCB_GET_LUN(scb) << 5; 522 sc->unused[0] = 0; 523 sc->unused[1] = 0; 524 sc->length = sg->len; 525 sc->control = 0; 526 527 /* 528 * We can't allow the target to disconnect. 529 * This will be an untagged transaction and 530 * having the target disconnect will make this 531 * transaction indestinguishable from outstanding 532 * tagged transactions. 
533 */ 534 hscb->control = 0; 535 536 /* 537 * This request sense could be because the 538 * the device lost power or in some other 539 * way has lost our transfer negotiations. 540 * Renegotiate if appropriate. Unit attention 541 * errors will be reported before any data 542 * phases occur. 543 */ 544 if (ahc_get_residual(scb) 545 == ahc_get_transfer_length(scb)) { 546 ahc_update_neg_request(ahc, &devinfo, 547 tstate, targ_info, 548 /*force*/TRUE); 549 } 550 if (tstate->auto_negotiate & devinfo.target_mask) { 551 hscb->control |= MK_MESSAGE; 552 scb->flags &= ~SCB_NEGOTIATE; 553 scb->flags |= SCB_AUTO_NEGOTIATE; 554 } 555 hscb->cdb_len = sizeof(*sc); 556 hscb->dataptr = sg->addr; 557 hscb->datacnt = sg->len; 558 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; 559 hscb->sgptr = ahc_htole32(hscb->sgptr); 560 scb->sg_count = 1; 561 scb->flags |= SCB_SENSE; 562 ahc_qinfifo_requeue_tail(ahc, scb); 563 ahc_outb(ahc, RETURN_1, SEND_SENSE); 564 #ifdef __FreeBSD__ 565 /* 566 * Ensure we have enough time to actually 567 * retrieve the sense. 
568 */ 569 untimeout(ahc_timeout, (caddr_t)scb, 570 scb->io_ctx->ccb_h.timeout_ch); 571 scb->io_ctx->ccb_h.timeout_ch = 572 timeout(ahc_timeout, (caddr_t)scb, 5 * hz); 573 #endif 574 break; 575 } 576 default: 577 break; 578 } 579 break; 580 } 581 case NO_MATCH: 582 { 583 /* Ensure we don't leave the selection hardware on */ 584 ahc_outb(ahc, SCSISEQ, 585 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 586 587 printf("%s:%c:%d: no active SCB for reconnecting " 588 "target - issuing BUS DEVICE RESET\n", 589 ahc_name(ahc), devinfo.channel, devinfo.target); 590 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 591 "ARG_1 == 0x%x ACCUM = 0x%x\n", 592 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 593 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 594 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 595 "SINDEX == 0x%x\n", 596 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 597 ahc_index_busy_tcl(ahc, 598 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 599 ahc_inb(ahc, SAVED_LUN))), 600 ahc_inb(ahc, SINDEX)); 601 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 602 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 603 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 604 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 605 ahc_inb(ahc, SCB_CONTROL)); 606 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 607 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 608 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 609 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 610 ahc_dump_card_state(ahc); 611 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 612 ahc->msgout_len = 1; 613 ahc->msgout_index = 0; 614 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 615 ahc_outb(ahc, MSG_OUT, HOST_MSG); 616 ahc_assert_atn(ahc); 617 break; 618 } 619 case SEND_REJECT: 620 { 621 u_int rejbyte = ahc_inb(ahc, ACCUM); 622 printf("%s:%c:%d: Warning - unknown message received from " 623 "target (0x%x). 
Rejecting\n", 624 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 625 break; 626 } 627 case NO_IDENT: 628 { 629 /* 630 * The reconnecting target either did not send an identify 631 * message, or did, but we didn't find an SCB to match and 632 * before it could respond to our ATN/abort, it hit a dataphase. 633 * The only safe thing to do is to blow it away with a bus 634 * reset. 635 */ 636 int found; 637 638 printf("%s:%c:%d: Target did not send an IDENTIFY message. " 639 "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n", 640 ahc_name(ahc), devinfo.channel, devinfo.target, 641 ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID)); 642 found = ahc_reset_channel(ahc, devinfo.channel, 643 /*initiate reset*/TRUE); 644 printf("%s: Issued Channel %c Bus Reset. " 645 "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel, 646 found); 647 return; 648 } 649 case IGN_WIDE_RES: 650 ahc_handle_ign_wide_residue(ahc, &devinfo); 651 break; 652 case PDATA_REINIT: 653 ahc_reinitialize_dataptrs(ahc); 654 break; 655 case BAD_PHASE: 656 { 657 u_int lastphase; 658 659 lastphase = ahc_inb(ahc, LASTPHASE); 660 printf("%s:%c:%d: unknown scsi bus phase %x, " 661 "lastphase = 0x%x. Attempting to continue\n", 662 ahc_name(ahc), devinfo.channel, devinfo.target, 663 lastphase, ahc_inb(ahc, SCSISIGI)); 664 break; 665 } 666 case MISSED_BUSFREE: 667 { 668 u_int lastphase; 669 670 lastphase = ahc_inb(ahc, LASTPHASE); 671 printf("%s:%c:%d: Missed busfree. " 672 "Lastphase = 0x%x, Curphase = 0x%x\n", 673 ahc_name(ahc), devinfo.channel, devinfo.target, 674 lastphase, ahc_inb(ahc, SCSISIGI)); 675 ahc_restart(ahc); 676 return; 677 } 678 case HOST_MSG_LOOP: 679 { 680 /* 681 * The sequencer has encountered a message phase 682 * that requires host assistance for completion. 683 * While handling the message phase(s), we will be 684 * notified by the sequencer after each byte is 685 * transfered so we can track bus phase changes. 
686 * 687 * If this is the first time we've seen a HOST_MSG_LOOP 688 * interrupt, initialize the state of the host message 689 * loop. 690 */ 691 if (ahc->msg_type == MSG_TYPE_NONE) { 692 struct scb *scb; 693 u_int scb_index; 694 u_int bus_phase; 695 696 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 697 if (bus_phase != P_MESGIN 698 && bus_phase != P_MESGOUT) { 699 printf("ahc_intr: HOST_MSG_LOOP bad " 700 "phase 0x%x\n", 701 bus_phase); 702 /* 703 * Probably transitioned to bus free before 704 * we got here. Just punt the message. 705 */ 706 ahc_clear_intstat(ahc); 707 ahc_restart(ahc); 708 return; 709 } 710 711 scb_index = ahc_inb(ahc, SCB_TAG); 712 scb = ahc_lookup_scb(ahc, scb_index); 713 if (devinfo.role == ROLE_INITIATOR) { 714 if (scb == NULL) 715 panic("HOST_MSG_LOOP with " 716 "invalid SCB %x\n", scb_index); 717 718 if (bus_phase == P_MESGOUT) 719 ahc_setup_initiator_msgout(ahc, 720 &devinfo, 721 scb); 722 else { 723 ahc->msg_type = 724 MSG_TYPE_INITIATOR_MSGIN; 725 ahc->msgin_index = 0; 726 } 727 } 728 #if AHC_TARGET_MODE 729 else { 730 if (bus_phase == P_MESGOUT) { 731 ahc->msg_type = 732 MSG_TYPE_TARGET_MSGOUT; 733 ahc->msgin_index = 0; 734 } 735 else 736 ahc_setup_target_msgin(ahc, 737 &devinfo, 738 scb); 739 } 740 #endif 741 } 742 743 ahc_handle_message_phase(ahc); 744 break; 745 } 746 case PERR_DETECTED: 747 { 748 /* 749 * If we've cleared the parity error interrupt 750 * but the sequencer still believes that SCSIPERR 751 * is true, it must be that the parity error is 752 * for the currently presented byte on the bus, 753 * and we are not in a phase (data-in) where we will 754 * eventually ack this byte. Ack the byte and 755 * throw it away in the hope that the target will 756 * take us to message out to deliver the appropriate 757 * error message. 
758 */ 759 if ((intstat & SCSIINT) == 0 760 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 761 762 if ((ahc->features & AHC_DT) == 0) { 763 u_int curphase; 764 765 /* 766 * The hardware will only let you ack bytes 767 * if the expected phase in SCSISIGO matches 768 * the current phase. Make sure this is 769 * currently the case. 770 */ 771 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 772 ahc_outb(ahc, LASTPHASE, curphase); 773 ahc_outb(ahc, SCSISIGO, curphase); 774 } 775 ahc_inb(ahc, SCSIDATL); 776 } 777 break; 778 } 779 case DATA_OVERRUN: 780 { 781 /* 782 * When the sequencer detects an overrun, it 783 * places the controller in "BITBUCKET" mode 784 * and allows the target to complete its transfer. 785 * Unfortunately, none of the counters get updated 786 * when the controller is in this mode, so we have 787 * no way of knowing how large the overrun was. 788 */ 789 u_int scbindex = ahc_inb(ahc, SCB_TAG); 790 u_int lastphase = ahc_inb(ahc, LASTPHASE); 791 u_int i; 792 793 scb = ahc_lookup_scb(ahc, scbindex); 794 for (i = 0; i < num_phases; i++) { 795 if (lastphase == ahc_phase_table[i].phase) 796 break; 797 } 798 ahc_print_path(ahc, scb); 799 printf("data overrun detected %s." 800 " Tag == 0x%x.\n", 801 ahc_phase_table[i].phasemsg, 802 scb->hscb->tag); 803 ahc_print_path(ahc, scb); 804 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 805 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 806 ahc_get_transfer_length(scb), scb->sg_count); 807 if (scb->sg_count > 0) { 808 for (i = 0; i < scb->sg_count; i++) { 809 810 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 811 i, 812 (ahc_le32toh(scb->sg_list[i].len) >> 24 813 & SG_HIGH_ADDR_BITS), 814 ahc_le32toh(scb->sg_list[i].addr), 815 ahc_le32toh(scb->sg_list[i].len) 816 & AHC_SG_LEN_MASK); 817 } 818 } 819 /* 820 * Set this and it will take effect when the 821 * target does a command complete. 
822 */ 823 ahc_freeze_devq(ahc, scb); 824 if ((scb->flags & SCB_SENSE) == 0) { 825 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); 826 } else { 827 scb->flags &= ~SCB_SENSE; 828 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 829 } 830 ahc_freeze_scb(scb); 831 832 if ((ahc->features & AHC_ULTRA2) != 0) { 833 /* 834 * Clear the channel in case we return 835 * to data phase later. 836 */ 837 ahc_outb(ahc, SXFRCTL0, 838 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 839 ahc_outb(ahc, SXFRCTL0, 840 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 841 } 842 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 843 u_int dscommand1; 844 845 /* Ensure HHADDR is 0 for future DMA operations. */ 846 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 847 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 848 ahc_outb(ahc, HADDR, 0); 849 ahc_outb(ahc, DSCOMMAND1, dscommand1); 850 } 851 break; 852 } 853 case MKMSG_FAILED: 854 { 855 u_int scbindex; 856 857 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 858 ahc_name(ahc), devinfo.channel, devinfo.target, 859 devinfo.lun); 860 scbindex = ahc_inb(ahc, SCB_TAG); 861 scb = ahc_lookup_scb(ahc, scbindex); 862 if (scb != NULL 863 && (scb->flags & SCB_RECOVERY_SCB) != 0) 864 /* 865 * Ensure that we didn't put a second instance of this 866 * SCB into the QINFIFO. 867 */ 868 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 869 SCB_GET_CHANNEL(ahc, scb), 870 SCB_GET_LUN(scb), scb->hscb->tag, 871 ROLE_INITIATOR, /*status*/0, 872 SEARCH_REMOVE); 873 break; 874 } 875 case NO_FREE_SCB: 876 { 877 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 878 ahc_dump_card_state(ahc); 879 panic("for safety"); 880 break; 881 } 882 case SCB_MISMATCH: 883 { 884 u_int scbptr; 885 886 scbptr = ahc_inb(ahc, SCBPTR); 887 printf("Bogus TAG after DMA. 
SCBPTR %d, tag %d, our tag %d\n", 888 scbptr, ahc_inb(ahc, ARG_1), 889 ahc->scb_data->hscbs[scbptr].tag); 890 ahc_dump_card_state(ahc); 891 panic("for saftey"); 892 break; 893 } 894 case OUT_OF_RANGE: 895 { 896 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 897 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 898 "ARG_1 == 0x%x ACCUM = 0x%x\n", 899 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 900 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 901 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 902 "SINDEX == 0x%x\n, A == 0x%x\n", 903 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 904 ahc_index_busy_tcl(ahc, 905 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 906 ahc_inb(ahc, SAVED_LUN))), 907 ahc_inb(ahc, SINDEX), 908 ahc_inb(ahc, ACCUM)); 909 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 910 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 911 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 912 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 913 ahc_inb(ahc, SCB_CONTROL)); 914 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 915 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 916 ahc_dump_card_state(ahc); 917 panic("for safety"); 918 break; 919 } 920 default: 921 printf("ahc_intr: seqint, " 922 "intstat == 0x%x, scsisigi = 0x%x\n", 923 intstat, ahc_inb(ahc, SCSISIGI)); 924 break; 925 } 926 unpause: 927 /* 928 * The sequencer is paused immediately on 929 * a SEQINT, so we should restart it when 930 * we're done. 931 */ 932 ahc_unpause(ahc); 933 } 934 935 void 936 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 937 { 938 u_int scb_index; 939 u_int status0; 940 u_int status; 941 struct scb *scb; 942 char cur_channel; 943 char intr_channel; 944 945 /* Make sure the sequencer is in a safe location. 
*/ 946 ahc_clear_critical_section(ahc); 947 948 if ((ahc->features & AHC_TWIN) != 0 949 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 950 cur_channel = 'B'; 951 else 952 cur_channel = 'A'; 953 intr_channel = cur_channel; 954 955 if ((ahc->features & AHC_ULTRA2) != 0) 956 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 957 else 958 status0 = 0; 959 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 960 if (status == 0 && status0 == 0) { 961 if ((ahc->features & AHC_TWIN) != 0) { 962 /* Try the other channel */ 963 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 964 status = ahc_inb(ahc, SSTAT1) 965 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 966 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 967 } 968 if (status == 0) { 969 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 970 ahc_outb(ahc, CLRINT, CLRSCSIINT); 971 ahc_unpause(ahc); 972 return; 973 } 974 } 975 976 scb_index = ahc_inb(ahc, SCB_TAG); 977 scb = ahc_lookup_scb(ahc, scb_index); 978 if (scb != NULL 979 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0) 980 scb = NULL; 981 982 if ((ahc->features & AHC_ULTRA2) != 0 983 && (status0 & IOERR) != 0) { 984 int now_lvd; 985 986 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 987 printf("%s: Transceiver State Has Changed to %s mode\n", 988 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 989 ahc_outb(ahc, CLRSINT0, CLRIOERR); 990 /* 991 * When transitioning to SE mode, the reset line 992 * glitches, triggering an arbitration bug in some 993 * Ultra2 controllers. This bug is cleared when we 994 * assert the reset line. Since a reset glitch has 995 * already occurred with this transition and a 996 * transceiver state change is handled just like 997 * a bus reset anyway, asserting the reset line 998 * ourselves is safe. 
999 */ 1000 ahc_reset_channel(ahc, intr_channel, 1001 /*Initiate Reset*/now_lvd == 0); 1002 } else if ((status & SCSIRSTI) != 0) { 1003 printf("%s: Someone reset channel %c\n", 1004 ahc_name(ahc), intr_channel); 1005 if (intr_channel != cur_channel) 1006 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1007 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1008 } else if ((status & SCSIPERR) != 0) { 1009 /* 1010 * Determine the bus phase and queue an appropriate message. 1011 * SCSIPERR is latched true as soon as a parity error 1012 * occurs. If the sequencer acked the transfer that 1013 * caused the parity error and the currently presented 1014 * transfer on the bus has correct parity, SCSIPERR will 1015 * be cleared by CLRSCSIPERR. Use this to determine if 1016 * we should look at the last phase the sequencer recorded, 1017 * or the current phase presented on the bus. 1018 */ 1019 u_int mesg_out; 1020 u_int curphase; 1021 u_int errorphase; 1022 u_int lastphase; 1023 u_int scsirate; 1024 u_int i; 1025 u_int sstat2; 1026 1027 lastphase = ahc_inb(ahc, LASTPHASE); 1028 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1029 sstat2 = ahc_inb(ahc, SSTAT2); 1030 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1031 /* 1032 * For all phases save DATA, the sequencer won't 1033 * automatically ack a byte that has a parity error 1034 * in it. So the only way that the current phase 1035 * could be 'data-in' is if the parity error is for 1036 * an already acked byte in the data phase. During 1037 * synchronous data-in transfers, we may actually 1038 * ack bytes before latching the current phase in 1039 * LASTPHASE, leading to the discrepancy between 1040 * curphase and lastphase. 
1041 */ 1042 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1043 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1044 errorphase = curphase; 1045 else 1046 errorphase = lastphase; 1047 1048 for (i = 0; i < num_phases; i++) { 1049 if (errorphase == ahc_phase_table[i].phase) 1050 break; 1051 } 1052 mesg_out = ahc_phase_table[i].mesg_out; 1053 if (scb != NULL) 1054 ahc_print_path(ahc, scb); 1055 else 1056 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1057 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1058 scsirate = ahc_inb(ahc, SCSIRATE); 1059 printf("parity error detected %s. " 1060 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1061 ahc_phase_table[i].phasemsg, 1062 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8), 1063 scsirate); 1064 1065 if ((ahc->features & AHC_DT) != 0) { 1066 1067 if ((sstat2 & CRCVALERR) != 0) 1068 printf("\tCRC Value Mismatch\n"); 1069 if ((sstat2 & CRCENDERR) != 0) 1070 printf("\tNo terminal CRC packet recevied\n"); 1071 if ((sstat2 & CRCREQERR) != 0) 1072 printf("\tIllegal CRC packet request\n"); 1073 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1074 printf("\tUnexpected %sDT Data Phase\n", 1075 (scsirate & SINGLE_EDGE) ? "" : "non-"); 1076 } 1077 1078 /* 1079 * We've set the hardware to assert ATN if we 1080 * get a parity error on "in" phases, so all we 1081 * need to do is stuff the message buffer with 1082 * the appropriate message. "In" phases have set 1083 * mesg_out to something other than MSG_NOP. 1084 */ 1085 if (mesg_out != MSG_NOOP) { 1086 if (ahc->msg_type != MSG_TYPE_NONE) 1087 ahc->send_msg_perror = TRUE; 1088 else 1089 ahc_outb(ahc, MSG_OUT, mesg_out); 1090 } 1091 /* 1092 * Force a renegotiation with this target just in 1093 * case we are out of sync for some external reason 1094 * unknown (or unreported) by the target. 
1095 */ 1096 ahc_force_renegotiation(ahc); 1097 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1098 ahc_unpause(ahc); 1099 } else if ((status & SELTO) != 0) { 1100 u_int scbptr; 1101 1102 /* Stop the selection */ 1103 ahc_outb(ahc, SCSISEQ, 0); 1104 1105 /* No more pending messages */ 1106 ahc_clear_msg_state(ahc); 1107 1108 /* Clear interrupt state */ 1109 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1110 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1111 1112 /* 1113 * Although the driver does not care about the 1114 * 'Selection in Progress' status bit, the busy 1115 * LED does. SELINGO is only cleared by a sucessfull 1116 * selection, so we must manually clear it to insure 1117 * the LED turns off just incase no future successful 1118 * selections occur (e.g. no devices on the bus). 1119 */ 1120 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1121 1122 scbptr = ahc_inb(ahc, WAITING_SCBH); 1123 ahc_outb(ahc, SCBPTR, scbptr); 1124 scb_index = ahc_inb(ahc, SCB_TAG); 1125 1126 scb = ahc_lookup_scb(ahc, scb_index); 1127 if (scb == NULL) { 1128 printf("%s: ahc_intr - referenced scb not " 1129 "valid during SELTO scb(%d, %d)\n", 1130 ahc_name(ahc), scbptr, scb_index); 1131 } else { 1132 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1133 ahc_freeze_devq(ahc, scb); 1134 } 1135 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1136 /* 1137 * Force a renegotiation with this target just in 1138 * case the cable was pulled and will later be 1139 * re-attached. The target may forget its negotiation 1140 * settings with us should it attempt to reselect 1141 * during the interruption. The target will not issue 1142 * a unit attention in this case, so we must always 1143 * renegotiate. 
1144 */ 1145 ahc_force_renegotiation(ahc); 1146 ahc_restart(ahc); 1147 } else if ((status & BUSFREE) != 0 1148 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1149 u_int lastphase; 1150 u_int saved_scsiid; 1151 u_int saved_lun; 1152 u_int target; 1153 u_int initiator_role_id; 1154 char channel; 1155 int printerror; 1156 1157 /* 1158 * Clear our selection hardware as soon as possible. 1159 * We may have an entry in the waiting Q for this target, 1160 * that is affected by this busfree and we don't want to 1161 * go about selecting the target while we handle the event. 1162 */ 1163 ahc_outb(ahc, SCSISEQ, 1164 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1165 1166 /* 1167 * Disable busfree interrupts and clear the busfree 1168 * interrupt status. We do this here so that several 1169 * bus transactions occur prior to clearing the SCSIINT 1170 * latch. It can take a bit for the clearing to take effect. 1171 */ 1172 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1173 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1174 1175 /* 1176 * Look at what phase we were last in. 1177 * If its message out, chances are pretty good 1178 * that the busfree was in response to one of 1179 * our abort requests. 
1180 */ 1181 lastphase = ahc_inb(ahc, LASTPHASE); 1182 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1183 saved_lun = ahc_inb(ahc, SAVED_LUN); 1184 target = SCSIID_TARGET(ahc, saved_scsiid); 1185 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1186 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1187 printerror = 1; 1188 1189 if (lastphase == P_MESGOUT) { 1190 struct ahc_devinfo devinfo; 1191 u_int tag; 1192 1193 ahc_fetch_devinfo(ahc, &devinfo); 1194 tag = SCB_LIST_NULL; 1195 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1196 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1197 if (ahc->msgout_buf[ahc->msgout_index - 1] 1198 == MSG_ABORT_TAG) 1199 tag = scb->hscb->tag; 1200 ahc_print_path(ahc, scb); 1201 printf("SCB %d - Abort%s Completed.\n", 1202 scb->hscb->tag, tag == SCB_LIST_NULL ? 1203 "" : " Tag"); 1204 ahc_abort_scbs(ahc, target, channel, 1205 saved_lun, tag, 1206 ROLE_INITIATOR, 1207 CAM_REQ_ABORTED); 1208 printerror = 0; 1209 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1210 MSG_BUS_DEV_RESET, TRUE)) { 1211 #ifdef __FreeBSD__ 1212 /* 1213 * Don't mark the user's request for this BDR 1214 * as completing with CAM_BDR_SENT. CAM3 1215 * specifies CAM_REQ_CMP. 1216 */ 1217 if (scb != NULL 1218 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1219 && ahc_match_scb(ahc, scb, target, channel, 1220 CAM_LUN_WILDCARD, 1221 SCB_LIST_NULL, 1222 ROLE_INITIATOR)) { 1223 ahc_set_transaction_status(scb, CAM_REQ_CMP); 1224 } 1225 #endif 1226 ahc_compile_devinfo(&devinfo, 1227 initiator_role_id, 1228 target, 1229 CAM_LUN_WILDCARD, 1230 channel, 1231 ROLE_INITIATOR); 1232 ahc_handle_devreset(ahc, &devinfo, 1233 CAM_BDR_SENT, 1234 "Bus Device Reset", 1235 /*verbose_level*/0); 1236 printerror = 0; 1237 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1238 MSG_EXT_PPR, FALSE)) { 1239 struct ahc_initiator_tinfo *tinfo; 1240 struct ahc_tmode_tstate *tstate; 1241 1242 /* 1243 * PPR Rejected. Try non-ppr negotiation 1244 * and retry command. 
1245 */ 1246 tinfo = ahc_fetch_transinfo(ahc, 1247 devinfo.channel, 1248 devinfo.our_scsiid, 1249 devinfo.target, 1250 &tstate); 1251 tinfo->curr.transport_version = 2; 1252 tinfo->goal.transport_version = 2; 1253 tinfo->goal.ppr_options = 0; 1254 ahc_qinfifo_requeue_tail(ahc, scb); 1255 printerror = 0; 1256 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1257 MSG_EXT_WDTR, FALSE) 1258 || ahc_sent_msg(ahc, AHCMSG_EXT, 1259 MSG_EXT_SDTR, FALSE)) { 1260 /* 1261 * Negotiation Rejected. Go-async and 1262 * retry command. 1263 */ 1264 ahc_set_width(ahc, &devinfo, 1265 MSG_EXT_WDTR_BUS_8_BIT, 1266 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1267 /*paused*/TRUE); 1268 ahc_set_syncrate(ahc, &devinfo, 1269 /*syncrate*/NULL, 1270 /*period*/0, /*offset*/0, 1271 /*ppr_options*/0, 1272 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1273 /*paused*/TRUE); 1274 ahc_qinfifo_requeue_tail(ahc, scb); 1275 printerror = 0; 1276 } 1277 } 1278 if (printerror != 0) { 1279 u_int i; 1280 1281 if (scb != NULL) { 1282 u_int tag; 1283 1284 if ((scb->hscb->control & TAG_ENB) != 0) 1285 tag = scb->hscb->tag; 1286 else 1287 tag = SCB_LIST_NULL; 1288 ahc_print_path(ahc, scb); 1289 ahc_abort_scbs(ahc, target, channel, 1290 SCB_GET_LUN(scb), tag, 1291 ROLE_INITIATOR, 1292 CAM_UNEXP_BUSFREE); 1293 } else { 1294 /* 1295 * We had not fully identified this connection, 1296 * so we cannot abort anything. 1297 */ 1298 printf("%s: ", ahc_name(ahc)); 1299 } 1300 for (i = 0; i < num_phases; i++) { 1301 if (lastphase == ahc_phase_table[i].phase) 1302 break; 1303 } 1304 printf("Unexpected busfree %s\n" 1305 "SEQADDR == 0x%x\n", 1306 ahc_phase_table[i].phasemsg, 1307 ahc_inb(ahc, SEQADDR0) 1308 | (ahc_inb(ahc, SEQADDR1) << 8)); 1309 } 1310 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1311 ahc_restart(ahc); 1312 } else { 1313 printf("%s: Missing case in ahc_handle_scsiint. 
status = %x\n",
		       ahc_name(ahc), status);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
	}
}

/*
 * Force renegotiation to occur the next time we initiate
 * a command to the current device.  Works by setting the
 * "force" flag when refreshing this target's negotiation
 * request state.
 */
static void
ahc_force_renegotiation(struct ahc_softc *ahc)
{
	struct ahc_devinfo devinfo;
	struct ahc_initiator_tinfo *targ_info;
	struct ahc_tmode_tstate *tstate;

	/* Identify the device currently addressed by the hardware. */
	ahc_fetch_devinfo(ahc, &devinfo);
	targ_info = ahc_fetch_transinfo(ahc,
					devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target,
					&tstate);
	ahc_update_neg_request(ahc, &devinfo, tstate,
			       targ_info, /*force*/TRUE);
}

/* Upper bound on single-step iterations before declaring a firmware hang. */
#define AHC_MAX_STEPS 2000
/*
 * If the sequencer is currently paused inside one of the firmware's
 * registered critical sections, single-step it (STEP bit in SEQCTL,
 * unpause via HCNTRL, wait for re-pause) until its program counter
 * (SEQADDR0/1) lies outside every critical section.  Interrupt
 * sources (SIMODE0/SIMODE1) are masked for the duration of the
 * stepping so a pausing interrupt condition cannot wedge the walk,
 * and are restored afterwards.  Panics if AHC_MAX_STEPS is exceeded.
 */
void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
	int	stepping;
	int	steps;
	u_int	simode0;
	u_int	simode1;

	if (ahc->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	simode0 = 0;
	simode1 = 0;
	for (;;) {
		struct cs *cs;
		u_int seqaddr;
		u_int i;

		seqaddr = ahc_inb(ahc, SEQADDR0)
			| (ahc_inb(ahc, SEQADDR1) << 8);

		/*
		 * Seqaddr represents the next instruction to execute,
		 * so we are really executing the instruction just
		 * before it.
		 */
		if (seqaddr != 0)
			seqaddr -= 1;
		cs = ahc->critical_sections;
		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		/* Not inside any critical section; we are done. */
		if (i == ahc->num_critical_sections)
			break;

		if (steps > AHC_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n",
			       ahc_name(ahc));
			ahc_dump_card_state(ahc);
			panic("critical section loop");
		}

		steps++;
		if (stepping == FALSE) {

			/*
			 * Disable all interrupt sources so that the
			 * sequencer will not be stuck by a pausing
			 * interrupt condition while we attempt to
			 * leave a critical section.
			 */
			simode0 = ahc_inb(ahc, SIMODE0);
			ahc_outb(ahc, SIMODE0, 0);
			simode1 = ahc_inb(ahc, SIMODE1);
			ahc_outb(ahc, SIMODE1, 0);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
			stepping = TRUE;
		}
		/* Release the sequencer for one instruction, then wait
		 * for it to pause again after the single step. */
		ahc_outb(ahc, HCNTRL, ahc->unpause);
		while (!ahc_is_paused(ahc))
			ahc_delay(200);
	}
	if (stepping) {
		/* Restore the interrupt masks saved above and leave
		 * single-step mode. */
		ahc_outb(ahc, SIMODE0, simode0);
		ahc_outb(ahc, SIMODE1, simode1);
		ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
	}
}

/*
 * Clear any pending interrupt status.
1418 */ 1419 void 1420 ahc_clear_intstat(struct ahc_softc *ahc) 1421 { 1422 /* Clear any interrupt conditions this may have caused */ 1423 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 1424 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 1425 CLRREQINIT); 1426 ahc_flush_device_writes(ahc); 1427 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 1428 ahc_flush_device_writes(ahc); 1429 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1430 ahc_flush_device_writes(ahc); 1431 } 1432 1433 /**************************** Debugging Routines ******************************/ 1434 #ifdef AHC_DEBUG 1435 int ahc_debug = AHC_DEBUG; 1436 #endif 1437 1438 void 1439 ahc_print_scb(struct scb *scb) 1440 { 1441 int i; 1442 1443 struct hardware_scb *hscb = scb->hscb; 1444 1445 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 1446 (void *)scb, 1447 hscb->control, 1448 hscb->scsiid, 1449 hscb->lun, 1450 hscb->cdb_len); 1451 printf("Shared Data: "); 1452 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) 1453 printf("%#02x", hscb->shared_data.cdb[i]); 1454 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", 1455 ahc_le32toh(hscb->dataptr), 1456 ahc_le32toh(hscb->datacnt), 1457 ahc_le32toh(hscb->sgptr), 1458 hscb->tag); 1459 if (scb->sg_count > 0) { 1460 for (i = 0; i < scb->sg_count; i++) { 1461 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 1462 i, 1463 (ahc_le32toh(scb->sg_list[i].len) >> 24 1464 & SG_HIGH_ADDR_BITS), 1465 ahc_le32toh(scb->sg_list[i].addr), 1466 ahc_le32toh(scb->sg_list[i].len)); 1467 } 1468 } 1469 } 1470 1471 /************************* Transfer Negotiation *******************************/ 1472 /* 1473 * Allocate per target mode instance (ID we respond to as a target) 1474 * transfer negotiation data structures. 
1475 */ 1476 static struct ahc_tmode_tstate * 1477 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) 1478 { 1479 struct ahc_tmode_tstate *master_tstate; 1480 struct ahc_tmode_tstate *tstate; 1481 int i; 1482 1483 master_tstate = ahc->enabled_targets[ahc->our_id]; 1484 if (channel == 'B') { 1485 scsi_id += 8; 1486 master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; 1487 } 1488 if (ahc->enabled_targets[scsi_id] != NULL 1489 && ahc->enabled_targets[scsi_id] != master_tstate) 1490 panic("%s: ahc_alloc_tstate - Target already allocated", 1491 ahc_name(ahc)); 1492 tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); 1493 if (tstate == NULL) 1494 return (NULL); 1495 1496 /* 1497 * If we have allocated a master tstate, copy user settings from 1498 * the master tstate (taken from SRAM or the EEPROM) for this 1499 * channel, but reset our current and goal settings to async/narrow 1500 * until an initiator talks to us. 1501 */ 1502 if (master_tstate != NULL) { 1503 memcpy(tstate, master_tstate, sizeof(*tstate)); 1504 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 1505 tstate->ultraenb = 0; 1506 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1507 memset(&tstate->transinfo[i].curr, 0, 1508 sizeof(tstate->transinfo[i].curr)); 1509 memset(&tstate->transinfo[i].goal, 0, 1510 sizeof(tstate->transinfo[i].goal)); 1511 } 1512 } else 1513 memset(tstate, 0, sizeof(*tstate)); 1514 ahc->enabled_targets[scsi_id] = tstate; 1515 return (tstate); 1516 } 1517 1518 #ifdef AHC_TARGET_MODE 1519 /* 1520 * Free per target mode instance (ID we respond to as a target) 1521 * transfer negotiation data structures. 1522 */ 1523 static void 1524 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1525 { 1526 struct ahc_tmode_tstate *tstate; 1527 1528 /* 1529 * Don't clean up our "master" tstate. 1530 * It has our default user settings. 
1531 */ 1532 if (((channel == 'B' && scsi_id == ahc->our_id_b) 1533 || (channel == 'A' && scsi_id == ahc->our_id)) 1534 && force == FALSE) 1535 return; 1536 1537 if (channel == 'B') 1538 scsi_id += 8; 1539 tstate = ahc->enabled_targets[scsi_id]; 1540 if (tstate != NULL) 1541 free(tstate, M_DEVBUF); 1542 ahc->enabled_targets[scsi_id] = NULL; 1543 } 1544 #endif 1545 1546 /* 1547 * Called when we have an active connection to a target on the bus, 1548 * this function finds the nearest syncrate to the input period limited 1549 * by the capabilities of the bus connectivity of and sync settings for 1550 * the target. 1551 */ 1552 struct ahc_syncrate * 1553 ahc_devlimited_syncrate(struct ahc_softc *ahc, 1554 struct ahc_initiator_tinfo *tinfo, 1555 u_int *period, u_int *ppr_options, role_t role) 1556 { 1557 struct ahc_transinfo *transinfo; 1558 u_int maxsync; 1559 1560 if ((ahc->features & AHC_ULTRA2) != 0) { 1561 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1562 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1563 maxsync = AHC_SYNCRATE_DT; 1564 } else { 1565 maxsync = AHC_SYNCRATE_ULTRA; 1566 /* Can't do DT on an SE bus */ 1567 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1568 } 1569 } else if ((ahc->features & AHC_ULTRA) != 0) { 1570 maxsync = AHC_SYNCRATE_ULTRA; 1571 } else { 1572 maxsync = AHC_SYNCRATE_FAST; 1573 } 1574 /* 1575 * Never allow a value higher than our current goal 1576 * period otherwise we may allow a target initiated 1577 * negotiation to go above the limit as set by the 1578 * user. In the case of an initiator initiated 1579 * sync negotiation, we limit based on the user 1580 * setting. This allows the system to still accept 1581 * incoming negotiations even if target initiated 1582 * negotiation is not performed. 
1583 */ 1584 if (role == ROLE_TARGET) 1585 transinfo = &tinfo->user; 1586 else 1587 transinfo = &tinfo->goal; 1588 *ppr_options &= transinfo->ppr_options; 1589 if (transinfo->period == 0) { 1590 *period = 0; 1591 *ppr_options = 0; 1592 return (NULL); 1593 } 1594 *period = MAX(*period, transinfo->period); 1595 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); 1596 } 1597 1598 /* 1599 * Look up the valid period to SCSIRATE conversion in our table. 1600 * Return the period and offset that should be sent to the target 1601 * if this was the beginning of an SDTR. 1602 */ 1603 struct ahc_syncrate * 1604 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1605 u_int *ppr_options, u_int maxsync) 1606 { 1607 struct ahc_syncrate *syncrate; 1608 1609 if ((ahc->features & AHC_DT) == 0) 1610 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1611 1612 /* Skip all DT only entries if DT is not available */ 1613 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 1614 && maxsync < AHC_SYNCRATE_ULTRA2) 1615 maxsync = AHC_SYNCRATE_ULTRA2; 1616 1617 for (syncrate = &ahc_syncrates[maxsync]; 1618 syncrate->rate != NULL; 1619 syncrate++) { 1620 1621 /* 1622 * The Ultra2 table doesn't go as low 1623 * as for the Fast/Ultra cards. 1624 */ 1625 if ((ahc->features & AHC_ULTRA2) != 0 1626 && (syncrate->sxfr_u2 == 0)) 1627 break; 1628 1629 if (*period <= syncrate->period) { 1630 /* 1631 * When responding to a target that requests 1632 * sync, the requested rate may fall between 1633 * two rates that we can output, but still be 1634 * a rate that we can receive. Because of this, 1635 * we want to respond to the target with 1636 * the same rate that it sent to us even 1637 * if the period we use to send data to it 1638 * is lower. Only lower the response period 1639 * if we must. 1640 */ 1641 if (syncrate == &ahc_syncrates[maxsync]) 1642 *period = syncrate->period; 1643 1644 /* 1645 * At some speeds, we only support 1646 * ST transfers. 
1647 */ 1648 if ((syncrate->sxfr_u2 & ST_SXFR) != 0) 1649 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1650 break; 1651 } 1652 } 1653 1654 if ((*period == 0) 1655 || (syncrate->rate == NULL) 1656 || ((ahc->features & AHC_ULTRA2) != 0 1657 && (syncrate->sxfr_u2 == 0))) { 1658 /* Use asynchronous transfers. */ 1659 *period = 0; 1660 syncrate = NULL; 1661 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1662 } 1663 return (syncrate); 1664 } 1665 1666 /* 1667 * Convert from an entry in our syncrate table to the SCSI equivalent 1668 * sync "period" factor. 1669 */ 1670 u_int 1671 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1672 { 1673 struct ahc_syncrate *syncrate; 1674 1675 if ((ahc->features & AHC_ULTRA2) != 0) 1676 scsirate &= SXFR_ULTRA2; 1677 else 1678 scsirate &= SXFR; 1679 1680 syncrate = &ahc_syncrates[maxsync]; 1681 while (syncrate->rate != NULL) { 1682 1683 if ((ahc->features & AHC_ULTRA2) != 0) { 1684 if (syncrate->sxfr_u2 == 0) 1685 break; 1686 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) 1687 return (syncrate->period); 1688 } else if (scsirate == (syncrate->sxfr & SXFR)) { 1689 return (syncrate->period); 1690 } 1691 syncrate++; 1692 } 1693 return (0); /* async */ 1694 } 1695 1696 /* 1697 * Truncate the given synchronous offset to a value the 1698 * current adapter type and syncrate are capable of. 
1699 */ 1700 void 1701 ahc_validate_offset(struct ahc_softc *ahc, 1702 struct ahc_initiator_tinfo *tinfo, 1703 struct ahc_syncrate *syncrate, 1704 u_int *offset, int wide, role_t role) 1705 { 1706 u_int maxoffset; 1707 1708 /* Limit offset to what we can do */ 1709 if (syncrate == NULL) { 1710 maxoffset = 0; 1711 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1712 maxoffset = MAX_OFFSET_ULTRA2; 1713 } else { 1714 if (wide) 1715 maxoffset = MAX_OFFSET_16BIT; 1716 else 1717 maxoffset = MAX_OFFSET_8BIT; 1718 } 1719 *offset = MIN(*offset, maxoffset); 1720 if (tinfo != NULL) { 1721 if (role == ROLE_TARGET) 1722 *offset = MIN(*offset, tinfo->user.offset); 1723 else 1724 *offset = MIN(*offset, tinfo->goal.offset); 1725 } 1726 } 1727 1728 /* 1729 * Truncate the given transfer width parameter to a value the 1730 * current adapter type is capable of. 1731 */ 1732 void 1733 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 1734 u_int *bus_width, role_t role) 1735 { 1736 switch (*bus_width) { 1737 default: 1738 if (ahc->features & AHC_WIDE) { 1739 /* Respond Wide */ 1740 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1741 break; 1742 } 1743 /* FALLTHROUGH */ 1744 case MSG_EXT_WDTR_BUS_8_BIT: 1745 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1746 break; 1747 } 1748 if (tinfo != NULL) { 1749 if (role == ROLE_TARGET) 1750 *bus_width = MIN(tinfo->user.width, *bus_width); 1751 else 1752 *bus_width = MIN(tinfo->goal.width, *bus_width); 1753 } 1754 } 1755 1756 /* 1757 * Update the bitmask of targets for which the controller should 1758 * negotiate with at the next convenient oportunity. This currently 1759 * means the next time we send the initial identify messages for 1760 * a new transaction. 
1761 */ 1762 int 1763 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1764 struct ahc_tmode_tstate *tstate, 1765 struct ahc_initiator_tinfo *tinfo, int force) 1766 { 1767 u_int auto_negotiate_orig; 1768 1769 auto_negotiate_orig = tstate->auto_negotiate; 1770 if (tinfo->curr.period != tinfo->goal.period 1771 || tinfo->curr.width != tinfo->goal.width 1772 || tinfo->curr.offset != tinfo->goal.offset 1773 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 1774 || (force 1775 && (tinfo->goal.period != 0 1776 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 1777 || tinfo->goal.ppr_options != 0))) 1778 tstate->auto_negotiate |= devinfo->target_mask; 1779 else 1780 tstate->auto_negotiate &= ~devinfo->target_mask; 1781 1782 return (auto_negotiate_orig != tstate->auto_negotiate); 1783 } 1784 1785 /* 1786 * Update the user/goal/curr tables of synchronous negotiation 1787 * parameters as well as, in the case of a current or active update, 1788 * any data structures on the host controller. In the case of an 1789 * active update, the specified target is currently talking to us on 1790 * the bus, so the transfer parameter update must take effect 1791 * immediately. 
1792 */ 1793 void 1794 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1795 struct ahc_syncrate *syncrate, u_int period, 1796 u_int offset, u_int ppr_options, u_int type, int paused) 1797 { 1798 struct ahc_initiator_tinfo *tinfo; 1799 struct ahc_tmode_tstate *tstate; 1800 u_int old_period; 1801 u_int old_offset; 1802 u_int old_ppr; 1803 int active; 1804 int update_needed; 1805 1806 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1807 update_needed = 0; 1808 1809 if (syncrate == NULL) { 1810 period = 0; 1811 offset = 0; 1812 } 1813 1814 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1815 devinfo->target, &tstate); 1816 1817 if ((type & AHC_TRANS_USER) != 0) { 1818 tinfo->user.period = period; 1819 tinfo->user.offset = offset; 1820 tinfo->user.ppr_options = ppr_options; 1821 } 1822 1823 if ((type & AHC_TRANS_GOAL) != 0) { 1824 tinfo->goal.period = period; 1825 tinfo->goal.offset = offset; 1826 tinfo->goal.ppr_options = ppr_options; 1827 } 1828 1829 old_period = tinfo->curr.period; 1830 old_offset = tinfo->curr.offset; 1831 old_ppr = tinfo->curr.ppr_options; 1832 1833 if ((type & AHC_TRANS_CUR) != 0 1834 && (old_period != period 1835 || old_offset != offset 1836 || old_ppr != ppr_options)) { 1837 u_int scsirate; 1838 1839 update_needed++; 1840 scsirate = tinfo->scsirate; 1841 if ((ahc->features & AHC_ULTRA2) != 0) { 1842 1843 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); 1844 if (syncrate != NULL) { 1845 scsirate |= syncrate->sxfr_u2; 1846 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) 1847 scsirate |= ENABLE_CRC; 1848 else 1849 scsirate |= SINGLE_EDGE; 1850 } 1851 } else { 1852 1853 scsirate &= ~(SXFR|SOFS); 1854 /* 1855 * Ensure Ultra mode is set properly for 1856 * this target. 
1857 */ 1858 tstate->ultraenb &= ~devinfo->target_mask; 1859 if (syncrate != NULL) { 1860 if (syncrate->sxfr & ULTRA_SXFR) { 1861 tstate->ultraenb |= 1862 devinfo->target_mask; 1863 } 1864 scsirate |= syncrate->sxfr & SXFR; 1865 scsirate |= offset & SOFS; 1866 } 1867 if (active) { 1868 u_int sxfrctl0; 1869 1870 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1871 sxfrctl0 &= ~FAST20; 1872 if (tstate->ultraenb & devinfo->target_mask) 1873 sxfrctl0 |= FAST20; 1874 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1875 } 1876 } 1877 if (active) { 1878 ahc_outb(ahc, SCSIRATE, scsirate); 1879 if ((ahc->features & AHC_ULTRA2) != 0) 1880 ahc_outb(ahc, SCSIOFFSET, offset); 1881 } 1882 1883 tinfo->scsirate = scsirate; 1884 tinfo->curr.period = period; 1885 tinfo->curr.offset = offset; 1886 tinfo->curr.ppr_options = ppr_options; 1887 1888 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1889 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1890 if (bootverbose) { 1891 if (offset != 0) { 1892 printf("%s: target %d synchronous at %sMHz%s, " 1893 "offset = 0x%x\n", ahc_name(ahc), 1894 devinfo->target, syncrate->rate, 1895 (ppr_options & MSG_EXT_PPR_DT_REQ) 1896 ? " DT" : "", offset); 1897 } else { 1898 printf("%s: target %d using " 1899 "asynchronous transfers\n", 1900 ahc_name(ahc), devinfo->target); 1901 } 1902 } 1903 } 1904 1905 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 1906 tinfo, /*force*/FALSE); 1907 1908 if (update_needed) 1909 ahc_update_pending_scbs(ahc); 1910 } 1911 1912 /* 1913 * Update the user/goal/curr tables of wide negotiation 1914 * parameters as well as, in the case of a current or active update, 1915 * any data structures on the host controller. In the case of an 1916 * active update, the specified target is currently talking to us on 1917 * the bus, so the transfer parameter update must take effect 1918 * immediately. 
1919 */ 1920 void 1921 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1922 u_int width, u_int type, int paused) 1923 { 1924 struct ahc_initiator_tinfo *tinfo; 1925 struct ahc_tmode_tstate *tstate; 1926 u_int oldwidth; 1927 int active; 1928 int update_needed; 1929 1930 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1931 update_needed = 0; 1932 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1933 devinfo->target, &tstate); 1934 1935 if ((type & AHC_TRANS_USER) != 0) 1936 tinfo->user.width = width; 1937 1938 if ((type & AHC_TRANS_GOAL) != 0) 1939 tinfo->goal.width = width; 1940 1941 oldwidth = tinfo->curr.width; 1942 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 1943 u_int scsirate; 1944 1945 update_needed++; 1946 scsirate = tinfo->scsirate; 1947 scsirate &= ~WIDEXFER; 1948 if (width == MSG_EXT_WDTR_BUS_16_BIT) 1949 scsirate |= WIDEXFER; 1950 1951 tinfo->scsirate = scsirate; 1952 1953 if (active) 1954 ahc_outb(ahc, SCSIRATE, scsirate); 1955 1956 tinfo->curr.width = width; 1957 1958 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1959 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1960 if (bootverbose) { 1961 printf("%s: target %d using %dbit transfers\n", 1962 ahc_name(ahc), devinfo->target, 1963 8 * (0x01 << width)); 1964 } 1965 } 1966 1967 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 1968 tinfo, /*force*/FALSE); 1969 if (update_needed) 1970 ahc_update_pending_scbs(ahc); 1971 } 1972 1973 /* 1974 * Update the current state of tagged queuing for a given target. 
1975 */ 1976 void 1977 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1978 ahc_queue_alg alg) 1979 { 1980 ahc_platform_set_tags(ahc, devinfo, alg); 1981 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1982 devinfo->lun, AC_TRANSFER_NEG, &alg); 1983 } 1984 1985 /* 1986 * When the transfer settings for a connection change, update any 1987 * in-transit SCBs to contain the new data so the hardware will 1988 * be set correctly during future (re)selections. 1989 */ 1990 static void 1991 ahc_update_pending_scbs(struct ahc_softc *ahc) 1992 { 1993 struct scb *pending_scb; 1994 int pending_scb_count; 1995 int i; 1996 int paused; 1997 u_int saved_scbptr; 1998 1999 /* 2000 * Traverse the pending SCB list and ensure that all of the 2001 * SCBs there have the proper settings. 2002 */ 2003 pending_scb_count = 0; 2004 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { 2005 struct ahc_devinfo devinfo; 2006 struct hardware_scb *pending_hscb; 2007 struct ahc_initiator_tinfo *tinfo; 2008 struct ahc_tmode_tstate *tstate; 2009 2010 ahc_scb_devinfo(ahc, &devinfo, pending_scb); 2011 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 2012 devinfo.our_scsiid, 2013 devinfo.target, &tstate); 2014 pending_hscb = pending_scb->hscb; 2015 pending_hscb->control &= ~ULTRAENB; 2016 if ((tstate->ultraenb & devinfo.target_mask) != 0) 2017 pending_hscb->control |= ULTRAENB; 2018 pending_hscb->scsirate = tinfo->scsirate; 2019 pending_hscb->scsioffset = tinfo->curr.offset; 2020 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 2021 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 2022 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 2023 pending_hscb->control &= ~MK_MESSAGE; 2024 } 2025 ahc_sync_scb(ahc, pending_scb, 2026 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2027 pending_scb_count++; 2028 } 2029 2030 if (pending_scb_count == 0) 2031 return; 2032 2033 if (ahc_is_paused(ahc)) { 2034 paused = 1; 2035 } else { 2036 paused = 0; 2037 ahc_pause(ahc); 2038 } 2039 
2040 saved_scbptr = ahc_inb(ahc, SCBPTR); 2041 /* Ensure that the hscbs down on the card match the new information */ 2042 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 2043 struct hardware_scb *pending_hscb; 2044 u_int control; 2045 u_int scb_tag; 2046 2047 ahc_outb(ahc, SCBPTR, i); 2048 scb_tag = ahc_inb(ahc, SCB_TAG); 2049 pending_scb = ahc_lookup_scb(ahc, scb_tag); 2050 if (pending_scb == NULL) 2051 continue; 2052 2053 pending_hscb = pending_scb->hscb; 2054 control = ahc_inb(ahc, SCB_CONTROL); 2055 control &= ~(ULTRAENB|MK_MESSAGE); 2056 control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); 2057 ahc_outb(ahc, SCB_CONTROL, control); 2058 ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); 2059 ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); 2060 } 2061 ahc_outb(ahc, SCBPTR, saved_scbptr); 2062 2063 if (paused == 0) 2064 ahc_unpause(ahc); 2065 } 2066 2067 /**************************** Pathing Information *****************************/ 2068 static void 2069 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2070 { 2071 u_int saved_scsiid; 2072 role_t role; 2073 int our_id; 2074 2075 if (ahc_inb(ahc, SSTAT0) & TARGET) 2076 role = ROLE_TARGET; 2077 else 2078 role = ROLE_INITIATOR; 2079 2080 if (role == ROLE_TARGET 2081 && (ahc->features & AHC_MULTI_TID) != 0 2082 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { 2083 /* We were selected, so pull our id from TARGIDIN */ 2084 our_id = ahc_inb(ahc, TARGIDIN) & OID; 2085 } else if ((ahc->features & AHC_ULTRA2) != 0) 2086 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; 2087 else 2088 our_id = ahc_inb(ahc, SCSIID) & OID; 2089 2090 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 2091 ahc_compile_devinfo(devinfo, 2092 our_id, 2093 SCSIID_TARGET(ahc, saved_scsiid), 2094 ahc_inb(ahc, SAVED_LUN), 2095 SCSIID_CHANNEL(ahc, saved_scsiid), 2096 role); 2097 } 2098 2099 struct ahc_phase_table_entry* 2100 ahc_lookup_phase_entry(int phase) 2101 { 2102 struct ahc_phase_table_entry *entry; 2103 struct 
ahc_phase_table_entry *last_entry;

	/*
	 * num_phases doesn't include the default entry which
	 * will be returned if the phase doesn't match.
	 */
	last_entry = &ahc_phase_table[num_phases];
	for (entry = ahc_phase_table; entry < last_entry; entry++) {
		if (phase == entry->phase)
			break;
	}
	return (entry);
}

/*
 * Populate a devinfo structure.  Channel B targets are offset by 8
 * in the target tables, and target_mask is the corresponding bit
 * for use in per-target bitmasks.
 */
void
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/*
 * Build a devinfo structure from the addressing information stored
 * in an SCB's hardware SCB.
 */
static void
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct scb *scb)
{
	role_t	role;
	int	our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->hscb->control & TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
}


/************************ Message Phase Processing ****************************/
/*
 * Assert ATN on the bus.  On non-DT chips the other SCSISIGI signals
 * must be mirrored back into SCSISIGO to avoid clearing them.
 */
static void
ahc_assert_atn(struct ahc_softc *ahc)
{
	u_int scsisigo;

	scsisigo = ATNO;
	if ((ahc->features & AHC_DT) == 0)
		scsisigo |= ahc_inb(ahc, SCSISIGI);
	ahc_outb(ahc, SCSISIGO, scsisigo);
}

/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handing
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	/* Lead with Identify (and optional queue tag) unless resetting. */
	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		/* Two-byte queue tag message: tag type, then tag id. */
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		/* MK_MESSAGE was set but no message reason exists. */
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	int	use_ppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.period != period;
	doppr = tinfo->curr.ppr_options != ppr_options;

	/*
	 * Current and goal already agree; force renegotiation of any
	 * non-default goal settings instead.
	 */
	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.period != 0;
		doppr = tinfo->goal.ppr_options != 0;
	}

	if (!dowide && !dosync && !doppr) {
		panic("ahc_intr: AWAITING_MSG for negotiation, "
		      "but no negotiation needed\n");
	}

	use_ppr = (tinfo->curr.transport_version >= 3) || doppr;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		use_ppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (use_ppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    use_ppr ? tinfo->goal.width
					    : tinfo->curr.width,
				    devinfo->role);
		if (use_ppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int period, u_int offset)
{
	/* Five bytes: extended-message preamble, then period and offset. */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_len += 5;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int bus_width)
{
	/* Four bytes: extended-message preamble, then the bus width. */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_len += 4;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/* Eight bytes: extended-message preamble plus the PPR payload. */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	/* Byte between period and offset is always sent as zero here. */
	ahc->msgout_buf[ahc->msgout_index++] = 0;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
	ahc->msgout_len += 8;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
	ahc_outb(ahc, SEQ_FLAGS2,
		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}

/*
 * Manual message loop handler.
2422 */ 2423 static void 2424 ahc_handle_message_phase(struct ahc_softc *ahc) 2425 { 2426 struct ahc_devinfo devinfo; 2427 u_int bus_phase; 2428 int end_session; 2429 2430 ahc_fetch_devinfo(ahc, &devinfo); 2431 end_session = FALSE; 2432 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2433 2434 reswitch: 2435 switch (ahc->msg_type) { 2436 case MSG_TYPE_INITIATOR_MSGOUT: 2437 { 2438 int lastbyte; 2439 int phasemis; 2440 int msgdone; 2441 2442 if (ahc->msgout_len == 0) 2443 panic("HOST_MSG_LOOP interrupt with no active message"); 2444 2445 phasemis = bus_phase != P_MESGOUT; 2446 if (phasemis) { 2447 if (bus_phase == P_MESGIN) { 2448 /* 2449 * Change gears and see if 2450 * this messages is of interest to 2451 * us or should be passed back to 2452 * the sequencer. 2453 */ 2454 ahc_outb(ahc, CLRSINT1, CLRATNO); 2455 ahc->send_msg_perror = FALSE; 2456 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 2457 ahc->msgin_index = 0; 2458 goto reswitch; 2459 } 2460 end_session = TRUE; 2461 break; 2462 } 2463 2464 if (ahc->send_msg_perror) { 2465 ahc_outb(ahc, CLRSINT1, CLRATNO); 2466 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2467 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 2468 break; 2469 } 2470 2471 msgdone = ahc->msgout_index == ahc->msgout_len; 2472 if (msgdone) { 2473 /* 2474 * The target has requested a retry. 2475 * Re-assert ATN, reset our message index to 2476 * 0, and try again. 2477 */ 2478 ahc->msgout_index = 0; 2479 ahc_assert_atn(ahc); 2480 } 2481 2482 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); 2483 if (lastbyte) { 2484 /* Last byte is signified by dropping ATN */ 2485 ahc_outb(ahc, CLRSINT1, CLRATNO); 2486 } 2487 2488 /* 2489 * Clear our interrupt status and present 2490 * the next byte on the bus. 
2491 */ 2492 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2493 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2494 break; 2495 } 2496 case MSG_TYPE_INITIATOR_MSGIN: 2497 { 2498 int phasemis; 2499 int message_done; 2500 2501 phasemis = bus_phase != P_MESGIN; 2502 2503 if (phasemis) { 2504 ahc->msgin_index = 0; 2505 if (bus_phase == P_MESGOUT 2506 && (ahc->send_msg_perror == TRUE 2507 || (ahc->msgout_len != 0 2508 && ahc->msgout_index == 0))) { 2509 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2510 goto reswitch; 2511 } 2512 end_session = TRUE; 2513 break; 2514 } 2515 2516 /* Pull the byte in without acking it */ 2517 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 2518 2519 message_done = ahc_parse_msg(ahc, &devinfo); 2520 2521 if (message_done) { 2522 /* 2523 * Clear our incoming message buffer in case there 2524 * is another message following this one. 2525 */ 2526 ahc->msgin_index = 0; 2527 2528 /* 2529 * If this message illicited a response, 2530 * assert ATN so the target takes us to the 2531 * message out phase. 2532 */ 2533 if (ahc->msgout_len != 0) 2534 ahc_assert_atn(ahc); 2535 } else 2536 ahc->msgin_index++; 2537 2538 /* Ack the byte */ 2539 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2540 ahc_inb(ahc, SCSIDATL); 2541 break; 2542 } 2543 case MSG_TYPE_TARGET_MSGIN: 2544 { 2545 int msgdone; 2546 int msgout_request; 2547 2548 if (ahc->msgout_len == 0) 2549 panic("Target MSGIN with no active message"); 2550 2551 /* 2552 * If we interrupted a mesgout session, the initiator 2553 * will not know this until our first REQ. So, we 2554 * only honor mesgout requests after we've sent our 2555 * first byte. 2556 */ 2557 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 2558 && ahc->msgout_index > 0) 2559 msgout_request = TRUE; 2560 else 2561 msgout_request = FALSE; 2562 2563 if (msgout_request) { 2564 2565 /* 2566 * Change gears and see if 2567 * this messages is of interest to 2568 * us or should be passed back to 2569 * the sequencer. 
2570 */ 2571 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; 2572 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); 2573 ahc->msgin_index = 0; 2574 /* Dummy read to REQ for first byte */ 2575 ahc_inb(ahc, SCSIDATL); 2576 ahc_outb(ahc, SXFRCTL0, 2577 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2578 break; 2579 } 2580 2581 msgdone = ahc->msgout_index == ahc->msgout_len; 2582 if (msgdone) { 2583 ahc_outb(ahc, SXFRCTL0, 2584 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 2585 end_session = TRUE; 2586 break; 2587 } 2588 2589 /* 2590 * Present the next byte on the bus. 2591 */ 2592 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2593 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2594 break; 2595 } 2596 case MSG_TYPE_TARGET_MSGOUT: 2597 { 2598 int lastbyte; 2599 int msgdone; 2600 2601 /* 2602 * The initiator signals that this is 2603 * the last byte by dropping ATN. 2604 */ 2605 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; 2606 2607 /* 2608 * Read the latched byte, but turn off SPIOEN first 2609 * so that we don't inadvertently cause a REQ for the 2610 * next byte. 2611 */ 2612 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 2613 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); 2614 msgdone = ahc_parse_msg(ahc, &devinfo); 2615 if (msgdone == MSGLOOP_TERMINATED) { 2616 /* 2617 * The message is *really* done in that it caused 2618 * us to go to bus free. The sequencer has already 2619 * been reset at this point, so pull the ejection 2620 * handle. 2621 */ 2622 return; 2623 } 2624 2625 ahc->msgin_index++; 2626 2627 /* 2628 * XXX Read spec about initiator dropping ATN too soon 2629 * and use msgdone to detect it. 2630 */ 2631 if (msgdone == MSGLOOP_MSGCOMPLETE) { 2632 ahc->msgin_index = 0; 2633 2634 /* 2635 * If this message illicited a response, transition 2636 * to the Message in phase and send it. 
2637 */ 2638 if (ahc->msgout_len != 0) { 2639 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); 2640 ahc_outb(ahc, SXFRCTL0, 2641 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2642 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 2643 ahc->msgin_index = 0; 2644 break; 2645 } 2646 } 2647 2648 if (lastbyte) 2649 end_session = TRUE; 2650 else { 2651 /* Ask for the next byte. */ 2652 ahc_outb(ahc, SXFRCTL0, 2653 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2654 } 2655 2656 break; 2657 } 2658 default: 2659 panic("Unknown REQINIT message type"); 2660 } 2661 2662 if (end_session) { 2663 ahc_clear_msg_state(ahc); 2664 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); 2665 } else 2666 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 2667 } 2668 2669 /* 2670 * See if we sent a particular extended message to the target. 2671 * If "full" is true, return true only if the target saw the full 2672 * message. If "full" is false, return true if the target saw at 2673 * least the first byte of the message. 2674 */ 2675 static int 2676 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) 2677 { 2678 int found; 2679 u_int index; 2680 2681 found = FALSE; 2682 index = 0; 2683 2684 while (index < ahc->msgout_len) { 2685 if (ahc->msgout_buf[index] == MSG_EXTENDED) { 2686 u_int end_index; 2687 2688 end_index = index + 1 + ahc->msgout_buf[index + 1]; 2689 if (ahc->msgout_buf[index+2] == msgval 2690 && type == AHCMSG_EXT) { 2691 2692 if (full) { 2693 if (ahc->msgout_index > end_index) 2694 found = TRUE; 2695 } else if (ahc->msgout_index > index) 2696 found = TRUE; 2697 } 2698 index = end_index; 2699 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK 2700 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 2701 2702 /* Skip tag type and tag id or residue param*/ 2703 index += 2; 2704 } else { 2705 /* Single byte message */ 2706 if (type == AHCMSG_1B 2707 && ahc->msgout_buf[index] == msgval 2708 && ahc->msgout_index > index) 2709 found = TRUE; 2710 index++; 2711 } 2712 2713 if (found) 2714 break; 2715 } 2716 return (found); 
2717 } 2718 2719 /* 2720 * Wait for a complete incoming message, parse it, and respond accordingly. 2721 */ 2722 static int 2723 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2724 { 2725 struct ahc_initiator_tinfo *tinfo; 2726 struct ahc_tmode_tstate *tstate; 2727 int reject; 2728 int done; 2729 int response; 2730 u_int targ_scsirate; 2731 2732 done = MSGLOOP_IN_PROG; 2733 response = FALSE; 2734 reject = FALSE; 2735 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2736 devinfo->target, &tstate); 2737 targ_scsirate = tinfo->scsirate; 2738 2739 /* 2740 * Parse as much of the message as is availible, 2741 * rejecting it if we don't support it. When 2742 * the entire message is availible and has been 2743 * handled, return MSGLOOP_MSGCOMPLETE, indicating 2744 * that we have parsed an entire message. 2745 * 2746 * In the case of extended messages, we accept the length 2747 * byte outright and perform more checking once we know the 2748 * extended message type. 2749 */ 2750 switch (ahc->msgin_buf[0]) { 2751 case MSG_MESSAGE_REJECT: 2752 response = ahc_handle_msg_reject(ahc, devinfo); 2753 /* FALLTHROUGH */ 2754 case MSG_NOOP: 2755 done = MSGLOOP_MSGCOMPLETE; 2756 break; 2757 case MSG_EXTENDED: 2758 { 2759 /* Wait for enough of the message to begin validation */ 2760 if (ahc->msgin_index < 2) 2761 break; 2762 switch (ahc->msgin_buf[2]) { 2763 case MSG_EXT_SDTR: 2764 { 2765 struct ahc_syncrate *syncrate; 2766 u_int period; 2767 u_int ppr_options; 2768 u_int offset; 2769 u_int saved_offset; 2770 2771 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 2772 reject = TRUE; 2773 break; 2774 } 2775 2776 /* 2777 * Wait until we have both args before validating 2778 * and acting on this message. 2779 * 2780 * Add one to MSG_EXT_SDTR_LEN to account for 2781 * the extended message preamble. 
2782 */ 2783 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 2784 break; 2785 2786 period = ahc->msgin_buf[3]; 2787 ppr_options = 0; 2788 saved_offset = offset = ahc->msgin_buf[4]; 2789 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2790 &ppr_options, 2791 devinfo->role); 2792 ahc_validate_offset(ahc, tinfo, syncrate, &offset, 2793 targ_scsirate & WIDEXFER, 2794 devinfo->role); 2795 if (bootverbose) { 2796 printf("(%s:%c:%d:%d): Received " 2797 "SDTR period %x, offset %x\n\t" 2798 "Filtered to period %x, offset %x\n", 2799 ahc_name(ahc), devinfo->channel, 2800 devinfo->target, devinfo->lun, 2801 ahc->msgin_buf[3], saved_offset, 2802 period, offset); 2803 } 2804 ahc_set_syncrate(ahc, devinfo, 2805 syncrate, period, 2806 offset, ppr_options, 2807 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2808 /*paused*/TRUE); 2809 2810 /* 2811 * See if we initiated Sync Negotiation 2812 * and didn't have to fall down to async 2813 * transfers. 2814 */ 2815 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { 2816 /* We started it */ 2817 if (saved_offset != offset) { 2818 /* Went too low - force async */ 2819 reject = TRUE; 2820 } 2821 } else { 2822 /* 2823 * Send our own SDTR in reply 2824 */ 2825 if (bootverbose 2826 && devinfo->role == ROLE_INITIATOR) { 2827 printf("(%s:%c:%d:%d): Target " 2828 "Initiated SDTR\n", 2829 ahc_name(ahc), devinfo->channel, 2830 devinfo->target, devinfo->lun); 2831 } 2832 ahc->msgout_index = 0; 2833 ahc->msgout_len = 0; 2834 ahc_construct_sdtr(ahc, devinfo, 2835 period, offset); 2836 ahc->msgout_index = 0; 2837 response = TRUE; 2838 } 2839 done = MSGLOOP_MSGCOMPLETE; 2840 break; 2841 } 2842 case MSG_EXT_WDTR: 2843 { 2844 u_int bus_width; 2845 u_int saved_width; 2846 u_int sending_reply; 2847 2848 sending_reply = FALSE; 2849 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 2850 reject = TRUE; 2851 break; 2852 } 2853 2854 /* 2855 * Wait until we have our arg before validating 2856 * and acting on this message. 
2857 * 2858 * Add one to MSG_EXT_WDTR_LEN to account for 2859 * the extended message preamble. 2860 */ 2861 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 2862 break; 2863 2864 bus_width = ahc->msgin_buf[3]; 2865 saved_width = bus_width; 2866 ahc_validate_width(ahc, tinfo, &bus_width, 2867 devinfo->role); 2868 if (bootverbose) { 2869 printf("(%s:%c:%d:%d): Received WDTR " 2870 "%x filtered to %x\n", 2871 ahc_name(ahc), devinfo->channel, 2872 devinfo->target, devinfo->lun, 2873 saved_width, bus_width); 2874 } 2875 2876 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { 2877 /* 2878 * Don't send a WDTR back to the 2879 * target, since we asked first. 2880 * If the width went higher than our 2881 * request, reject it. 2882 */ 2883 if (saved_width > bus_width) { 2884 reject = TRUE; 2885 printf("(%s:%c:%d:%d): requested %dBit " 2886 "transfers. Rejecting...\n", 2887 ahc_name(ahc), devinfo->channel, 2888 devinfo->target, devinfo->lun, 2889 8 * (0x01 << bus_width)); 2890 bus_width = 0; 2891 } 2892 } else { 2893 /* 2894 * Send our own WDTR in reply 2895 */ 2896 if (bootverbose 2897 && devinfo->role == ROLE_INITIATOR) { 2898 printf("(%s:%c:%d:%d): Target " 2899 "Initiated WDTR\n", 2900 ahc_name(ahc), devinfo->channel, 2901 devinfo->target, devinfo->lun); 2902 } 2903 ahc->msgout_index = 0; 2904 ahc->msgout_len = 0; 2905 ahc_construct_wdtr(ahc, devinfo, bus_width); 2906 ahc->msgout_index = 0; 2907 response = TRUE; 2908 sending_reply = TRUE; 2909 } 2910 ahc_set_width(ahc, devinfo, bus_width, 2911 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2912 /*paused*/TRUE); 2913 /* After a wide message, we are async */ 2914 ahc_set_syncrate(ahc, devinfo, 2915 /*syncrate*/NULL, /*period*/0, 2916 /*offset*/0, /*ppr_options*/0, 2917 AHC_TRANS_ACTIVE, /*paused*/TRUE); 2918 if (sending_reply == FALSE && reject == FALSE) { 2919 2920 if (tinfo->goal.period) { 2921 ahc->msgout_index = 0; 2922 ahc->msgout_len = 0; 2923 ahc_build_transfer_msg(ahc, devinfo); 2924 ahc->msgout_index = 0; 2925 response = 
TRUE; 2926 } 2927 } 2928 done = MSGLOOP_MSGCOMPLETE; 2929 break; 2930 } 2931 case MSG_EXT_PPR: 2932 { 2933 struct ahc_syncrate *syncrate; 2934 u_int period; 2935 u_int offset; 2936 u_int bus_width; 2937 u_int ppr_options; 2938 u_int saved_width; 2939 u_int saved_offset; 2940 u_int saved_ppr_options; 2941 2942 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { 2943 reject = TRUE; 2944 break; 2945 } 2946 2947 /* 2948 * Wait until we have all args before validating 2949 * and acting on this message. 2950 * 2951 * Add one to MSG_EXT_PPR_LEN to account for 2952 * the extended message preamble. 2953 */ 2954 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) 2955 break; 2956 2957 period = ahc->msgin_buf[3]; 2958 offset = ahc->msgin_buf[5]; 2959 bus_width = ahc->msgin_buf[6]; 2960 saved_width = bus_width; 2961 ppr_options = ahc->msgin_buf[7]; 2962 /* 2963 * According to the spec, a DT only 2964 * period factor with no DT option 2965 * set implies async. 2966 */ 2967 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 2968 && period == 9) 2969 offset = 0; 2970 saved_ppr_options = ppr_options; 2971 saved_offset = offset; 2972 2973 /* 2974 * Mask out any options we don't support 2975 * on any controller. Transfer options are 2976 * only available if we are negotiating wide. 2977 */ 2978 ppr_options &= MSG_EXT_PPR_DT_REQ; 2979 if (bus_width == 0) 2980 ppr_options = 0; 2981 2982 ahc_validate_width(ahc, tinfo, &bus_width, 2983 devinfo->role); 2984 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2985 &ppr_options, 2986 devinfo->role); 2987 ahc_validate_offset(ahc, tinfo, syncrate, 2988 &offset, bus_width, 2989 devinfo->role); 2990 2991 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { 2992 /* 2993 * If we are unable to do any of the 2994 * requested options (we went too low), 2995 * then we'll have to reject the message. 
2996 */ 2997 if (saved_width > bus_width 2998 || saved_offset != offset 2999 || saved_ppr_options != ppr_options) { 3000 reject = TRUE; 3001 period = 0; 3002 offset = 0; 3003 bus_width = 0; 3004 ppr_options = 0; 3005 syncrate = NULL; 3006 } 3007 } else { 3008 if (devinfo->role != ROLE_TARGET) 3009 printf("(%s:%c:%d:%d): Target " 3010 "Initiated PPR\n", 3011 ahc_name(ahc), devinfo->channel, 3012 devinfo->target, devinfo->lun); 3013 else 3014 printf("(%s:%c:%d:%d): Initiator " 3015 "Initiated PPR\n", 3016 ahc_name(ahc), devinfo->channel, 3017 devinfo->target, devinfo->lun); 3018 ahc->msgout_index = 0; 3019 ahc->msgout_len = 0; 3020 ahc_construct_ppr(ahc, devinfo, period, offset, 3021 bus_width, ppr_options); 3022 ahc->msgout_index = 0; 3023 response = TRUE; 3024 } 3025 if (bootverbose) { 3026 printf("(%s:%c:%d:%d): Received PPR width %x, " 3027 "period %x, offset %x,options %x\n" 3028 "\tFiltered to width %x, period %x, " 3029 "offset %x, options %x\n", 3030 ahc_name(ahc), devinfo->channel, 3031 devinfo->target, devinfo->lun, 3032 saved_width, ahc->msgin_buf[3], 3033 saved_offset, saved_ppr_options, 3034 bus_width, period, offset, ppr_options); 3035 } 3036 ahc_set_width(ahc, devinfo, bus_width, 3037 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3038 /*paused*/TRUE); 3039 ahc_set_syncrate(ahc, devinfo, 3040 syncrate, period, 3041 offset, ppr_options, 3042 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3043 /*paused*/TRUE); 3044 done = MSGLOOP_MSGCOMPLETE; 3045 break; 3046 } 3047 default: 3048 /* Unknown extended message. Reject it. 
*/ 3049 reject = TRUE; 3050 break; 3051 } 3052 break; 3053 } 3054 #ifdef AHC_TARGET_MODE 3055 case MSG_BUS_DEV_RESET: 3056 ahc_handle_devreset(ahc, devinfo, 3057 CAM_BDR_SENT, 3058 "Bus Device Reset Received", 3059 /*verbose_level*/0); 3060 ahc_restart(ahc); 3061 done = MSGLOOP_TERMINATED; 3062 break; 3063 case MSG_ABORT_TAG: 3064 case MSG_ABORT: 3065 case MSG_CLEAR_QUEUE: 3066 { 3067 int tag; 3068 3069 /* Target mode messages */ 3070 if (devinfo->role != ROLE_TARGET) { 3071 reject = TRUE; 3072 break; 3073 } 3074 tag = SCB_LIST_NULL; 3075 if (ahc->msgin_buf[0] == MSG_ABORT_TAG) 3076 tag = ahc_inb(ahc, INITIATOR_TAG); 3077 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3078 devinfo->lun, tag, ROLE_TARGET, 3079 CAM_REQ_ABORTED); 3080 3081 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3082 if (tstate != NULL) { 3083 struct ahc_tmode_lstate* lstate; 3084 3085 lstate = tstate->enabled_luns[devinfo->lun]; 3086 if (lstate != NULL) { 3087 ahc_queue_lstate_event(ahc, lstate, 3088 devinfo->our_scsiid, 3089 ahc->msgin_buf[0], 3090 /*arg*/tag); 3091 ahc_send_lstate_events(ahc, lstate); 3092 } 3093 } 3094 ahc_restart(ahc); 3095 done = MSGLOOP_TERMINATED; 3096 break; 3097 } 3098 #endif 3099 case MSG_TERM_IO_PROC: 3100 default: 3101 reject = TRUE; 3102 break; 3103 } 3104 3105 if (reject) { 3106 /* 3107 * Setup to reject the message. 3108 */ 3109 ahc->msgout_index = 0; 3110 ahc->msgout_len = 1; 3111 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 3112 done = MSGLOOP_MSGCOMPLETE; 3113 response = TRUE; 3114 } 3115 3116 if (done != MSGLOOP_IN_PROG && !response) 3117 /* Clear the outgoing message buffer */ 3118 ahc->msgout_len = 0; 3119 3120 return (done); 3121 } 3122 3123 /* 3124 * Process a message reject message. 3125 */ 3126 static int 3127 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3128 { 3129 /* 3130 * What we care about here is if we had an 3131 * outstanding SDTR or WDTR message for this 3132 * target. 
If we did, this is a signal that 3133 * the target is refusing negotiation. 3134 */ 3135 struct scb *scb; 3136 struct ahc_initiator_tinfo *tinfo; 3137 struct ahc_tmode_tstate *tstate; 3138 u_int scb_index; 3139 u_int last_msg; 3140 int response = 0; 3141 3142 scb_index = ahc_inb(ahc, SCB_TAG); 3143 scb = ahc_lookup_scb(ahc, scb_index); 3144 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3145 devinfo->our_scsiid, 3146 devinfo->target, &tstate); 3147 /* Might be necessary */ 3148 last_msg = ahc_inb(ahc, LAST_MSG); 3149 3150 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 3151 /* 3152 * Target does not support the PPR message. 3153 * Attempt to negotiate SPI-2 style. 3154 */ 3155 if (bootverbose) { 3156 printf("(%s:%c:%d:%d): PPR Rejected. " 3157 "Trying WDTR/SDTR\n", 3158 ahc_name(ahc), devinfo->channel, 3159 devinfo->target, devinfo->lun); 3160 } 3161 tinfo->goal.ppr_options = 0; 3162 tinfo->curr.transport_version = 2; 3163 tinfo->goal.transport_version = 2; 3164 ahc->msgout_index = 0; 3165 ahc->msgout_len = 0; 3166 ahc_build_transfer_msg(ahc, devinfo); 3167 ahc->msgout_index = 0; 3168 response = 1; 3169 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 3170 3171 /* note 8bit xfers */ 3172 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 3173 "8bit transfers\n", ahc_name(ahc), 3174 devinfo->channel, devinfo->target, devinfo->lun); 3175 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3176 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3177 /*paused*/TRUE); 3178 /* 3179 * No need to clear the sync rate. If the target 3180 * did not accept the command, our syncrate is 3181 * unaffected. If the target started the negotiation, 3182 * but rejected our response, we already cleared the 3183 * sync rate before sending our WDTR. 
3184 */ 3185 if (tinfo->goal.period) { 3186 3187 /* Start the sync negotiation */ 3188 ahc->msgout_index = 0; 3189 ahc->msgout_len = 0; 3190 ahc_build_transfer_msg(ahc, devinfo); 3191 ahc->msgout_index = 0; 3192 response = 1; 3193 } 3194 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 3195 /* note asynch xfers and clear flag */ 3196 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, 3197 /*offset*/0, /*ppr_options*/0, 3198 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3199 /*paused*/TRUE); 3200 printf("(%s:%c:%d:%d): refuses synchronous negotiation. " 3201 "Using asynchronous transfers\n", 3202 ahc_name(ahc), devinfo->channel, 3203 devinfo->target, devinfo->lun); 3204 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 3205 int tag_type; 3206 int mask; 3207 3208 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 3209 3210 if (tag_type == MSG_SIMPLE_TASK) { 3211 printf("(%s:%c:%d:%d): refuses tagged commands. " 3212 "Performing non-tagged I/O\n", ahc_name(ahc), 3213 devinfo->channel, devinfo->target, devinfo->lun); 3214 ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); 3215 mask = ~0x23; 3216 } else { 3217 printf("(%s:%c:%d:%d): refuses %s tagged commands. " 3218 "Performing simple queue tagged I/O only\n", 3219 ahc_name(ahc), devinfo->channel, devinfo->target, 3220 devinfo->lun, tag_type == MSG_ORDERED_TASK 3221 ? "ordered" : "head of queue"); 3222 ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); 3223 mask = ~0x03; 3224 } 3225 3226 /* 3227 * Resend the identify for this CCB as the target 3228 * may believe that the selection is invalid otherwise. 3229 */ 3230 ahc_outb(ahc, SCB_CONTROL, 3231 ahc_inb(ahc, SCB_CONTROL) & mask); 3232 scb->hscb->control &= mask; 3233 ahc_set_transaction_tag(scb, /*enabled*/FALSE, 3234 /*type*/MSG_SIMPLE_TASK); 3235 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); 3236 ahc_assert_atn(ahc); 3237 3238 /* 3239 * This transaction is now at the head of 3240 * the untagged queue for this target. 
	 */
		if ((ahc->flags & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;

			/* Per-target untagged queue; this SCB now runs untagged. */
			untagged_q =
			    &(ahc->untagged_queues[devinfo->target_offset]);
			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
		}
		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     scb->hscb->tag);

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}

/*
 * Process an ignore wide residue message.
 *
 * The target tells us that the final byte of the last wide (16-bit)
 * data transfer was a pad byte.  If an appropriate data-in phase has
 * been seen, back the residual data count/address and, when necessary,
 * the residual S/G pointer up by one byte in the SCB's scratch area so
 * that the residual reported to the OS is correct.
 */
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	/* SCB_TAG holds the tag of the currently active SCB. */
	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			struct ahc_dma_seg *sg;
			uint32_t data_cnt;
			uint32_t data_addr;
			uint32_t sglen;

			/* Pull in the rest of the sgptr (bytes 1-3). */
			sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
			sgptr &= SG_PTR_MASK;
			/* Residual byte count, assembled little-endian from SCB scratch. */
			data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24)
				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));

			/* Current host address from the shadow address registers. */
			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
				  | (ahc_inb(ahc, SHADDR + 2) << 16)
				  | (ahc_inb(ahc, SHADDR + 1) << 8)
				  | (ahc_inb(ahc, SHADDR));

			/* Give back the pad byte: one more residual, one less consumed. */
			data_cnt += 1;
			data_addr -= 1;

			sg = ahc_sg_bus_to_virt(scb, sgptr);
			/*
			 * The residual sg ptr points to the next S/G
			 * to load so we must go back one.
			 */
			sg--;
			sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
			if (sg != scb->sg_list
			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
				/*
				 * The pad byte actually belongs to the
				 * previous S/G element; rewind into it.
				 */
				sg--;
				sglen = ahc_le32toh(sg->len);
				/*
				 * Preserve High Address and SG_LIST bits
				 * while setting the count to 1.
				 */
				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
				data_addr = ahc_le32toh(sg->addr)
					  + (sglen & AHC_SG_LEN_MASK) - 1;

				/*
				 * Increment sg so it points to the
				 * "next" sg.
				 */
				sg++;
				sgptr = ahc_sg_virt_to_bus(scb, sg);
				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
					 sgptr >> 24);
				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
					 sgptr >> 16);
				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
					 sgptr >> 8);
				ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
			}

			/* Write the corrected residual count back, byte by byte. */
			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
			ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
		}
	}
}


/*
 * Reinitialize the data pointers for the active transfer
 * based on its current residual.
 */
static void
ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
{
	struct scb *scb;
	struct ahc_dma_seg *sg;
	u_int scb_index;
	uint32_t sgptr;
	uint32_t resid;
	uint32_t dataptr;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/* Assemble the 32-bit residual S/G pointer from SCB scratch bytes. */
	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
	      |	ahc_inb(ahc, SCB_RESIDUAL_SGPTR);

	sgptr &= SG_PTR_MASK;
	sg = ahc_sg_bus_to_virt(scb, sgptr);

	/* The residual sg_ptr always points to the next sg */
	sg--;

	/* Residual count is only 24 bits here. */
	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);

	/* Restart address = segment start + bytes already transferred. */
	dataptr = ahc_le32toh(sg->addr)
		+ (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
		- resid;
	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
		u_int dscommand1;

		/*
		 * With 39-bit addressing, the high address bits live in the
		 * top byte of sg->len and are loaded via HADDR while
		 * HADDLDSEL0 is set.
		 */
		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
		ahc_outb(ahc, HADDR,
			 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
		ahc_outb(ahc, DSCOMMAND1, dscommand1);
	}
	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
	ahc_outb(ahc, HADDR +
2, dataptr >> 16);
	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
	ahc_outb(ahc, HADDR, dataptr);
	ahc_outb(ahc, HCNT + 2, resid >> 16);
	ahc_outb(ahc, HCNT + 1, resid >> 8);
	ahc_outb(ahc, HCNT, resid);
	/* Pre-Ultra2 chips also need the SCSI transfer counter reloaded. */
	if ((ahc->features & AHC_ULTRA2) == 0) {
		ahc_outb(ahc, STCNT + 2, resid >> 16);
		ahc_outb(ahc, STCNT + 1, resid >> 8);
		ahc_outb(ahc, STCNT, resid);
	}
}

/*
 * Handle the effects of issuing a bus device reset message.
 *
 * Aborts all SCBs for the device, notifies any target-mode peripheral
 * drivers, reverts the device to async/narrow transfers so negotiation
 * restarts, and posts an AC_SENT_BDR async event.  "message", when
 * non-NULL, is printed (with the abort count) if verbose_level permits.
 */
static void
ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		    cam_status status, char *message, int verbose_level)
{
#ifdef AHC_TARGET_MODE
	struct ahc_tmode_tstate* tstate;
	u_int lun;
#endif
	int found;

	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
			       status);

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	tstate = ahc->enabled_targets[devinfo->our_scsiid];
	if (tstate != NULL) {
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
					       MSG_BUS_DEV_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif

	/*
	 * Go back to async/narrow transfers and renegotiate.
	 */
	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_CUR, /*paused*/TRUE);
	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
			 /*period*/0, /*offset*/0, /*ppr_options*/0,
			 AHC_TRANS_CUR, /*paused*/TRUE);

	ahc_send_async(ahc, devinfo->channel, devinfo->target,
		       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);

	if (message != NULL
	 && (verbose_level <= bootverbose))
		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
		       message, devinfo->channel, devinfo->target, found);
}

#ifdef AHC_TARGET_MODE
/*
 * Prepare the outgoing message buffer for a target-mode message-in
 * phase.  Panics if called with no negotiation message pending, since
 * the sequencer only interrupts us here when a message is expected.
 */
static void
ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct scb *scb)
{

	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
		ahc_build_transfer_msg(ahc, devinfo);
	else
		panic("ahc_intr: AWAITING target message with no message");

	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
}
#endif
/**************************** Initialization **********************************/
/*
 * Allocate a controller structure for a new device
 * and perform initial initialization.
 *
 * On success the returned softc has taken ownership of "name"
 * (later released by ahc_free() via ahc_set_name()/free).  Returns
 * NULL on allocation or platform-setup failure; "name" is freed on
 * the non-FreeBSD allocation-failure path.
 */
struct ahc_softc *
ahc_alloc(void *platform_arg, char *name)
{
	struct ahc_softc *ahc;
	int	i;

#ifndef	__FreeBSD__
	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
	if (!ahc) {
		printf("aic7xxx: cannot malloc softc!\n");
		free(name, M_DEVBUF);
		return NULL;
	}
#else
	/* On FreeBSD the softc storage belongs to newbus. */
	ahc = device_get_softc((device_t)platform_arg);
#endif
	memset(ahc, 0, sizeof(*ahc));
	LIST_INIT(&ahc->pending_scbs);
	/* We don't know our unit number until the OSM sets it */
	ahc->name = name;
	ahc->unit = -1;
	ahc->description = NULL;
	ahc->channel = 'A';
	ahc->channel_b = 'B';
	ahc->chip = AHC_NONE;
	ahc->features = AHC_FENONE;
	ahc->bugs = AHC_BUGNONE;
	ahc->flags = AHC_FNONE;

	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);
	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
		ahc_free(ahc);
		ahc = NULL;
	}
	return (ahc);
}

int
ahc_softc_init(struct ahc_softc *ahc)
{

	/* The IRQMS bit is only valid on VL and EISA chips */
	if ((ahc->chip & AHC_PCI) == 0)
		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
	else
		ahc->unpause = 0;
	ahc->pause = ahc->unpause | PAUSE;
	/* XXX The shared scb data stuff should be deprecated */
	if (ahc->scb_data == NULL) {
		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
				       M_DEVBUF, M_NOWAIT);
		if (ahc->scb_data == NULL)
			return (ENOMEM);
		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
	}

	return (0);
}

/*
 * Insert the softc into the global, sorted list of controllers
 * (ahc_tailq), inheriting BIOS-enable and primary-channel flags from
 * function 0 of multi-function PCI parts when applicable.
 */
void
ahc_softc_insert(struct ahc_softc *ahc)
{
	struct ahc_softc *list_ahc;

#if AHC_PCI_CONFIG > 0
	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
			ahc_dev_softc_t list_pci;
			ahc_dev_softc_t pci;

			list_pci = list_ahc->dev_softc;
			pci = ahc->dev_softc;
			/* Same slot+bus means the two functions share a card. */
			if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
			 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
				struct ahc_softc *master;
				struct ahc_softc *slave;

				if (ahc_get_pci_function(list_pci) == 0) {
					master = list_ahc;
					slave = ahc;
				} else {
					master = ahc;
					slave = list_ahc;
				}
				slave->flags &= ~AHC_BIOS_ENABLED;
				slave->flags |=
				    master->flags & AHC_BIOS_ENABLED;
				slave->flags &= ~AHC_PRIMARY_CHANNEL;
				slave->flags |=
				    master->flags & AHC_PRIMARY_CHANNEL;
				break;
			}
		}
	}
#endif

	/*
	 * Insertion sort into our list of softcs.
	 */
	list_ahc = TAILQ_FIRST(&ahc_tailq);
	while (list_ahc != NULL
	    && ahc_softc_comp(list_ahc, ahc) <= 0)
		list_ahc = TAILQ_NEXT(list_ahc, links);
	if (list_ahc != NULL)
		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
	else
		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
	ahc->init_level++;
}

/*
 * Verify that the passed in softc pointer is for a
 * controller that is still configured.
 */
struct ahc_softc *
ahc_find_softc(struct ahc_softc *ahc)
{
	struct ahc_softc *list_ahc;

	TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
		if (list_ahc == ahc)
			return (ahc);
	}
	return (NULL);
}

/* Record the OSM-assigned unit number for this controller. */
void
ahc_set_unit(struct ahc_softc *ahc, int unit)
{
	ahc->unit = unit;
}

/*
 * Replace the controller name.  Takes ownership of "name";
 * any previous name is freed.
 */
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
	ahc->name = name;
}

/*
 * Tear down a controller instance.  init_level records how far
 * initialization progressed; the switch falls through so each level
 * releases its own resources plus everything below it.
 */
void
ahc_free(struct ahc_softc *ahc)
{
	int i;

	ahc_fini_scbdata(ahc);
	switch (ahc->init_level) {
	default:
	case 5:
		ahc_shutdown(ahc);
		TAILQ_REMOVE(&ahc_tailq, ahc, links);
		/* FALLTHROUGH */
	case 4:
		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
				  ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
				ahc->shared_data_dmamap);
		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
				   ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
		/* FALLTHROUGH */
	case 1:
#ifndef __linux__
		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
#endif
		break;
	case 0:
		break;
	}

#ifndef __linux__
	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
#endif
	ahc_platform_free(ahc);
	for (i = 0; i < AHC_NUM_TARGETS; i++) {
		struct ahc_tmode_tstate *tstate;

		tstate = ahc->enabled_targets[i];
		if (tstate != NULL) {
#if AHC_TARGET_MODE
			int j;

			for (j = 0; j < AHC_NUM_LUNS; j++) {
				struct ahc_tmode_lstate *lstate;

				lstate = tstate->enabled_luns[j];
				if (lstate != NULL) {
					xpt_free_path(lstate->path);
					free(lstate, M_DEVBUF);
				}
			}
#endif
			free(tstate, M_DEVBUF);
		}
	}
#if AHC_TARGET_MODE
	if (ahc->black_hole != NULL) {
		xpt_free_path(ahc->black_hole->path);
		free(ahc->black_hole, M_DEVBUF);
	}
#endif
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
#ifndef __FreeBSD__
	/* On FreeBSD the softc is newbus-owned; elsewhere we allocated it. */
	free(ahc, M_DEVBUF);
#endif
	return;
}

/*
 * Quiesce the chip: reset it and zero the sequencer/SCSI control
 * registers so the bus is left in a benign state.
 */
void
ahc_shutdown(void *arg)
{
	struct ahc_softc *ahc;
	int i;

	ahc = (struct ahc_softc *)arg;

	/* This will reset most registers to 0, but not all */
	ahc_reset(ahc);
	ahc_outb(ahc, SCSISEQ, 0);
	ahc_outb(ahc, SXFRCTL0, 0);
	ahc_outb(ahc, DSPCISTATUS, 0);

	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
		ahc_outb(ahc, i, 0);
}

/*
 * Reset the controller and record some information about it
 * that is only available just after a reset.
 */
int
ahc_reset(struct ahc_softc *ahc)
{
	u_int	sblkctl;
	u_int	sxfrctl1_a, sxfrctl1_b;
	int	wait;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahc_pause(ahc);
	sxfrctl1_b = 0;
	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
		/* NOTE: intentionally shadows the function-scope sblkctl. */
		u_int sblkctl;

		/*
		 * Save channel B's settings in case this chip
		 * is setup for TWIN channel operation.
		 */
		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);

	/* Issue the chip reset while keeping the controller paused. */
	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahc_delay(1000);
	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printf("%s: WARNING - Failed chip reset! "
		       "Trying to initialize anyway.\n", ahc_name(ahc));
	}
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/* Determine channel configuration */
	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
	/* No Twin Channel PCI cards */
	if ((ahc->chip & AHC_PCI) != 0)
		sblkctl &= ~SELBUSB;
	switch (sblkctl) {
	case 0:
		/* Single Narrow Channel */
		break;
	case 2:
		/* Wide Channel */
		ahc->features |= AHC_WIDE;
		break;
	case 8:
		/* Twin Channel */
		ahc->features |= AHC_TWIN;
		break;
	default:
		printf(" Unsupported adapter type.  Ignoring\n");
		return(-1);
	}

	/*
	 * Reload sxfrctl1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	if ((ahc->features & AHC_TWIN) != 0) {
		/* NOTE: intentionally shadows the function-scope sblkctl. */
		u_int sblkctl;

		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);

#ifdef AHC_DUMP_SEQ
	if (ahc->init_level == 0)
		ahc_dumpseq(ahc);
#endif

	return (0);
}

/*
 * Determine the number of SCBs available on the controller
 *
 * Probes by writing each candidate index into SCB RAM and reading it
 * back; stops at the first index that fails to hold its value or that
 * aliases back onto SCB 0.  Returns the count of usable hardware SCBs.
 */
int
ahc_probe_scbs(struct ahc_softc *ahc) {
	int i;

	for (i = 0; i < AHC_SCB_MAX; i++) {

		ahc_outb(ahc, SCBPTR, i);
		ahc_outb(ahc, SCB_BASE, i);
		if (ahc_inb(ahc, SCB_BASE) != i)
			break;
		/* Detect wrap-around: writing SCB i must not disturb SCB 0. */
		ahc_outb(ahc, SCBPTR, 0);
		if (ahc_inb(ahc, SCB_BASE) != 0)
			break;
	}
	return (i);
}

/*
 * busdma load callback: stash the single segment's bus address into
 * the caller-supplied bus_addr_t.  Assumes nseg == 1 mappings only --
 * all tags created here use nsegments 1.
 */
static void
ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

/*
 * Link every hardware SCB into the sequencer's free list by writing
 * the SCB_NEXT chain into SCB RAM; also invalidates each SCB's tag.
 */
static void
ahc_build_free_scb_list(struct ahc_softc *ahc)
{
	int i;

	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);

		/* Clear the control byte.
 */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		if ((ahc->flags & AHC_PAGESCBS) != 0)
			ahc_outb(ahc, SCB_NEXT, i+1);
		else
			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

		/* Make the tag number invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
}

/*
 * Allocate and wire up all per-controller SCB bookkeeping: the kernel
 * SCB array, the hardware SCB free list, and the DMA tags/memory/maps
 * for hardware SCBs, sense buffers, and S/G lists.  scb_data->init_level
 * tracks progress so ahc_fini_scbdata() can unwind a partial setup.
 * Returns 0 on success, ENOMEM/ENXIO on failure.
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* SCB 0 heads the free list */
		ahc_outb(ahc, FREE_SCBH, 0);
	} else {
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	if (ahc->scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	ahc_build_free_scb_list(ahc);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our hscbs */
	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	memset(scb_data->hscbs, 0,
	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
	ahc_alloc_scbs(ahc);

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	ahc->next_queued_scb = ahc_get_scb(ahc);
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:
	/* Partial state is released later by ahc_fini_scbdata(). */
	return (ENOMEM);
}

/*
 * Release the SCB resources created by ahc_init_scbdata().  The switch
 * falls through from init_level downward so each level frees its own
 * resources plus everything allocated before it.
 */
static void
ahc_fini_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			ahc_dmamem_free(ahc, scb_data->sg_dmat,
					sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
				  scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 5:
		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
				   scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 4:
		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
				   scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 1:
		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}

/*
 * Grow the pool of kernel SCBs by one page worth of S/G lists.
 * Allocates and maps a page of S/G space, then carves it up among as
 * many new SCBs as will fit (bounded by AHC_SCB_MAX_ALLOC), placing
 * each new SCB on the free list.  Allocation failures simply stop the
 * batch early; already-created SCBs remain usable.
 */
void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	struct scb *next_scb;
	struct sg_map_node *sg_map;
	bus_addr_t physaddr;
	struct ahc_dma_seg *segs;
	int newcount;
	int i;

	scb_data = ahc->scb_data;
	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	next_scb = &scb_data->scbarray[scb_data->numscbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of SCBS */
	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
			     (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
			&sg_map->sg_physaddr, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* One AHC_NSEG-entry S/G list per SCB fits in the new page. */
	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
#ifndef	__linux__
		int error;
#endif
		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
							   M_DEVBUF, M_NOWAIT);
		if (pdata == NULL)
			break;
		next_scb->platform_data = pdata;
		next_scb->sg_map = sg_map;
		next_scb->sg_list = segs;
		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
		next_scb->ahc_softc = ahc;
		next_scb->flags = SCB_FREE;
#ifndef __linux__
		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0)
			break;
#endif
		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
		next_scb->hscb->tag = ahc->scb_data->numscbs;
		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
				  next_scb, links.sle);
		segs += AHC_NSEG;
		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
		next_scb++;
		ahc->scb_data->numscbs++;
	}
}

/*
 * Format a human-readable controller description into "buf".
 * The caller must supply a buffer large enough for the longest
 * description -- the sprintf calls here are unbounded.
 */
void
ahc_controller_info(struct ahc_softc *ahc, char *buf)
{
	int len;

	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
	buf += len;
	if ((ahc->features & AHC_TWIN) != 0)
		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
			      "B SCSI Id=%d, primary %c, ",
			      ahc->our_id, ahc->our_id_b,
			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
	else {
		const char *speed;
		const char *type;

		speed = "";
		if ((ahc->features & AHC_ULTRA) != 0) {
			speed = "Ultra ";
		} else if ((ahc->features & AHC_DT) != 0) {
			speed = "Ultra160 ";
		} else if ((ahc->features & AHC_ULTRA2) != 0) {
			speed = "Ultra2 ";
		}
		if ((ahc->features & AHC_WIDE) != 0) {
			type = "Wide";
		} else {
			type = "Single";
		}
		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
			      speed, type, ahc->channel, ahc->our_id);
	}
	buf +=
len; 4228 4229 if ((ahc->flags & AHC_PAGESCBS) != 0) 4230 sprintf(buf, "%d/%d SCBs", 4231 ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); 4232 else 4233 sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); 4234 } 4235 4236 /* 4237 * Start the board, ready for normal operation 4238 */ 4239 int 4240 ahc_init(struct ahc_softc *ahc) 4241 { 4242 int max_targ; 4243 int i; 4244 int term; 4245 u_int scsi_conf; 4246 u_int scsiseq_template; 4247 u_int ultraenb; 4248 u_int discenable; 4249 u_int tagenable; 4250 size_t driver_data_size; 4251 uint32_t physaddr; 4252 4253 #ifdef AHC_DEBUG_SEQUENCER 4254 ahc->flags |= AHC_SEQUENCER_DEBUG; 4255 #endif 4256 4257 #ifdef AHC_PRINT_SRAM 4258 printf("Scratch Ram:"); 4259 for (i = 0x20; i < 0x5f; i++) { 4260 if (((i % 8) == 0) && (i != 0)) { 4261 printf ("\n "); 4262 } 4263 printf (" 0x%x", ahc_inb(ahc, i)); 4264 } 4265 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4266 for (i = 0x70; i < 0x7f; i++) { 4267 if (((i % 8) == 0) && (i != 0)) { 4268 printf ("\n "); 4269 } 4270 printf (" 0x%x", ahc_inb(ahc, i)); 4271 } 4272 } 4273 printf ("\n"); 4274 /* 4275 * Reading uninitialized scratch ram may 4276 * generate parity errors. 4277 */ 4278 ahc_outb(ahc, CLRINT, CLRPARERR); 4279 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 4280 #endif 4281 max_targ = 15; 4282 4283 /* 4284 * Assume we have a board at this stage and it has been reset. 4285 */ 4286 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4287 ahc->our_id = ahc->our_id_b = 7; 4288 4289 /* 4290 * Default to allowing initiator operations. 4291 */ 4292 ahc->flags |= AHC_INITIATORROLE; 4293 4294 /* 4295 * Only allow target mode features if this unit has them enabled. 4296 */ 4297 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4298 ahc->features &= ~AHC_TARGETMODE; 4299 4300 #ifndef __linux__ 4301 /* DMA tag for mapping buffers into device visible space. 
*/ 4302 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4303 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4304 /*lowaddr*/BUS_SPACE_MAXADDR, 4305 /*highaddr*/BUS_SPACE_MAXADDR, 4306 /*filter*/NULL, /*filterarg*/NULL, 4307 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG, 4308 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 4309 /*flags*/BUS_DMA_ALLOCNOW, 4310 &ahc->buffer_dmat) != 0) { 4311 return (ENOMEM); 4312 } 4313 #endif 4314 4315 ahc->init_level++; 4316 4317 /* 4318 * DMA tag for our command fifos and other data in system memory 4319 * the card's sequencer must be able to access. For initiator 4320 * roles, we need to allocate space for the the qinfifo and qoutfifo. 4321 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4322 * When providing for the target mode role, we must additionally 4323 * provide space for the incoming target command fifo and an extra 4324 * byte to deal with a dma bug in some chip versions. 4325 */ 4326 driver_data_size = 2 * 256 * sizeof(uint8_t); 4327 if ((ahc->features & AHC_TARGETMODE) != 0) 4328 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4329 + /*DMA WideOdd Bug Buffer*/1; 4330 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4331 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4332 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4333 /*highaddr*/BUS_SPACE_MAXADDR, 4334 /*filter*/NULL, /*filterarg*/NULL, 4335 driver_data_size, 4336 /*nsegments*/1, 4337 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4338 /*flags*/0, &ahc->shared_data_dmat) != 0) { 4339 return (ENOMEM); 4340 } 4341 4342 ahc->init_level++; 4343 4344 /* Allocation of driver data */ 4345 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, 4346 (void **)&ahc->qoutfifo, 4347 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 4348 return (ENOMEM); 4349 } 4350 4351 ahc->init_level++; 4352 4353 /* And permanently map it in */ 4354 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 4355 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, 4356 
&ahc->shared_data_busaddr, /*flags*/0); 4357 4358 if ((ahc->features & AHC_TARGETMODE) != 0) { 4359 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4360 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4361 ahc->dma_bug_buf = ahc->shared_data_busaddr 4362 + driver_data_size - 1; 4363 /* All target command blocks start out invalid. */ 4364 for (i = 0; i < AHC_TMODE_CMDS; i++) 4365 ahc->targetcmds[i].cmd_valid = 0; 4366 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); 4367 ahc->tqinfifonext = 1; 4368 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4369 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4370 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4371 } 4372 ahc->qinfifo = &ahc->qoutfifo[256]; 4373 4374 ahc->init_level++; 4375 4376 /* Allocate SCB data now that buffer_dmat is initialized */ 4377 if (ahc->scb_data->maxhscbs == 0) 4378 if (ahc_init_scbdata(ahc) != 0) 4379 return (ENOMEM); 4380 4381 /* 4382 * Allocate a tstate to house information for our 4383 * initiator presence on the bus as well as the user 4384 * data for any target mode initiator. 4385 */ 4386 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4387 printf("%s: unable to allocate ahc_tmode_tstate. " 4388 "Failing attach\n", ahc_name(ahc)); 4389 return (ENOMEM); 4390 } 4391 4392 if ((ahc->features & AHC_TWIN) != 0) { 4393 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4394 printf("%s: unable to allocate ahc_tmode_tstate. 
" 4395 "Failing attach\n", ahc_name(ahc)); 4396 return (ENOMEM); 4397 } 4398 } 4399 4400 ahc_outb(ahc, SEQ_FLAGS, 0); 4401 ahc_outb(ahc, SEQ_FLAGS2, 0); 4402 4403 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4404 ahc->flags |= AHC_PAGESCBS; 4405 } else { 4406 ahc->flags &= ~AHC_PAGESCBS; 4407 } 4408 4409 #ifdef AHC_DEBUG 4410 if (ahc_debug & AHC_SHOWMISC) { 4411 printf("%s: hardware scb %d bytes; kernel scb %d bytes; " 4412 "ahc_dma %d bytes\n", 4413 ahc_name(ahc), 4414 sizeof(struct hardware_scb), 4415 sizeof(struct scb), 4416 sizeof(struct ahc_dma_seg)); 4417 } 4418 #endif /* AHC_DEBUG */ 4419 4420 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4421 if (ahc->features & AHC_TWIN) { 4422 4423 /* 4424 * The device is gated to channel B after a chip reset, 4425 * so set those values first 4426 */ 4427 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4428 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4429 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4430 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4431 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4432 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); 4433 if ((ahc->features & AHC_ULTRA2) != 0) 4434 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4435 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4436 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4437 4438 if ((scsi_conf & RESET_SCSI) != 0 4439 && (ahc->flags & AHC_INITIATORROLE) != 0) 4440 ahc->flags |= AHC_RESET_BUS_B; 4441 4442 /* Select Channel A */ 4443 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4444 } 4445 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? 
STPWEN : 0; 4446 if ((ahc->features & AHC_ULTRA2) != 0) 4447 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4448 else 4449 ahc_outb(ahc, SCSIID, ahc->our_id); 4450 scsi_conf = ahc_inb(ahc, SCSICONF); 4451 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4452 |term|ahc->seltime 4453 |ENSTIMER|ACTNEGEN); 4454 if ((ahc->features & AHC_ULTRA2) != 0) 4455 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4456 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4457 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4458 4459 if ((scsi_conf & RESET_SCSI) != 0 4460 && (ahc->flags & AHC_INITIATORROLE) != 0) 4461 ahc->flags |= AHC_RESET_BUS_A; 4462 4463 /* 4464 * Look at the information that board initialization or 4465 * the board bios has left us. 4466 */ 4467 ultraenb = 0; 4468 tagenable = ALL_TARGETS_MASK; 4469 4470 /* Grab the disconnection disable table and invert it for our needs */ 4471 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4472 printf("%s: Host Adapter Bios disabled. Using default SCSI " 4473 "device parameters\n", ahc_name(ahc)); 4474 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4475 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4476 discenable = ALL_TARGETS_MASK; 4477 if ((ahc->features & AHC_ULTRA) != 0) 4478 ultraenb = ALL_TARGETS_MASK; 4479 } else { 4480 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4481 | ahc_inb(ahc, DISC_DSB)); 4482 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4483 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4484 | ahc_inb(ahc, ULTRA_ENB); 4485 } 4486 4487 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4488 max_targ = 7; 4489 4490 for (i = 0; i <= max_targ; i++) { 4491 struct ahc_initiator_tinfo *tinfo; 4492 struct ahc_tmode_tstate *tstate; 4493 u_int our_id; 4494 u_int target_id; 4495 char channel; 4496 4497 channel = 'A'; 4498 our_id = ahc->our_id; 4499 target_id = i; 4500 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4501 channel = 'B'; 4502 our_id = ahc->our_id_b; 4503 target_id = i % 8; 4504 } 4505 tinfo = 
ahc_fetch_transinfo(ahc, channel, our_id, 4506 target_id, &tstate); 4507 /* Default to async narrow across the board */ 4508 memset(tinfo, 0, sizeof(*tinfo)); 4509 if (ahc->flags & AHC_USEDEFAULTS) { 4510 if ((ahc->features & AHC_WIDE) != 0) 4511 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4512 4513 /* 4514 * These will be truncated when we determine the 4515 * connection type we have with the target. 4516 */ 4517 tinfo->user.period = ahc_syncrates->period; 4518 tinfo->user.offset = ~0; 4519 } else { 4520 u_int scsirate; 4521 uint16_t mask; 4522 4523 /* Take the settings leftover in scratch RAM. */ 4524 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4525 mask = (0x01 << i); 4526 if ((ahc->features & AHC_ULTRA2) != 0) { 4527 u_int offset; 4528 u_int maxsync; 4529 4530 if ((scsirate & SOFS) == 0x0F) { 4531 /* 4532 * Haven't negotiated yet, 4533 * so the format is different. 4534 */ 4535 scsirate = (scsirate & SXFR) >> 4 4536 | (ultraenb & mask) 4537 ? 0x08 : 0x0 4538 | (scsirate & WIDEXFER); 4539 offset = MAX_OFFSET_ULTRA2; 4540 } else 4541 offset = ahc_inb(ahc, TARG_OFFSET + i); 4542 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 4543 /* Set to the lowest sync rate, 5MHz */ 4544 scsirate |= 0x1c; 4545 maxsync = AHC_SYNCRATE_ULTRA2; 4546 if ((ahc->features & AHC_DT) != 0) 4547 maxsync = AHC_SYNCRATE_DT; 4548 tinfo->user.period = 4549 ahc_find_period(ahc, scsirate, maxsync); 4550 if (offset == 0) 4551 tinfo->user.period = 0; 4552 else 4553 tinfo->user.offset = ~0; 4554 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 4555 && (ahc->features & AHC_DT) != 0) 4556 tinfo->user.ppr_options = 4557 MSG_EXT_PPR_DT_REQ; 4558 } else if ((scsirate & SOFS) != 0) { 4559 if ((scsirate & SXFR) == 0x40 4560 && (ultraenb & mask) != 0) { 4561 /* Treat 10MHz as a non-ultra speed */ 4562 scsirate &= ~SXFR; 4563 ultraenb &= ~mask; 4564 } 4565 tinfo->user.period = 4566 ahc_find_period(ahc, scsirate, 4567 (ultraenb & mask) 4568 ? 
AHC_SYNCRATE_ULTRA 4569 : AHC_SYNCRATE_FAST); 4570 if (tinfo->user.period != 0) 4571 tinfo->user.offset = ~0; 4572 } 4573 if (tinfo->user.period == 0) 4574 tinfo->user.offset = 0; 4575 if ((scsirate & WIDEXFER) != 0 4576 && (ahc->features & AHC_WIDE) != 0) 4577 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4578 tinfo->user.protocol_version = 4; 4579 if ((ahc->features & AHC_DT) != 0) 4580 tinfo->user.transport_version = 3; 4581 else 4582 tinfo->user.transport_version = 2; 4583 tinfo->goal.protocol_version = 2; 4584 tinfo->goal.transport_version = 2; 4585 tinfo->curr.protocol_version = 2; 4586 tinfo->curr.transport_version = 2; 4587 } 4588 tstate->ultraenb = ultraenb; 4589 } 4590 ahc->user_discenable = discenable; 4591 ahc->user_tagenable = tagenable; 4592 4593 /* There are no untagged SCBs active yet. */ 4594 for (i = 0; i < 16; i++) { 4595 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); 4596 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4597 int lun; 4598 4599 /* 4600 * The SCB based BTT allows an entry per 4601 * target and lun pair. 4602 */ 4603 for (lun = 1; lun < AHC_NUM_LUNS; lun++) 4604 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); 4605 } 4606 } 4607 4608 /* All of our queues are empty */ 4609 for (i = 0; i < 256; i++) 4610 ahc->qoutfifo[i] = SCB_LIST_NULL; 4611 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); 4612 4613 for (i = 0; i < 256; i++) 4614 ahc->qinfifo[i] = SCB_LIST_NULL; 4615 4616 if ((ahc->features & AHC_MULTI_TID) != 0) { 4617 ahc_outb(ahc, TARGID, 0); 4618 ahc_outb(ahc, TARGID + 1, 0); 4619 } 4620 4621 /* 4622 * Tell the sequencer where it can find our arrays in memory. 
4623 */ 4624 physaddr = ahc->scb_data->hscb_busaddr; 4625 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4626 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4627 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4628 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4629 4630 physaddr = ahc->shared_data_busaddr; 4631 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4632 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4633 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4634 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4635 4636 /* 4637 * Initialize the group code to command length table. 4638 * This overrides the values in TARG_SCSIRATE, so only 4639 * setup the table after we have processed that information. 4640 */ 4641 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4642 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4643 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4644 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4645 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4646 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4647 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4648 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4649 4650 /* Tell the sequencer of our initial queue positions */ 4651 ahc_outb(ahc, KERNEL_QINPOS, 0); 4652 ahc_outb(ahc, QINPOS, 0); 4653 ahc_outb(ahc, QOUTPOS, 0); 4654 4655 /* 4656 * Use the built in queue management registers 4657 * if they are available. 4658 */ 4659 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4660 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4661 ahc_outb(ahc, SDSCB_QOFF, 0); 4662 ahc_outb(ahc, SNSCB_QOFF, 0); 4663 ahc_outb(ahc, HNSCB_QOFF, 0); 4664 } 4665 4666 4667 /* We don't have any waiting selections */ 4668 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4669 4670 /* Our disconnection list is empty too */ 4671 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4672 4673 /* Message out buffer starts empty */ 4674 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4675 4676 /* 4677 * Setup the allowed SCSI Sequences based on operational mode. 
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
	if ((ahc->flags & AHC_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);

	/*
	 * Load the Sequencer program and Enable the adapter
	 * in "fast" mode.
	 */
	if (bootverbose)
		printf("%s: Downloading Sequencer Program...",
		       ahc_name(ahc));

	ahc_loadseq(ahc);

	if ((ahc->features & AHC_ULTRA2) != 0) {
		int wait;

		/*
		 * Wait for up to 500ms for our transceivers
		 * to settle.  If the adapter does not have
		 * a cable attached, the transceivers may
		 * never settle, so don't complain if we
		 * fail here.
		 */
		ahc_pause(ahc);
		for (wait = 5000;
		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
		     wait--)
			ahc_delay(100);
		ahc_unpause(ahc);
	}
	return (0);
}

/*
 * Enable or disable interrupt delivery from the controller by
 * setting or clearing INTEN in HCNTRL.  The cached "pause" and
 * "unpause" HCNTRL images are updated in lock step so that later
 * ahc_pause()/ahc_unpause() calls preserve the chosen interrupt
 * state.
 */
void
ahc_intr_enable(struct ahc_softc *ahc, int enable)
{
	u_int hcntrl;

	hcntrl = ahc_inb(ahc, HCNTRL);
	hcntrl &= ~INTEN;
	ahc->pause &= ~INTEN;
	ahc->unpause &= ~INTEN;
	if (enable) {
		hcntrl |= INTEN;
		ahc->pause |= INTEN;
		ahc->unpause |= INTEN;
	}
	ahc_outb(ahc, HCNTRL, hcntrl);
}

/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
 */
void
ahc_pause_and_flushwork(struct ahc_softc *ahc)
{
	int intstat;
	int maxloops;

	maxloops = 1000;
	ahc->flags |= AHC_ALL_INTERRUPTS;
	intstat = 0;
	do {
		ahc_intr(ahc);
		ahc_pause(ahc);
		ahc_clear_critical_section(ahc);
		/*
		 * An INTSTAT of 0xFF indicates the card has been
		 * removed (all bits float high); only meaningful on
		 * removable (e.g. CardBus) controllers.
		 */
		if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
			break;
		/*
		 * NOTE(review): maxloops is decremented both here and
		 * again in the while condition below, so the effective
		 * iteration budget is ~500, not 1000 — confirm intended.
		 */
		maxloops--;
	} while (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) && --maxloops);
	if (maxloops == 0) {
		printf("Infinite interrupt loop, INTSTAT = %x",
		       ahc_inb(ahc, INTSTAT));
	}
	ahc_platform_flushwork(ahc);
	ahc->flags &= ~AHC_ALL_INTERRUPTS;
}

/*
 * Prepare the controller for a power-down.  Refuses (EBUSY) while
 * transactions are outstanding, then snapshots all volatile chip
 * registers and scratch RAM into ahc->suspend_state so that
 * ahc_resume() can rebuild the hardware state, and finally shuts
 * the chip down.  Returns 0 on success or EBUSY.
 */
int
ahc_suspend(struct ahc_softc *ahc)
{
	uint8_t *ptr;
	int i;

	ahc_pause_and_flushwork(ahc);

	if (LIST_FIRST(&ahc->pending_scbs) != NULL)
		return (EBUSY);

#if AHC_TARGET_MODE
	/*
	 * XXX What about ATIOs that have not yet been serviced?
	 * Perhaps we should just refuse to be suspended if we
	 * are acting in a target role.
	 */
	if (ahc->pending_device != NULL)
		return (EBUSY);
#endif

	/* Save volatile registers */
	if ((ahc->features & AHC_TWIN) != 0) {
		/* Gate to channel B, save its registers, then return to A. */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
		ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ);
		ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
		ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
		ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0);
		ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1);
		ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER);
		ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL);
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
	ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
	ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0);
	ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1);
	ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER);
	ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL);

	if ((ahc->chip & AHC_PCI) != 0) {
		ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
		ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
	}

	if ((ahc->features & AHC_DT) != 0) {
		u_int sfunct;

		/* OPTIONMODE is only visible in the chip's ALT register mode. */
		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
		ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE);
		ahc_outb(ahc, SFUNCT, sfunct);
		ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1);
	}

	if ((ahc->features & AHC_MULTI_FUNC) != 0)
		ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR);

	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH);

	/* Snapshot the 64 bytes of sequencer scratch RAM. */
	ptr = ahc->suspend_state.scratch_ram;
	for (i = 0; i < 64; i++)
		*ptr++ = ahc_inb(ahc, SRAM_BASE + i);

	if ((ahc->features & AHC_MORE_SRAM) != 0) {
		for (i = 0; i < 16; i++)
			*ptr++ = ahc_inb(ahc, TARG_OFFSET + i);
	}

	/* Save the SCB-based busy target table, one entry per target/lun. */
	ptr = ahc->suspend_state.btt;
	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		for (i = 0;i < AHC_NUM_TARGETS; i++) {
			int j;

			for (j = 0;j < AHC_NUM_LUNS; j++) {
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				/*
				 * NOTE(review): ptr is never advanced in
				 * this loop (compare the scratch RAM loops
				 * above, which use *ptr++), so every entry
				 * lands in btt[0].  The restore loop in
				 * ahc_resume() has the same shape — confirm
				 * whether *ptr++ was intended.
				 */
				*ptr = ahc_index_busy_tcl(ahc, tcl);
			}
		}
	}
	ahc_shutdown(ahc);
	return (0);
}

/*
 * Counterpart to ahc_suspend(): reset the chip, rebuild the free SCB
 * list, and replay every register and scratch RAM value captured in
 * ahc->suspend_state.  Always returns 0.
 */
int
ahc_resume(struct ahc_softc *ahc)
{
	uint8_t *ptr;
	int i;

	ahc_reset(ahc);

	ahc_build_free_scb_list(ahc);

	/* Restore volatile registers */
	if ((ahc->features & AHC_TWIN) != 0) {
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
		/*
		 * NOTE(review): channel B is selected here, yet SCSIID is
		 * written with our_id rather than our_id_b (which
		 * ahc_suspend's caller context tracks separately) —
		 * verify this should not be ahc->our_id_b.
		 */
		ahc_outb(ahc, SCSIID, ahc->our_id);
		ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq);
		ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0);
		ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1);
		ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0);
		ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1);
		ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer);
		ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl);
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq);
	ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0);
	ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1);
	ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0);
	ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1);
	ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer);
	ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl);
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
	else
		ahc_outb(ahc, SCSIID, ahc->our_id);

	if ((ahc->chip & AHC_PCI) != 0) {
		ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0);
		ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus);
	}

	if ((ahc->features & AHC_DT) != 0) {
		u_int sfunct;

		/* OPTIONMODE lives behind the ALT_MODE register window. */
		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
		ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode);
		ahc_outb(ahc, SFUNCT, sfunct);
		ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1);
	}

	if ((ahc->features & AHC_MULTI_FUNC) != 0)
		ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr);

	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh);

	/* Replay the sequencer scratch RAM snapshot. */
	ptr = ahc->suspend_state.scratch_ram;
	for (i = 0; i < 64; i++)
		ahc_outb(ahc, SRAM_BASE + i, *ptr++);

	if ((ahc->features & AHC_MORE_SRAM) != 0) {
		for (i = 0; i < 16; i++)
			ahc_outb(ahc, TARG_OFFSET + i, *ptr++);
	}

	ptr = ahc->suspend_state.btt;
	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		for (i = 0;i < AHC_NUM_TARGETS; i++) {
			int j;

			for (j = 0;j < AHC_NUM_LUNS; j++) {
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				/*
				 * NOTE(review): as in ahc_suspend(), ptr is
				 * never advanced, so btt[0] is replayed for
				 * every target/lun — confirm *ptr++ intent.
				 */
				ahc_busy_tcl(ahc, tcl, *ptr);
			}
		}
	}
	return (0);
}

/************************** Busy Target Table *********************************/
/*
 * Return the untagged transaction id for a given target/channel lun.
 * Optionally, clear the entry.
 */
u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int scbid;
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		/*
		 * SCB-based table: one SCB page per lun, indexed by
		 * target within the page.  SCBPTR is saved/restored
		 * around the lookup.
		 */
		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		/* Scratch RAM table: one entry per target only. */
		target_offset = TCL_TARGET_OFFSET(tcl);
		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
	}

	return (scbid);
}

/*
 * Clear the busy target table entry for the given tcl, marking the
 * target/lun as having no active untagged transaction.
 */
void
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
	}
}

/*
 * Record scbid as the active untagged transaction for the target/lun
 * encoded in tcl.
 */
void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
	}
}

/************************** SCB and SCB queue management **********************/
/*
 * Test whether an SCB matches the given target/channel/lun/tag
 * description.  Each of channel, target, lun and tag may be wildcarded
 * (ALL_CHANNELS, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, SCB_LIST_NULL
 * respectively).  Returns non-zero on a match.
 */
int
ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahc, scb);
	char chan = SCB_GET_CHANNEL(ahc, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#if AHC_TARGET_MODE
		int group;

		/*
		 * In target mode, initiator-role tags live in the hscb
		 * while target-role tags come from the CCB's csio, so
		 * the comparison depends on both role and CCB group.
		 */
		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			      && ((tag == scb->hscb->tag)
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHC_TARGET_MODE */
		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
#endif /* AHC_TARGET_MODE */
	}

	return match;
}

/*
 * Freeze the device queue associated with the given SCB: requeue any
 * qinfifo transactions for that device (CAM_REQUEUE_REQ) and let the
 * platform layer freeze its queue.
 */
void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	int	target;
	char	channel;
	int	lun;

	target = SCB_GET_TARGET(ahc, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahc, scb);

	ahc_search_qinfifo(ahc, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahc_platform_freeze_devq(ahc, scb);
}

/*
 * Place an SCB at the tail of the qinfifo, linking it behind the
 * current last entry (if any), and notify the sequencer of the new
 * queue position.
 */
void
ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
{
	struct scb *prev_scb;

	prev_scb = NULL;
	if (ahc_qinfifo_count(ahc) != 0) {
		u_int prev_tag;
		uint8_t prev_pos;

		/* qinfifonext is a uint8_t index; wraps naturally at 256. */
		prev_pos = ahc->qinfifonext - 1;
		prev_tag = ahc->qinfifo[prev_pos];
		prev_scb = ahc_lookup_scb(ahc, prev_tag);
	}
	ahc_qinfifo_requeue(ahc, prev_scb, scb);
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}
}

/*
 * Link scb into the qinfifo after prev_scb (or at the head when
 * prev_scb is NULL) and fix up the hardware SCB "next" chain, syncing
 * the affected hscbs for DMA.
 */
static void
ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		/* New head: point the sequencer's next-SCB fetch at us. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
	} else {
		prev_scb->hscb->next = scb->hscb->tag;
		ahc_sync_scb(ahc, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Return the number of entries the sequencer has not yet consumed
 * from the qinfifo (8-bit modular difference between our producer
 * index and the chip's consumer index).
 */
static int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	u_int8_t qinpos;
	u_int8_t diff;

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		/* Reading SNSCB_QOFF has side effects; write the value back. */
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	diff = ahc->qinfifonext - qinpos;
	return (diff);
}

/*
 * Scan the qinfifo, the waiting-for-selection list and the untagged
 * holding queues for SCBs matching target/channel/lun/tag/role and
 * apply "action" to each match: SEARCH_COMPLETE finishes them with
 * the given status, SEARCH_REMOVE drops them from the queues, and
 * SEARCH_COUNT merely counts (leaving the queues intact).  Returns
 * the number of matches.
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahc_search_action action)
{
	struct	scb *scb;
	struct	scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next;
	uint8_t prev;
	uint8_t curscbptr;
	int	found;
	int	maxtarget;
	int	i;
	int	have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		/* SNSCB_QOFF read has side effects; write the value back. */
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahc->qinfifonext = qinpos;
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	while (qinpos != qintail) {
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
		if (scb == NULL) {
			printf("qinpos = %d, SCB index = %d\n",
				qinpos, ahc->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				/*
				 * Only overwrite a status that is still
				 * "in progress"; a previously assigned
				 * error status takes precedence.
				 */
				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb,
								   status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahc_done(ahc, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Matched entry is simply not re-queued. */
				break;
			case SEARCH_COUNT:
				/* Counting only: keep the entry queued. */
				ahc_qinfifo_requeue(ahc, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Non-matching entries always go back on the queue. */
			ahc_qinfifo_requeue(ahc, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos++;
	}

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}

	if (action != SEARCH_COUNT
	 && (found != 0)
	 && (qinstart != ahc->qinfifonext)) {
		/*
		 * The sequencer may be in the process of dmaing
		 * down the SCB at the beginning of the queue.
		 * This could be problematic if either the first,
		 * or the second SCB is removed from the queue
		 * (the first SCB includes a pointer to the "next"
		 * SCB to dma).  If we have removed any entries, swap
		 * the first element in the queue with the next HSCB
		 * so the sequencer will notice that NEXT_QUEUED_SCB
		 * has changed during its dma attempt and will retry
		 * the DMA.
		 */
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);

		if (scb == NULL) {
			printf("found = %d, qinstart = %d, qinfifionext = %d\n",
				found, qinstart, ahc->qinfifonext);
			panic("First/Second Qinfifo fixup\n");
		}
		/*
		 * ahc_swap_with_next_hscb forces our next pointer to
		 * point to the reserved SCB for future commands.  Save
		 * and restore our original next pointer to maintain
		 * queue integrity.
		 */
		next = scb->hscb->next;
		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
		ahc_swap_with_next_hscb(ahc, scb);
		scb->hscb->next = next;
		ahc->qinfifo[qinstart] = scb->hscb->tag;

		/* Tell the card about the new head of the qinfifo. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);

		/* Fixup the tail "next" pointer. */
		qintail = ahc->qinfifonext - 1;
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	}

	/*
	 * Search waiting for selection list.
	 */
	curscbptr = ahc_inb(ahc, SCBPTR);
	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
	prev = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		uint8_t scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Waiting List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("scb_index = %d, next = %d\n",
				scb_index, next);
			panic("Waiting List traversal\n");
		}
		/* Tag is wildcarded here; the hardware list has no tag info. */
		if (ahc_match_scb(ahc, scb, target, channel,
				  lun, SCB_LIST_NULL, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb,
								   status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in Waiting List\n");
				ahc_done(ahc, scb);
				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* ahc_rem_wscb returns the follow-on entry. */
				next = ahc_rem_wscb(ahc, next, prev);
				break;
			case SEARCH_COUNT:
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
				break;
			}
		} else {

			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	ahc_outb(ahc, SCBPTR, curscbptr);

	/*
	 * And lastly, the untagged holding queues.
	 */
	i = 0;
	if ((ahc->flags & AHC_SCB_BTT) == 0) {

		maxtarget = 16;
		if (target != CAM_TARGET_WILDCARD) {

			i = target;
			if (channel == 'B')
				i += 8;
			maxtarget = i + 1;
		}
	} else {
		/* SCB-based BTT: untagged holding queues are not used. */
		maxtarget = 0;
	}

	for (; i < maxtarget; i++) {
		struct scb_tailq *untagged_q;
		struct scb *next_scb;

		untagged_q = &(ahc->untagged_queues[i]);
		next_scb = TAILQ_FIRST(untagged_q);
		while (next_scb != NULL) {

			/* Grab the successor first; scb may be removed. */
			scb = next_scb;
			next_scb = TAILQ_NEXT(scb, links.tqe);

			/*
			 * The head of the list may be the currently
			 * active untagged command for a device.
			 * We're only searching for commands that
			 * have not been started.  A transaction
			 * marked active but still in the qinfifo
			 * is removed by the qinfifo scanning code
			 * above.
			 */
			if ((scb->flags & SCB_ACTIVE) != 0)
				continue;

			if (ahc_match_scb(ahc, scb, target, channel,
					  lun, SCB_LIST_NULL, role)) {
				/*
				 * We found an scb that needs to be acted on.
				 */
				found++;
				switch (action) {
				case SEARCH_COMPLETE:
				{
					cam_status ostat;
					cam_status cstat;

					ostat = ahc_get_transaction_status(scb);
					if (ostat == CAM_REQ_INPROG)
						ahc_set_transaction_status(scb,
								status);
					cstat = ahc_get_transaction_status(scb);
					if (cstat != CAM_REQ_CMP)
						ahc_freeze_scb(scb);
					if ((scb->flags & SCB_ACTIVE) == 0)
						printf("Inactive SCB in untaggedQ\n");
					ahc_done(ahc, scb);
					break;
				}
				case SEARCH_REMOVE:
					TAILQ_REMOVE(untagged_q, scb,
						     links.tqe);
					break;
				case SEARCH_COUNT:
					break;
				}
			}
		}
	}

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Walk the on-chip disconnected SCB list counting entries that match
 * target/channel/lun/tag (initiator role).  When "remove" is set,
 * matching entries are unlinked and freed; "stop_on_first" ends the
 * scan at the first match; "save_state" preserves SCBPTR across the
 * walk.  Returns the number of matches found.
 */
int
ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
		     int lun, u_int tag, int stop_on_first, int remove,
		     int save_state)
{
	struct	scb *scbp;
	u_int	next;
	u_int	prev;
	u_int	count;
	u_int	active_scb;

	count = 0;
	next = ahc_inb(ahc, DISCONNECTED_SCBH);
	prev = SCB_LIST_NULL;

	if (save_state) {
		/* restore this when we're done */
		active_scb = ahc_inb(ahc, SCBPTR);
	} else
		/* Silence compiler */
		active_scb = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		u_int scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Disconnected List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}

		if (next == prev) {
			panic("Disconnected List Loop.  "
			      "cur SCBPTR == %x, prev SCBPTR == %x.",
			      next, prev);
		}
		scbp = ahc_lookup_scb(ahc, scb_index);
		/*
		 * NOTE(review): unlike the qinfifo and waiting-list scans,
		 * scbp is not checked for NULL before being handed to
		 * ahc_match_scb(), which dereferences it — confirm a
		 * disconnected-list entry always has a kernel SCB mapping.
		 */
		if (ahc_match_scb(ahc, scbp, target, channel, lun,
				  tag, ROLE_INITIATOR)) {
			count++;
			if (remove) {
				next =
				    ahc_rem_scb_from_disc_list(ahc, prev, next);
			} else {
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
			}
			if (stop_on_first)
				break;
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	if (save_state)
		ahc_outb(ahc, SCBPTR, active_scb);
	return (count);
}

/*
 * Remove an SCB from the on chip list of disconnected transactions.
 * This is empty/unused if we are not performing SCB paging.
 * Returns the successor of the removed entry so callers can continue
 * their traversal.  Leaves SCBPTR pointing at "prev" (or unchanged
 * when removing the list head).
 */
static u_int
ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
{
	u_int next;

	ahc_outb(ahc, SCBPTR, scbptr);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Zero the control byte so the sequencer ignores this SCB. */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	if (prev != SCB_LIST_NULL) {
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	} else
		ahc_outb(ahc, DISCONNECTED_SCBH, next);

	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
{
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	 */
	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* Push onto the head of the free list. */
		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.  Returns the number of SCBs aborted.
 */
int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct	scb *scbp;
	struct	scb *scbp_next;
	u_int	active_scb;
	int	i, j;
	int	maxtarget;
	int	minlun;
	int	maxlun;

	int	found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process.
	 */
	ahc_freeze_untagged_queues(ahc);

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	/* First sweep the software queues (qinfifo/waiting/untagged). */
	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {

		/*
		 * Unless we are using an SCB based
		 * busy targets table, there is only
		 * one table entry for all luns of
		 * a target.
		 */
		minlun = 0;
		maxlun = 1;
		if ((ahc->flags & AHC_SCB_BTT) != 0)
			maxlun = AHC_NUM_LUNS;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				scbid = ahc_index_busy_tcl(ahc, tcl);
				scbp = ahc_lookup_scb(ahc, scbid);
				if (scbp == NULL
				 || ahc_match_scb(ahc, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
			}
		}

		/*
		 * Go through the disconnected list and remove any entries we
		 * have queued for completion, 0'ing their control byte too.
		 * We save the active SCB and restore it ourselves, so there
		 * is no reason for this search to restore it too.
		 */
		ahc_search_disc_list(ahc, target, channel, lun, tag,
				     /*stop_on_first*/FALSE, /*remove*/TRUE,
				     /*save_state*/FALSE);
	}

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.  In some cases, these remnants
	 * might not still have mappings in the scbindex array (e.g. unexpected
	 * bus free with the same scb queued for an abort).  Don't hold this
	 * against them.
	 */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = ahc_lookup_scb(ahc, scbid);
		if ((scbp == NULL && scbid != SCB_LIST_NULL)
		 || (scbp != NULL
		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		/* ahc_done unlinks scbp; fetch the successor first. */
		scbp = scbp_next;
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahc_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahc_set_transaction_status(scbp, status);
			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahc_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}

/*
 * Assert SCSIRSTO on the currently selected channel long enough
 * (AHC_BUSRESET_DELAY) to reset the bus, with the reset interrupt
 * masked so we do not take our own reset as an event.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	/* Mask our own bus-reset interrupt while we drive the reset. */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	ahc_flush_device_writes(ahc);
	ahc_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); 5701 } 5702 5703 int 5704 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) 5705 { 5706 struct ahc_devinfo devinfo; 5707 u_int initiator, target, max_scsiid; 5708 u_int sblkctl; 5709 u_int scsiseq; 5710 u_int simode1; 5711 int found; 5712 int restart_needed; 5713 char cur_channel; 5714 5715 ahc->pending_device = NULL; 5716 5717 ahc_compile_devinfo(&devinfo, 5718 CAM_TARGET_WILDCARD, 5719 CAM_TARGET_WILDCARD, 5720 CAM_LUN_WILDCARD, 5721 channel, ROLE_UNKNOWN); 5722 ahc_pause(ahc); 5723 5724 /* Make sure the sequencer is in a safe location. */ 5725 ahc_clear_critical_section(ahc); 5726 5727 /* 5728 * Run our command complete fifos to ensure that we perform 5729 * completion processing on any commands that 'completed' 5730 * before the reset occurred. 5731 */ 5732 ahc_run_qoutfifo(ahc); 5733 #if AHC_TARGET_MODE 5734 /* 5735 * XXX - In Twin mode, the tqinfifo may have commands 5736 * for an unaffected channel in it. However, if 5737 * we have run out of ATIO resources to drain that 5738 * queue, we may not get them all out here. Further, 5739 * the blocked transactions for the reset channel 5740 * should just be killed off, irrespecitve of whether 5741 * we are blocked on ATIO resources. Write a routine 5742 * to compact the tqinfifo appropriately. 5743 */ 5744 if ((ahc->flags & AHC_TARGETROLE) != 0) { 5745 ahc_run_tqinfifo(ahc, /*paused*/TRUE); 5746 } 5747 #endif 5748 5749 /* 5750 * Reset the bus if we are initiating this reset 5751 */ 5752 sblkctl = ahc_inb(ahc, SBLKCTL); 5753 cur_channel = 'A'; 5754 if ((ahc->features & AHC_TWIN) != 0 5755 && ((sblkctl & SELBUSB) != 0)) 5756 cur_channel = 'B'; 5757 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 5758 if (cur_channel != channel) { 5759 /* Case 1: Command for another bus is active 5760 * Stealthily reset the other bus without 5761 * upsetting the current bus. 
5762 */ 5763 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); 5764 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); 5765 #if AHC_TARGET_MODE 5766 /* 5767 * Bus resets clear ENSELI, so we cannot 5768 * defer re-enabling bus reset interrupts 5769 * if we are in target mode. 5770 */ 5771 if ((ahc->flags & AHC_TARGETROLE) != 0) 5772 simode1 |= ENSCSIRST; 5773 #endif 5774 ahc_outb(ahc, SIMODE1, simode1); 5775 if (initiate_reset) 5776 ahc_reset_current_bus(ahc); 5777 ahc_clear_intstat(ahc); 5778 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); 5779 ahc_outb(ahc, SBLKCTL, sblkctl); 5780 restart_needed = FALSE; 5781 } else { 5782 /* Case 2: A command from this bus is active or we're idle */ 5783 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); 5784 #if AHC_TARGET_MODE 5785 /* 5786 * Bus resets clear ENSELI, so we cannot 5787 * defer re-enabling bus reset interrupts 5788 * if we are in target mode. 5789 */ 5790 if ((ahc->flags & AHC_TARGETROLE) != 0) 5791 simode1 |= ENSCSIRST; 5792 #endif 5793 ahc_outb(ahc, SIMODE1, simode1); 5794 if (initiate_reset) 5795 ahc_reset_current_bus(ahc); 5796 ahc_clear_intstat(ahc); 5797 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); 5798 restart_needed = TRUE; 5799 } 5800 5801 /* 5802 * Clean up all the state information for the 5803 * pending transactions on this bus. 5804 */ 5805 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, 5806 CAM_LUN_WILDCARD, SCB_LIST_NULL, 5807 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 5808 5809 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; 5810 5811 #ifdef AHC_TARGET_MODE 5812 /* 5813 * Send an immediate notify ccb to all target more peripheral 5814 * drivers affected by this action. 
5815 */ 5816 for (target = 0; target <= max_scsiid; target++) { 5817 struct ahc_tmode_tstate* tstate; 5818 u_int lun; 5819 5820 tstate = ahc->enabled_targets[target]; 5821 if (tstate == NULL) 5822 continue; 5823 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 5824 struct ahc_tmode_lstate* lstate; 5825 5826 lstate = tstate->enabled_luns[lun]; 5827 if (lstate == NULL) 5828 continue; 5829 5830 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, 5831 EVENT_TYPE_BUS_RESET, /*arg*/0); 5832 ahc_send_lstate_events(ahc, lstate); 5833 } 5834 } 5835 #endif 5836 /* Notify the XPT that a bus reset occurred */ 5837 ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, 5838 CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); 5839 5840 /* 5841 * Revert to async/narrow transfers until we renegotiate. 5842 */ 5843 for (target = 0; target <= max_scsiid; target++) { 5844 5845 if (ahc->enabled_targets[target] == NULL) 5846 continue; 5847 for (initiator = 0; initiator <= max_scsiid; initiator++) { 5848 struct ahc_devinfo devinfo; 5849 5850 ahc_compile_devinfo(&devinfo, target, initiator, 5851 CAM_LUN_WILDCARD, 5852 channel, ROLE_UNKNOWN); 5853 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 5854 AHC_TRANS_CUR, /*paused*/TRUE); 5855 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, 5856 /*period*/0, /*offset*/0, 5857 /*ppr_options*/0, AHC_TRANS_CUR, 5858 /*paused*/TRUE); 5859 } 5860 } 5861 5862 if (restart_needed) 5863 ahc_restart(ahc); 5864 else 5865 ahc_unpause(ahc); 5866 return found; 5867 } 5868 5869 5870 /***************************** Residual Processing ****************************/ 5871 /* 5872 * Calculate the residual for a just completed SCB. 5873 */ 5874 void 5875 ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) 5876 { 5877 struct hardware_scb *hscb; 5878 struct status_pkt *spkt; 5879 uint32_t sgptr; 5880 uint32_t resid_sgptr; 5881 uint32_t resid; 5882 5883 /* 5884 * 5 cases. 5885 * 1) No residual. 5886 * SG_RESID_VALID clear in sgptr. 
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	sgptr = ahc_le32toh(hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_RESID_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3: nothing moved; the residual is the full length. */
		resid = ahc_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
		}
	}
	/* Sense transfers record their residual separately. */
	if ((scb->flags & SCB_SENSE) == 0)
		ahc_set_residual(scb, resid);
	else
		ahc_set_sense_residual(scb, resid);

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_SHOWMISC) != 0) {
		ahc_print_path(ahc, scb);
		printf("Handled Residual of %d bytes\n", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHC_TARGET_MODE
/*
 * Add a target mode event to this lun's queue of events waiting for
 * immediate notify resources.  Freezes the devq once per queued event;
 * the matching releases happen when events are dropped or delivered.
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Number of events currently queued in the circular buffer. */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		/* Give back the freezes for the events we just discarded. */
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		/* Buffer full: drop the oldest event and report the loss. */
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/* Drain events while both an event and an INOT ccb are available. */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			/* Deliver the raw message to the peripheral. */
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHC_DUMP_SEQ
/*
 * Debug aid: read back and print the sequencer program currently
 * loaded in SEQRAM, one 32-bit instruction per line.
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;
	int max_prog;

	/* Program store size varies by chip generation. */
	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
		max_prog = 448;
	else if ((ahc->features & AHC_ULTRA2) != 0)
		max_prog = 768;
	else
		max_prog = 512;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < max_prog; i++) {
		uint8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer firmware, applying configuration-dependent
 * patches and recording where each critical section landed in the
 * (patched) program so the kernel can step the sequencer out of them.
 */
static void
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	int	downloaded;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	/* Prefetch at least two S/G segments worth of data at a time. */
	sg_prefetch_cnt = ahc->pci_cachesize;
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	/* Enter program-load mode with SEQRAM addressed from 0. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.  Record
		 * begin/end in *downloaded* (post-patch) addresses.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	/* Leave load mode and start the sequencer at address 0. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
	ahc_restart(ahc);

	if (bootverbose)
		printf(" %d instructions downloaded\n", downloaded);
}

/*
 * Decide whether instruction 'start_instr' should be downloaded,
 * consulting (and advancing) the patch table cursor at *start_patch.
 * Returns 0 if the instruction lies in a rejected patch region
 * (*skip_addr marks the first instruction past it), 1 otherwise.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	struct	patch *cur_patch;
	struct	patch *last_patch;
	u_int	num_patches;

	num_patches = sizeof(patches)/sizeof(struct patch);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Translate one firmware instruction for the current chip and write it
 * to SEQRAM: fix up branch targets for removed patch regions, substitute
 * downloadable constants, and apply parity (Ultra2) or the compressed
 * instruction encoding (older sequencers).
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		/*
		 * Branch target fixup: count how many instructions
		 * before 'address' were removed by rejected patches
		 * and shift the target down by that amount.
		 */
		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * The assembler sets the parity bit to flag immediates
		 * that are really indexes into the downloaded-constant
		 * table; substitute the real value here.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel.  Verify that
			 * this is only a move of a single element
			 * and convert the BMOV to a MOV
			 * (AND with an immediate of FF).
			 */
			if (fmt1_ins->immediate != 1)
				panic("%s: BMOV not supported\n",
				      ahc_name(ahc));
			fmt1_ins->opcode = AIC_OP_AND;
			fmt1_ins->immediate = 0xff;
		}
		/* FALLTHROUGH */
	case AIC_OP_ROL:
		if ((ahc->features & AHC_ULTRA2) != 0) {
			int i, count;

			/* Calculate odd parity for the instruction */
			for (i = 0, count = 0; i < 31; i++) {
				uint32_t mask;

				mask = 0x01 << i;
				if ((instr.integer & mask) != 0)
					count++;
			}
			if ((count & 0x01) == 0)
				instr.format1.parity = 1;
		} else {
			/* Compress the instruction for older sequencers */
			if (fmt3_ins != NULL) {
				instr.integer =
					fmt3_ins->immediate
				      | (fmt3_ins->source << 8)
				      | (fmt3_ins->address << 16)
				      | (fmt3_ins->opcode << 25);
			} else {
				instr.integer =
					fmt1_ins->immediate
				      | (fmt1_ins->source << 8)
				      | (fmt1_ins->destination << 16)
				      | (fmt1_ins->ret << 24)
				      | (fmt1_ins->opcode << 25);
			}
		}
		/* The sequencer is a little endian cpu */
		instr.integer = ahc_htole32(instr.integer);
		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
		break;
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}

/*
 * Print a diagnostic snapshot of the controller: sequencer registers,
 * the on-chip queues and lists, and the kernel's SCB bookkeeping.
 * Saves and restores SCBPTR around the dump.
 */
void
ahc_dump_card_state(struct ahc_softc *ahc)
{
	struct	scb *scb;
	struct	scb_tailq *untagged_q;
	int	target;
	int	maxtarget;
	int	i;
	uint8_t last_phase;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t qoutpos;
	uint8_t scb_index;
	uint8_t saved_scbptr;

	saved_scbptr = ahc_inb(ahc, SCBPTR);

	last_phase = ahc_inb(ahc, LASTPHASE);
	printf("%s: Dumping Card State %s, at SEQADDR 0x%x\n",
	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
	       ahc_inb(ahc, ARG_2));
	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
	       ahc_inb(ahc, SCBPTR));
	printf("SCSISEQ = 0x%x, SBLKCTL = 0x%x\n",
	       ahc_inb(ahc, SCSISEQ), ahc_inb(ahc, SBLKCTL));
	printf(" DFCNTRL = 0x%x, DFSTATUS = 0x%x\n",
	       ahc_inb(ahc, DFCNTRL), ahc_inb(ahc, DFSTATUS));
	printf("LASTPHASE = 0x%x, SCSISIGI = 0x%x, SXFRCTL0 = 0x%x\n",
	       last_phase, ahc_inb(ahc, SCSISIGI), ahc_inb(ahc, SXFRCTL0));
	printf("SSTAT0 = 0x%x, SSTAT1 = 0x%x\n",
	       ahc_inb(ahc, SSTAT0), ahc_inb(ahc, SSTAT1));
	if ((ahc->features & AHC_DT) != 0)
		printf("SCSIPHASE = 0x%x\n", ahc_inb(ahc, SCSIPHASE));
	/*
	 * Each pair of STACK reads pops one 16-bit entry, so these
	 * eight reads print the sequencer's four stack entries.
	 */
	printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));
	printf("SCB count = %d\n", ahc->scb_data->numscbs);
	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
	/* QINFIFO */
	printf("QINFIFO entries: ");
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		/* Reading SNSCB_QOFF has side effects; write it back. */
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	qintail = ahc->qinfifonext;
	while (qinpos != qintail) {
		printf("%d ", ahc->qinfifo[qinpos]);
		qinpos++;
	}
	printf("\n");

	/* The i++ < 256 guards below bound walks of corrupted lists. */
	printf("Waiting Queue entries: ");
	scb_index = ahc_inb(ahc, WAITING_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Disconnected Queue entries: ");
	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	printf("QOUTFIFO entries: ");
	qoutpos = ahc->qoutfifonext;
	i = 0;
	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
		printf("%d ", ahc->qoutfifo[qoutpos]);
		qoutpos++;
	}
	printf("\n");

	printf("Sequencer Free SCB List: ");
	scb_index = ahc_inb(ahc, FREE_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d ", scb_index);
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Sequencer SCB Info: ");
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);
		printf("%d(c 0x%x, s 0x%x, l %d, t 0x%x) ",
		       i, ahc_inb(ahc, SCB_CONTROL),
		       ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN),
		       ahc_inb(ahc, SCB_TAG));
	}
	printf("\n");

	printf("Pending list: ");
	i = 0;
	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
		if (i++ > 256)
			break;
		if (scb != LIST_FIRST(&ahc->pending_scbs))
			printf(", ");
		printf("%d(c 0x%x, s 0x%x, l %d)", scb->hscb->tag,
		       scb->hscb->control, scb->hscb->scsiid, scb->hscb->lun);
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			/* Unpaged: hardware slot == tag, so show it too. */
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
			printf("(0x%x, 0x%x)", ahc_inb(ahc, SCB_CONTROL),
			       ahc_inb(ahc, SCB_TAG));
		}
	}
	printf("\n");

	printf("Kernel Free SCB list: ");
	i = 0;
	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
		if (i++ > 256)
			break;
		printf("%d ", scb->hscb->tag);
	}
	printf("\n");

	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
	for (target = 0; target <= maxtarget; target++) {
		untagged_q = &ahc->untagged_queues[target];
		if (TAILQ_FIRST(untagged_q) == NULL)
			continue;
		printf("Untagged Q(%d): ", target);
		i = 0;
		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
			if (i++ > 256)
				break;
			printf("%d ", scb->hscb->tag);
		}
		printf("\n");
	}

	ahc_platform_dump_card_state(ahc);
	ahc_outb(ahc, SCBPTR, saved_scbptr);
}

/************************* Target Mode ****************************************/
#ifdef AHC_TARGET_MODE
/*
 * Resolve the tstate/lstate pair addressed by 'ccb'.  Wildcard
 * target/lun maps to the "black hole" lun.  If notfound_failure is
 * set, a missing lstate is reported as CAM_PATH_INVALID.
 */
cam_status
ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
		    struct ahc_tmode_tstate **tstate,
		    struct ahc_tmode_lstate **lstate,
		    int notfound_failure)
{

	if ((ahc->features & AHC_TARGETMODE) == 0)
		return (CAM_REQ_INVALID);

	/*
	 * Handle the 'black hole' device that sucks up
	 * requests to unattached luns on enabled targets.
	 */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*tstate = NULL;
		*lstate = ahc->black_hole;
	} else {
		u_int max_id;

		max_id = (ahc->features & AHC_WIDE) ?
15 : 7;
		if (ccb->ccb_h.target_id > max_id)
			return (CAM_TID_INVALID);

		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
			return (CAM_LUN_INVALID);

		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
		*lstate = NULL;
		if (*tstate != NULL)
			*lstate =
			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
	}

	if (notfound_failure != 0 && *lstate == NULL)
		return (CAM_PATH_INVALID);

	return (CAM_REQ_CMP);
}

/*
 * Process an XPT_EN_LUN ccb: enable or disable target mode for the
 * target/lun addressed by 'ccb', allocating (or tearing down) the
 * tstate/lstate bookkeeping and updating the chip's selection-in
 * configuration.  Completion status is returned in ccb->ccb_h.status.
 */
void
ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	struct	   ahc_tmode_tstate *tstate;
	struct	   ahc_tmode_lstate *lstate;
	struct	   ccb_en_lun *cel;
	cam_status status;
	u_int	   target;
	u_int	   lun;
	u_int	   target_mask;
	u_long	   s;
	char	   channel;

	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
				     /*notfound_failure*/FALSE);

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if ((ahc->features & AHC_MULTIROLE) != 0) {
		u_int our_id;

		if (cam_sim_bus(sim) == 0)
			our_id = ahc->our_id;
		else
			our_id = ahc->our_id_b;

		if (ccb->ccb_h.target_id != our_id) {
			if ((ahc->features & AHC_MULTI_TID) != 0
			 && (ahc->flags & AHC_INITIATORROLE) != 0) {
				/*
				 * Only allow additional targets if
				 * the initiator role is disabled.
				 * The hardware cannot handle a re-select-in
				 * on the initiator id during a re-select-out
				 * on a different target id.
				 */
				status = CAM_TID_INVALID;
			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
				|| ahc->enabled_luns > 0) {
				/*
				 * Only allow our target id to change
				 * if the initiator role is not configured
				 * and there are no enabled luns which
				 * are attached to the currently registered
				 * scsi id.
				 */
				status = CAM_TID_INVALID;
			}
		}
	}

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	/*
	 * We now have an id that is valid.
	 * If we aren't in target mode, switch modes.
	 */
	if ((ahc->flags & AHC_TARGETROLE) == 0
	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		u_long	s;

		printf("Configuring Target Mode\n");
		ahc_lock(ahc, &s);
		/* Can't swap firmware with transactions in flight. */
		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
			ccb->ccb_h.status = CAM_BUSY;
			ahc_unlock(ahc, &s);
			return;
		}
		ahc->flags |= AHC_TARGETROLE;
		if ((ahc->features & AHC_MULTIROLE) == 0)
			ahc->flags &= ~AHC_INITIATORROLE;
		ahc_pause(ahc);
		/* Reload the sequencer so target-mode patches apply. */
		ahc_loadseq(ahc);
		ahc_unlock(ahc, &s);
	}
	cel = &ccb->cel;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	channel = SIM_CHANNEL(ahc, sim);
	/* Channel B ids live in the upper byte of the TARGID mask. */
	target_mask = 0x01 << target;
	if (channel == 'B')
		target_mask <<= 8;

	if (cel->enable != 0) {
		u_int scsiseq;

		/* Are we already enabled?? */
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}

		if (cel->grp6_len != 0
		 || cel->grp7_len != 0) {
			/*
			 * Don't (yet?) support vendor
			 * specific commands.
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Setup our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahc_alloc_tstate(ahc, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		memset(lstate, 0, sizeof(*lstate));
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_DEVBUF);
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		ahc_lock(ahc, &s);
		ahc_pause(ahc);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahc->enabled_luns++;

			if ((ahc->features & AHC_MULTI_TID) != 0) {
				u_int targid_mask;

				/* Add this id to the selection-in mask. */
				targid_mask = ahc_inb(ahc, TARGID)
					    | (ahc_inb(ahc, TARGID + 1) << 8);

				targid_mask |= target_mask;
				ahc_outb(ahc, TARGID, targid_mask);
				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));

				ahc_update_scsiid(ahc, targid_mask);
			} else {
				u_int our_id;
				char  channel;

				channel = SIM_CHANNEL(ahc, sim);
				our_id = SIM_SCSI_ID(ahc, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char  cur_channel;
					int   swap;

					sblkctl = ahc_inb(ahc, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahc->features & AHC_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					if (channel == 'A')
						ahc->our_id = target;
					else
						ahc->our_id_b = target;

					if (swap)
						ahc_outb(ahc, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahc_outb(ahc, SCSIID, target);

					if (swap)
						ahc_outb(ahc, SBLKCTL, sblkctl);
				}
			}
		} else
			ahc->black_hole = lstate;
		/* Allow select-in operations */
		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);
		}
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
	} else {
		struct scb *scb;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		ahc_lock(ahc, &s);

		/* Refuse to disable while CTIO/ATIO/INOT work is queued. */
		ccb->ccb_h.status = CAM_REQ_CMP;
		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
			struct ccb_hdr *ccbh;

			ccbh = &scb->io_ctx->ccb_h;
			if (ccbh->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
				printf("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				ahc_unlock(ahc, &s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			ahc_unlock(ahc, &s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);
6786 free(lstate, M_DEVBUF); 6787 6788 ahc_pause(ahc); 6789 /* Can we clean up the target too? */ 6790 if (target != CAM_TARGET_WILDCARD) { 6791 tstate->enabled_luns[lun] = NULL; 6792 ahc->enabled_luns--; 6793 for (empty = 1, i = 0; i < 8; i++) 6794 if (tstate->enabled_luns[i] != NULL) { 6795 empty = 0; 6796 break; 6797 } 6798 6799 if (empty) { 6800 ahc_free_tstate(ahc, target, channel, 6801 /*force*/FALSE); 6802 if (ahc->features & AHC_MULTI_TID) { 6803 u_int targid_mask; 6804 6805 targid_mask = ahc_inb(ahc, TARGID) 6806 | (ahc_inb(ahc, TARGID + 1) 6807 << 8); 6808 6809 targid_mask &= ~target_mask; 6810 ahc_outb(ahc, TARGID, targid_mask); 6811 ahc_outb(ahc, TARGID+1, 6812 (targid_mask >> 8)); 6813 ahc_update_scsiid(ahc, targid_mask); 6814 } 6815 } 6816 } else { 6817 6818 ahc->black_hole = NULL; 6819 6820 /* 6821 * We can't allow selections without 6822 * our black hole device. 6823 */ 6824 empty = TRUE; 6825 } 6826 if (ahc->enabled_luns == 0) { 6827 /* Disallow select-in */ 6828 u_int scsiseq; 6829 6830 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 6831 scsiseq &= ~ENSELI; 6832 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 6833 scsiseq = ahc_inb(ahc, SCSISEQ); 6834 scsiseq &= ~ENSELI; 6835 ahc_outb(ahc, SCSISEQ, scsiseq); 6836 6837 if ((ahc->features & AHC_MULTIROLE) == 0) { 6838 printf("Configuring Initiator Mode\n"); 6839 ahc->flags &= ~AHC_TARGETROLE; 6840 ahc->flags |= AHC_INITIATORROLE; 6841 ahc_pause(ahc); 6842 ahc_loadseq(ahc); 6843 } 6844 } 6845 ahc_unpause(ahc); 6846 ahc_unlock(ahc, &s); 6847 } 6848 } 6849 6850 static void 6851 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) 6852 { 6853 u_int scsiid_mask; 6854 u_int scsiid; 6855 6856 if ((ahc->features & AHC_MULTI_TID) == 0) 6857 panic("ahc_update_scsiid called on non-multitid unit\n"); 6858 6859 /* 6860 * Since we will rely on the the TARGID mask 6861 * for selection enables, ensure that OID 6862 * in SCSIID is not set to some other ID 6863 * that we don't want to allow selections on. 
6864 */ 6865 if ((ahc->features & AHC_ULTRA2) != 0) 6866 scsiid = ahc_inb(ahc, SCSIID_ULTRA2); 6867 else 6868 scsiid = ahc_inb(ahc, SCSIID); 6869 scsiid_mask = 0x1 << (scsiid & OID); 6870 if ((targid_mask & scsiid_mask) == 0) { 6871 u_int our_id; 6872 6873 /* ffs counts from 1 */ 6874 our_id = ffs(targid_mask); 6875 if (our_id == 0) 6876 our_id = ahc->our_id; 6877 else 6878 our_id--; 6879 scsiid &= TID; 6880 scsiid |= our_id; 6881 } 6882 if ((ahc->features & AHC_ULTRA2) != 0) 6883 ahc_outb(ahc, SCSIID_ULTRA2, scsiid); 6884 else 6885 ahc_outb(ahc, SCSIID, scsiid); 6886 } 6887 6888 void 6889 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 6890 { 6891 struct target_cmd *cmd; 6892 6893 /* 6894 * If the card supports auto-access pause, 6895 * we can access the card directly regardless 6896 * of whether it is paused or not. 6897 */ 6898 if ((ahc->features & AHC_AUTOPAUSE) != 0) 6899 paused = TRUE; 6900 6901 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); 6902 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 6903 6904 /* 6905 * Only advance through the queue if we 6906 * have the resources to process the command. 6907 */ 6908 if (ahc_handle_target_cmd(ahc, cmd) != 0) 6909 break; 6910 6911 cmd->cmd_valid = 0; 6912 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, 6913 ahc->shared_data_dmamap, 6914 ahc_targetcmd_offset(ahc, ahc->tqinfifonext), 6915 sizeof(struct target_cmd), 6916 BUS_DMASYNC_PREREAD); 6917 ahc->tqinfifonext++; 6918 6919 /* 6920 * Lazily update our position in the target mode incoming 6921 * command queue as seen by the sequencer. 
6922 */ 6923 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 6924 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 6925 u_int hs_mailbox; 6926 6927 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 6928 hs_mailbox &= ~HOST_TQINPOS; 6929 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 6930 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 6931 } else { 6932 if (!paused) 6933 ahc_pause(ahc); 6934 ahc_outb(ahc, KERNEL_TQINPOS, 6935 ahc->tqinfifonext & HOST_TQINPOS); 6936 if (!paused) 6937 ahc_unpause(ahc); 6938 } 6939 } 6940 } 6941 } 6942 6943 static int 6944 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 6945 { 6946 struct ahc_tmode_tstate *tstate; 6947 struct ahc_tmode_lstate *lstate; 6948 struct ccb_accept_tio *atio; 6949 uint8_t *byte; 6950 int initiator; 6951 int target; 6952 int lun; 6953 6954 initiator = SCSIID_TARGET(ahc, cmd->scsiid); 6955 target = SCSIID_OUR_ID(cmd->scsiid); 6956 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 6957 6958 byte = cmd->bytes; 6959 tstate = ahc->enabled_targets[target]; 6960 lstate = NULL; 6961 if (tstate != NULL) 6962 lstate = tstate->enabled_luns[lun]; 6963 6964 /* 6965 * Commands for disabled luns go to the black hole driver. 6966 */ 6967 if (lstate == NULL) 6968 lstate = ahc->black_hole; 6969 6970 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 6971 if (atio == NULL) { 6972 ahc->flags |= AHC_TQINFIFO_BLOCKED; 6973 /* 6974 * Wait for more ATIOs from the peripheral driver for this lun. 6975 */ 6976 if (bootverbose) 6977 printf("%s: ATIOs exhausted\n", ahc_name(ahc)); 6978 return (1); 6979 } else 6980 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 6981 #if 0 6982 printf("Incoming command from %d for %d:%d%s\n", 6983 initiator, target, lun, 6984 lstate == ahc->black_hole ? 
"(Black Holed)" : ""); 6985 #endif 6986 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 6987 6988 if (lstate == ahc->black_hole) { 6989 /* Fill in the wildcards */ 6990 atio->ccb_h.target_id = target; 6991 atio->ccb_h.target_lun = lun; 6992 } 6993 6994 /* 6995 * Package it up and send it off to 6996 * whomever has this lun enabled. 6997 */ 6998 atio->sense_len = 0; 6999 atio->init_id = initiator; 7000 if (byte[0] != 0xFF) { 7001 /* Tag was included */ 7002 atio->tag_action = *byte++; 7003 atio->tag_id = *byte++; 7004 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 7005 } else { 7006 atio->ccb_h.flags = 0; 7007 } 7008 byte++; 7009 7010 /* Okay. Now determine the cdb size based on the command code */ 7011 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 7012 case 0: 7013 atio->cdb_len = 6; 7014 break; 7015 case 1: 7016 case 2: 7017 atio->cdb_len = 10; 7018 break; 7019 case 4: 7020 atio->cdb_len = 16; 7021 break; 7022 case 5: 7023 atio->cdb_len = 12; 7024 break; 7025 case 3: 7026 default: 7027 /* Only copy the opcode. */ 7028 atio->cdb_len = 1; 7029 printf("Reserved or VU command code type encountered\n"); 7030 break; 7031 } 7032 7033 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 7034 7035 atio->ccb_h.status |= CAM_CDB_RECVD; 7036 7037 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 7038 /* 7039 * We weren't allowed to disconnect. 7040 * We're hanging on the bus until a 7041 * continue target I/O comes in response 7042 * to this accept tio. 7043 */ 7044 #if 0 7045 printf("Received Immediate Command %d:%d:%d - %p\n", 7046 initiator, target, lun, ahc->pending_device); 7047 #endif 7048 ahc->pending_device = lstate; 7049 ahc_freeze_ccb((union ccb *)atio); 7050 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 7051 } 7052 xpt_done((union ccb*)atio); 7053 return (0); 7054 } 7055 7056 #endif 7057